# dialogsum_reformat / dialogsum_reformat.py
import json

import datasets

logger = datasets.logging.get_logger(__name__)
_CITATION = """
@inproceedings{chen-etal-2021-dialogsum,
    title = {{D}ialog{S}um: {A} Real-Life Scenario Dialogue Summarization Dataset},
    author = {Chen, Yulong and Liu, Yang and Chen, Liang and Zhang, Yue},
    booktitle = {Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021},
    month = {aug},
    year = {2021},
    address = {Online},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2021.findings-acl.449},
    doi = {10.18653/v1/2021.findings-acl.449},
    pages = {5062--5074}
}
"""
_DESCRIPTION = """
DialogSum Corpus contains 13,460 chat dialogues with manually annotated
summaries.
There are four features:
  - dialogue: text of the dialogue.
  - summary: human-written summary of the dialogue.
  - topic: one-line summary of the dialogue.
  - id: id of an example.
"""
_HOMEPAGE = "https://aclanthology.org/2021.findings-acl.449"
_LICENSE = "CC BY-NC-ND 4.0"
_URL = "https://huggingface.co/datasets/knkarthick/dialogsum_reformat/resolve/main/"
_URLS = {
    "train": _URL + "train.json",
    "test": _URL + "test.json",
    "val": _URL + "val.json",
}
class Dialogsum(datasets.GeneratorBasedBuilder):
    """DialogSum Corpus dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="dialogsum_reformat",
            version=datasets.Version("1.0.0", ""),
            description="DialogSum Corpus dataset",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "dialogue": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "topic": datasets.Value("string"),
                }
            ),
            # No default supervised_keys; downstream code can choose its own
            # input/target pairing (e.g. dialogue -> summary).
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # download_and_extract returns a dict mapping each key in _URLS to a
        # local path for the corresponding split file.
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
        ]
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        key = 0
        # Each split file is a single JSON array of example objects.
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for info in data:
                dialogue_id = info["id"]
                dialogue_text = info["dialogue"]
                dialogue_summary = info["summary"]
                dialogue_topic = info["topic"]
                yield key, {
                    "id": dialogue_id,
                    "dialogue": dialogue_text,
                    "summary": dialogue_summary,
                    "topic": dialogue_topic,
                }
                key += 1
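

# Minimal usage sketch (not part of the loading script itself): assuming this
# script is hosted on the Hub as "knkarthick/dialogsum_reformat", the splits can
# be loaded with the `datasets` library as shown below. The __main__ guard keeps
# the example from running when the library imports this module.
if __name__ == "__main__":
    from datasets import load_dataset

    # Downloads train.json / test.json / val.json and builds the three splits.
    dataset = load_dataset("knkarthick/dialogsum_reformat")
    print(dataset)  # DatasetDict with train / test / validation splits
    print(dataset["train"][0]["summary"])  # summary of the first training example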