# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Multi-Document Dataset."""


import json

import datasets
from datasets import set_caching_enabled

# Disable caching globally so the dataset is rebuilt on every load.
set_caching_enabled(False)


_CITATION = """
@article{lu2020multi,
    title={Multi-Document: A Large-scale Dataset for Extreme Multi-document Summarization of Scientific Articles},
    author={Arka Das, India},
    journal={arXiv preprint arXiv:2010.14235},
    year={2022}
}
"""

_DESCRIPTION = """
Multi-Document, a large-scale multi-document summarization dataset created from scientific articles.
Multi-Document introduces a challenging multi-document summarization task: writing the related-work
section of a paper based on its abstract and the articles it references.
"""

_URL_TRAIN = "https://github.com/arka0821/multi_document_summarization/raw/master/data/train.json.gz"
_URL_TEST = "https://github.com/arka0821/multi_document_summarization/raw/master/data/test.json.gz"
_URL_VAL = "https://github.com/arka0821/multi_document_summarization/raw/master/data/val.json.gz"


class MultiDocumentSum(datasets.GeneratorBasedBuilder):
    """Multi-Document Dataset."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    # A Sequence of a dict is stored as a dict of lists
                    # ({"id": [...], "text": [...]}), which is why
                    # _generate_examples transposes the per-document records.
                    "docs": datasets.Sequence(
                        {
                            "id": datasets.Value("string"),
                            "text": datasets.Value("string"),
                        }
                    ),
                    "summary": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/arka0821/multi_document_summarization",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Force a fresh download of the archives instead of reusing cached copies.
        dl_manager.download_config.force_download = True
        train_path = dl_manager.download_and_extract(_URL_TRAIN)
        test_path = dl_manager.download_and_extract(_URL_TEST)
        val_path = dl_manager.download_and_extract(_URL_VAL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": train_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"path": test_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"path": val_path},
            ),
        ]

    def _generate_examples(self, path=None):
        """Yields examples."""
        with open(path, encoding="utf-8") as f:
            data = json.load(f)
        for idx, el in enumerate(data):
            # Transpose the list of {"id", "text"} records into the
            # dict-of-lists layout expected by datasets.Sequence.
            ids = [doc["id"] for doc in el["docs"]]
            texts = [doc["text"] for doc in el["docs"]]
            d = el.copy()
            d["docs"] = {"id": ids, "text": texts}
            yield idx, d
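

# A minimal usage sketch (an addition, not part of the original script):
# it loads this builder through `datasets.load_dataset` and inspects one
# example. Using `__file__` assumes the script is run directly from disk;
# network access to the GitHub URLs above is required for the download.
if __name__ == "__main__":
    from datasets import load_dataset

    # Point load_dataset at this script; it downloads the three JSON.gz
    # splits and materializes them as Dataset objects keyed by split name.
    dataset = load_dataset(__file__)
    example = dataset["train"][0]
    print(example["id"])
    print(example["docs"]["id"][:3])  # ids of the first three referenced documents
    print(example["summary"][:200])   # beginning of the target summary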