# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dutch translation of the Extreme Summarization (XSum) dataset."""

import json

import datasets

logger = datasets.logging.get_logger(__name__)

_HOMEPAGE = "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset"

_CITATION = """\
@article{Narayan2018DontGM,
  title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},
  author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},
  journal={ArXiv},
  year={2018},
  volume={abs/1808.08745}
}
"""

_DESCRIPTION = """\
Extreme Summarization (XSum) Dataset.

There are three features:
  - document: Input news article.
  - summary: One-sentence summary of the article.
  - id: BBC ID of the article.
"""

_DATA_URL_NL = "https://huggingface.co/datasets/yhavinga/xsum_dutch/resolve/main/{config}/{split}.json.gz"

_DOCUMENT = "document"
_SUMMARY = "summary"
_ID = "id"

_SUPPORTED_VERSIONS = [
    datasets.Version("1.0.0", "Default version."),
]


class XsumDutchConfig(datasets.BuilderConfig):
    """BuilderConfig for XsumDutch."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class XsumDutch(datasets.GeneratorBasedBuilder):
    """Dutch translation of the Extreme Summarization (XSum) dataset."""

    BUILDER_CONFIGS = [
        XsumDutchConfig(name=str(version), description=version.description)
        for version in _SUPPORTED_VERSIONS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _DOCUMENT: datasets.Value("string"),
                    _SUMMARY: datasets.Value("string"),
                    _ID: datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSON-lines files and return one SplitGenerator per split."""
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": dl_manager.download_and_extract(
                        _DATA_URL_NL.format(split=str(split), config=str(self.config.name))
                    )
                },
            )
            for split in [
                datasets.Split.TRAIN,
                datasets.Split.VALIDATION,
                datasets.Split.TEST,
            ]
        ]

    def _generate_examples(self, filepath):
        """Yield examples from the given JSON-lines file, one JSON object per line."""
        logger.info(f"Generating examples from {filepath}")
        with open(filepath, "r", encoding="utf-8") as file:
            for _id, line in enumerate(file):
                example = json.loads(line)
                yield _id, example
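

# ---------------------------------------------------------------------------
# Quick usage sketch (not part of the loading script itself; assumes this
# script is published on the Hub as `yhavinga/xsum_dutch`, as suggested by
# _DATA_URL_NL above, and that the config name is "1.0.0" per
# _SUPPORTED_VERSIONS).
if __name__ == "__main__":
    from datasets import load_dataset

    # Downloads and prepares all three splits via this loading script.
    xsum_nl = load_dataset("yhavinga/xsum_dutch", "1.0.0")
    # Each example carries the three features declared in _info().
    print(xsum_nl["train"][0][_SUMMARY])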