# GenMix50k/GenMix50k.py
# HuggingFace dataset loading script for moon23k/GenMix50k (revision 1018f27).
import os
import json
import datasets as ds
# Human-readable dataset card text (placeholders in the original release).
_DESCRIPTION = """
Description of the dataset.
"""
_CITATION = """
Citation for the dataset.
"""
_LICENCE = """
License information for the dataset.
"""
# Every task (translation / dialogue / summarization) shares the same
# two-column string schema: an input text "x" and a target text "y".
_FEATURES = ds.Features(
    {
        "x": ds.Value("string"),
        "y": ds.Value("string"),
    }
)
class GenMix50kConfig(ds.BuilderConfig):
    """BuilderConfig for one GenMix50k task.

    All named configs share the same version, feature schema, and
    citation; only the config ``name`` distinguishes them.
    """

    def __init__(self, **kwargs):
        # Run the standard BuilderConfig setup first, then pin the
        # metadata that is common to every task configuration.
        super().__init__(**kwargs)
        self.version = ds.Version("1.0.3")
        self.features = _FEATURES
        self.citation = _CITATION
class GenMix50k(ds.GeneratorBasedBuilder):
    """Dataset builder for GenMix50k.

    Exposes one builder config per task; each task provides
    train / validation / test splits hosted on the Hugging Face Hub.
    """

    BUILDER_CONFIGS = [
        GenMix50kConfig(name="translation"),
        GenMix50kConfig(name="dialogue"),
        GenMix50kConfig(name="summarization"),
    ]

    def _info(self) -> ds.DatasetInfo:
        """Return the dataset metadata for the active config."""
        # Features and citation come from the selected config; the
        # description and licence are shared module-level constants.
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            citation=self.config.citation,
            license=_LICENCE,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        """Download the active task's files and return its SplitGenerators."""
        base_url = "https://huggingface.co/datasets/moon23k/GenMix50k/resolve/main/"
        # Per-task remote file layout. NOTE: the on-disk file for the
        # validation split is named "valid.json" while the split key is
        # "validation".
        data_files = {
            "translation": {
                "train": base_url + "translation/train.json",
                "validation": base_url + "translation/valid.json",
                "test": base_url + "translation/test.json"
            },
            "dialogue": {
                "train": base_url + "dialogue/train.json",
                "validation": base_url + "dialogue/valid.json",
                "test": base_url + "dialogue/test.json"
            },
            "summarization": {
                "train": base_url + "summarization/train.json",
                "validation": base_url + "summarization/valid.json",
                "test": base_url + "summarization/test.json"
            }
        }
        # Only fetch the files belonging to the selected task config.
        local_paths = dl_manager.download_and_extract(data_files[self.config.name])
        split_keys = [
            (ds.Split.TRAIN, "train"),
            (ds.Split.VALIDATION, "validation"),
            (ds.Split.TEST, "test"),
        ]
        return [
            ds.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": local_paths[key]},
            )
            for split, key in split_keys
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from one split file.

        The file is parsed line-by-line with ``json.loads``, i.e. it is
        assumed to be JSON-lines with one record per line — TODO confirm
        against the hosted files.
        """
        with open(filepath, encoding="utf-8") as handle:
            index = 0
            for raw_line in handle:
                yield index, json.loads(raw_line)
                index += 1