"""Hugging Face `datasets` loading script for the ERRNews dataset.

Each example pairs a transcript with its summary and an id.
"""

import json

import datasets


class ERRNewsConfig(datasets.BuilderConfig):
    """BuilderConfig that records which feature columns a configuration exposes."""

    def __init__(self, features, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features


class ERRNews(datasets.GeneratorBasedBuilder):
    """Dataset builder that reads the train/val/test JSON files shipped alongside the script."""

    features = ["transcript", "summary", "id"]

    BUILDER_CONFIGS = [
        ERRNewsConfig(
            name="full",
            features=features,
        )
    ]

    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        features = datasets.Features(
            {
                "transcript": datasets.Value("string"),
                "summary": datasets.Value("string"),
                "id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, validation and test JSON files."""
        train = "./train.json"
        test = "./test.json"
        val = "./val.json"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": train},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": val},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": test},
            ),
        ]

    def create_dict(self, data):
        """Helper that keeps only the feature columns declared by the active config."""
        res = dict()
        for key in self.config.features:
            res[key] = data[key]
        return res

    def _generate_examples(self, file_path):
        # The JSON files store the data column-wise: parallel lists under
        # "transcript", "summary" and "id".
        with open(file_path) as f:
            data = json.load(f)
        for idx, transcript in enumerate(data["transcript"]):
            id_ = data["id"][idx]
            yield id_, {
                "transcript": transcript,
                "summary": data["summary"][idx],
                "id": id_,
            }
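

if __name__ == "__main__":
    # A minimal usage sketch, not part of the loading script proper. It assumes
    # the train/val/test JSON files sit next to this file and that the installed
    # `datasets` version still supports loading a dataset from a local script path.
    from datasets import load_dataset

    ds = load_dataset(__file__, name="full")
    print(ds)                      # DatasetDict with train/validation/test splits
    print(ds["train"][0]["id"])    # each record exposes transcript, summary and id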