import json
import os
import pathlib
from typing import overload

import datasets
from datasets.info import DatasetInfo

_VERSION = "0.0.5"

# Relative paths resolved by the datasets download manager against the
# repository/script location.
_URL = "data/"
_URLS = {
    "train": _URL + "train.jsonl",
    "validation": _URL + "validation.jsonl",
    "test": _URL + "test.jsonl",
}

_DESCRIPTION = """\
CtkFactsNLI is a NLI version of the Czech CTKFacts dataset
"""

_CITATION = """\
todo
"""


class CtkfactsNli(datasets.GeneratorBasedBuilder):
    """Dataset builder for CtkFactsNLI, an NLI-style fact-checking dataset.

    Each example pairs a claim with concatenated evidence text and a
    three-way label (REFUTES / NOT ENOUGH INFO / SUPPORTS).
    """

    def _info(self):
        """Return the dataset metadata (features, description, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "label": datasets.ClassLabel(
                        names=["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]
                    ),
                    "evidence": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            version=_VERSION,
            homepage="https://fcheck.fel.cvut.cz/dataset/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the JSONL files and declare the train/validation/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a JSON-lines split file.

        Each line is a JSON object with "id", "claim", "label" and an
        "evidence" list of strings, which is joined into a single string.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                datapoint = json.loads(line)
                yield key, {
                    "id": datapoint["id"],
                    "evidence": " ".join(datapoint["evidence"]),
                    "claim": datapoint["claim"],
                    "label": datapoint["label"],
                }