Datasets:

Modalities:
Text
Formats:
parquet
Languages:
Catalan
ArXiv:
Libraries:
Datasets
pandas
License:
ibaucells committed on
Commit
8e3c544
1 Parent(s): 297ba5e

Upload xnli_ca.py

Browse files
Files changed (1) hide show
  1. xnli_ca.py +90 -0
xnli_ca.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Loading script for the TECA dataset.
2
+ import json
3
+ import datasets
4
+
5
+ logger = datasets.logging.get_logger(__name__)
6
+
7
+ # _CITATION = ""
8
+
9
+ _DESCRIPTION = """
10
+ XNLI-ca is the Catalan professional translation of the development and test partitions of the XNLI dataset, which contain 2490 and 5010 pairs of premises and hypotheses, respectively. This dataset was translated as part of the AINA project.
11
+ """
12
+
13
+ _HOMEPAGE = """https://zenodo.org/record/4621378"""
14
+
15
+
16
+ _URL = "https://huggingface.co/datasets/projecte-aina/xnli-ca/resolve/main/"
17
+ _DEV_FILE = "xnli.dev.ca.json"
18
+ _TEST_FILE = "xnli.test.ca.json"
19
+
20
+
class xnliConfig(datasets.BuilderConfig):
    """Builder configuration for the XNLI-ca dataset."""

    def __init__(self, **kwargs):
        """Create a BuilderConfig for XNLI-ca.

        Args:
            **kwargs: keyword arguments forwarded to the parent
                ``datasets.BuilderConfig`` constructor.
        """
        super().__init__(**kwargs)
class xnli(datasets.GeneratorBasedBuilder):
    """Dataset builder for XNLI-ca (validation and test splits only)."""

    BUILDER_CONFIGS = [
        xnliConfig(
            name="xnli-ca",
            version=datasets.Version("1.0.1"),
            description="XNLI-ca dataset",
        ),
    ]

    def _info(self):
        """Return the dataset metadata: features, description and homepage."""
        # Three-way NLI label set, in the canonical XNLI order.
        label_feature = datasets.features.ClassLabel(
            names=["entailment", "neutral", "contradiction"]
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": label_feature,
                }
            ),
            homepage=_HOMEPAGE,
            # citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the split files and declare the validation/test splits."""
        downloaded = dl_manager.download_and_extract(
            {
                "dev": f"{_URL}{_DEV_FILE}",
                "test": f"{_URL}{_TEST_FILE}",
            }
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs parsed from one JSON split file.

        Each example carries the raw (text) premise, hypothesis and label.
        Assumes the JSON file is a sequence of objects with "premise",
        "hypothesis" and "label" keys — same contract as the original code.
        """
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as source:
            records = json.load(source)
        for idx, record in enumerate(records):
            yield idx, {
                "premise": record["premise"],
                "hypothesis": record["hypothesis"],
                "label": record["label"],
            }