Datasets:

Languages:
Spanish
License:
joanllop committed on
Commit
692129a
1 Parent(s): 71f9585

Delete wnli.py

Browse files
Files changed (1) hide show
  1. wnli.py +0 -106
wnli.py DELETED
@@ -1,106 +0,0 @@
1
# Loading script for the Spanish WNLI (Winograd NLI) dataset.
# (The original header said "TECA dataset" — a stale copy-paste from another
# loading script; this file loads the Spanish translation of GLUE's WNLI.)
import json  # NOTE(review): unused in this script; kept to avoid breaking any external reliance
import datasets

logger = datasets.logging.get_logger(__name__)

# Citation placeholder — still to be filled in by the dataset authors.
_CITATION = """
ADD CITATION
"""

_DESCRIPTION = """
professional translation into Spanish of Winograd NLI dataset as published in GLUE Benchmark.
The Winograd NLI dataset presents 855 sentence pairs,
in which the first sentence contains an ambiguity and the second one a possible interpretation of it.
The label indicates if the interpretation is correct (1) or not (0).
"""

_HOMEPAGE = """https://cs.nyu.edu/~davise/papers/WinogradSchemas/WS.html"""

# TODO: upload datasets to github
# Data files are expected next to this script until they are hosted remotely.
_URL = "./"
_TRAINING_FILE = "wnli-train-es.tsv"
_DEV_FILE = "wnli-dev-es.tsv"
_TEST_FILE = "wnli-test-shuffled-es.tsv"
27
class WinogradConfig(datasets.BuilderConfig):
    """Builder config for the Spanish Winograd (WNLI) dataset."""

    def __init__(self, **kwargs):
        """Create a builder config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
38
class Winograd(datasets.GeneratorBasedBuilder):
    """Spanish Winograd NLI (WNLI) dataset builder."""

    BUILDER_CONFIGS = [
        WinogradConfig(
            name="winograd",
            version=datasets.Version("1.0.0"),
            description="Winograd dataset",
        ),
    ]

    def _info(self):
        """Return dataset metadata: features, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sentence1": datasets.Value("string"),
                    "sentence2": datasets.Value("string"),
                    # Index 0 -> "not_entailment", index 1 -> "entailment",
                    # matching the 0/1 integer labels in the source TSVs.
                    "label": datasets.features.ClassLabel(
                        names=["not_entailment", "entailment"]
                    ),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return SplitGenerators for the train/dev/test TSV files."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs from a tab-separated file.

        Files whose header contains "label" have columns
        ref/sentence1/sentence2/label; the shuffled test file has no label
        column, so its examples get label ``-1`` (ClassLabel's "missing"
        value).
        """
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            header = next(f)
            process_label = {"0": "not_entailment", "1": "entailment"}
            for id_, row in enumerate(f):
                # BUG FIX: the original stripped the newline only on the
                # labelled branch — and did it with row[:-1], which chops the
                # last character of the label when the file's final line lacks
                # a trailing newline.  The unlabelled (test) branch did no
                # stripping at all, leaving "\n" attached to sentence2.
                # rstrip("\n") handles both branches uniformly and safely.
                fields = row.rstrip("\n").split("\t")
                if "label" in header:
                    ref, sentence1, sentence2, score = fields
                    yield id_, {
                        "sentence1": sentence1,
                        "sentence2": sentence2,
                        "label": process_label[score],
                    }
                else:
                    ref, sentence1, sentence2 = fields
                    yield id_, {
                        "sentence1": sentence1,
                        "sentence2": sentence2,
                        "label": -1,
                    }