albertvillanova (HF staff) committed
Commit f09a51f
1 Parent(s): 0ad984d

Delete loading script

Files changed (1)
  1. assin.py +0 -173
assin.py DELETED
@@ -1,173 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ASSIN dataset."""
-
-
- import xml.etree.ElementTree as ET
-
- import datasets
-
-
- _CITATION = """
- @inproceedings{fonseca2016assin,
- title={ASSIN: Avaliacao de similaridade semantica e inferencia textual},
- author={Fonseca, E and Santos, L and Criscuolo, Marcelo and Aluisio, S},
- booktitle={Computational Processing of the Portuguese Language-12th International Conference, Tomar, Portugal},
- pages={13--15},
- year={2016}
- }
- """
-
- _DESCRIPTION = """
- The ASSIN (Avaliação de Similaridade Semântica e INferência textual) corpus is a corpus annotated with pairs of sentences written in
- Portuguese that is suitable for exploring textual entailment and paraphrase classifiers. The corpus contains pairs of sentences
- extracted from news articles written in European Portuguese (EP) and Brazilian Portuguese (BP), obtained from Google News Portugal
- and Brazil, respectively. To create the corpus, the authors started by collecting a set of news articles describing the
- same event (one news article from Google News Portugal and another from Google News Brazil) from Google News.
- Then, they employed Latent Dirichlet Allocation (LDA) models to retrieve pairs of similar sentences between sets of news
- articles that were grouped together around the same topic. For that, two LDA models were trained (one for EP and one for BP)
- on external, large-scale collections of unannotated news articles from Portuguese and Brazilian news providers, respectively.
- Then, the authors defined a lower and an upper threshold for the sentence similarity score of the retrieved pairs of sentences,
- taking into account that high similarity scores correspond to sentences that contain almost the same content (paraphrase candidates),
- and low similarity scores correspond to sentences that are very different in content from each other (no-relation candidates).
- From the collection of pairs of sentences obtained at this stage, the authors performed some manual grammatical corrections
- and discarded some wrongly retrieved pairs. Furthermore, from a preliminary analysis of the retrieved sentence pairs,
- the authors noticed that the number of contradictions retrieved during the previous stage was very low. They also
- noticed that even though paraphrases are not very frequent in general, they occur with some frequency in news articles. Consequently,
- in contrast with the majority of the currently available corpora for other languages, which use the labels “neutral”, “entailment”
- and “contradiction” for the RTE task, the authors of the ASSIN corpus decided to use the labels “none”, “entailment” and “paraphrase”.
- Finally, the manual annotation of the sentence pairs was performed by human annotators. At least four annotators were randomly
- selected to annotate each pair of sentences, which was done in two steps: (i) assigning a semantic similarity label (a score between 1 and 5,
- from unrelated to very similar); and (ii) providing an entailment label (one sentence entails the other, the sentences are paraphrases,
- or there is no relation). Sentence pairs for which at least three annotators did not agree on the entailment label were considered controversial
- and thus discarded from the gold standard annotations. The full dataset has 10,000 sentence pairs, half in Brazilian Portuguese
- and half in European Portuguese. Each language variant has 2,500 pairs for training, 500 for validation and 2,000 for testing.
- """
-
- _HOMEPAGE = "http://nilc.icmc.usp.br/assin/"
-
- _LICENSE = ""
-
- _URL = "http://nilc.icmc.usp.br/assin/assin.tar.gz"
-
-
- class Assin(datasets.GeneratorBasedBuilder):
-     """ASSIN dataset."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="full",
-             version=VERSION,
-             description="If you want to use all the ASSIN data (Brazilian Portuguese and European Portuguese)",
-         ),
-         datasets.BuilderConfig(
-             name="ptpt",
-             version=VERSION,
-             description="If you want to use only the ASSIN European Portuguese subset",
-         ),
-         datasets.BuilderConfig(
-             name="ptbr",
-             version=VERSION,
-             description="If you want to use only the ASSIN Brazilian Portuguese subset",
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "full"
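
The three configs above only change which XML files are read from the archive. A hedged sketch of selecting a single variant, under the same Hub-availability assumption as before:

    import datasets

    # Expected to print ['full', 'ptpt', 'ptbr'] if the configs are unchanged.
    print(datasets.get_dataset_config_names("assin"))

    # Load only the Brazilian Portuguese training pairs.
    ptbr_train = datasets.load_dataset("assin", "ptbr", split="train")
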
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "sentence_pair_id": datasets.Value("int64"),
-                 "premise": datasets.Value("string"),
-                 "hypothesis": datasets.Value("string"),
-                 "relatedness_score": datasets.Value("float32"),
-                 "entailment_judgment": datasets.features.ClassLabel(names=["NONE", "ENTAILMENT", "PARAPHRASE"]),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
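
Because `entailment_judgment` is declared as a `ClassLabel`, loaded examples store integers rather than strings; the mapping can be recovered from the features. A short sketch, reusing the `assin` object from the earlier example:

    label = assin["train"].features["entailment_judgment"]
    print(label.names)            # ['NONE', 'ENTAILMENT', 'PARAPHRASE']
    print(label.int2str(2))       # 'PARAPHRASE'
    print(label.str2int("NONE"))  # 0
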
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         archive = dl_manager.download(_URL)
-
-         train_paths = []
-         dev_paths = []
-         test_paths = []
-
-         if self.config.name == "full" or self.config.name == "ptpt":
-             train_paths.append("assin-ptpt-train.xml")
-             dev_paths.append("assin-ptpt-dev.xml")
-             test_paths.append("assin-ptpt-test.xml")
-
-         if self.config.name == "full" or self.config.name == "ptbr":
-             train_paths.append("assin-ptbr-train.xml")
-             dev_paths.append("assin-ptbr-dev.xml")
-             test_paths.append("assin-ptbr-test.xml")
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepaths": train_paths,
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepaths": test_paths,
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filepaths": dev_paths,
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             ),
-         ]
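
Note that `iter_archive` streams `(path, file object)` pairs straight out of the tarball without extracting it, which is why `_generate_examples` filters on `path`. Roughly the same behavior can be reproduced outside a loading script with the standard library; a sketch assuming `assin.tar.gz` has already been downloaded locally and holds the XML files at its root:

    import tarfile

    wanted = {"assin-ptbr-train.xml"}
    with tarfile.open("assin.tar.gz", "r:gz") as tar:
        for member in tar:
            # Mirror the (path, file) filtering done against iter_archive.
            if member.isfile() and member.name in wanted:
                f = tar.extractfile(member)
                print(member.name, len(f.read()), "bytes")
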
-
-     def _generate_examples(self, filepaths, files):
-         """Yields examples."""
-
-         id_ = 0
-
-         for path, f in files:
-             if path in filepaths:
-
-                 tree = ET.parse(f)
-                 root = tree.getroot()
-
-                 for pair in root:
-
-                     yield id_, {
-                         "sentence_pair_id": int(pair.attrib.get("id")),
-                         "premise": pair.find(".//t").text,
-                         "hypothesis": pair.find(".//h").text,
-                         "relatedness_score": float(pair.attrib.get("similarity")),
-                         "entailment_judgment": pair.attrib.get("entailment").upper(),
-                     }
-
-                     id_ += 1
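
For anyone who keeps the raw XML files after this script is deleted, the parsing above is easy to reproduce. A self-contained sketch: the sample pair is invented, and only the `pair`/`t`/`h` tags and the `id`/`similarity`/`entailment` attributes follow what the script reads (the script iterates the root's children, so the root tag name is immaterial):

    import xml.etree.ElementTree as ET

    # One annotated pair in the ASSIN XML layout (sentences are illustrative).
    sample = """<corpus>
      <pair id="1" similarity="4.5" entailment="Paraphrase">
        <t>O time venceu a partida.</t>
        <h>A equipe ganhou o jogo.</h>
      </pair>
    </corpus>"""

    root = ET.fromstring(sample)
    for pair in root:
        print(int(pair.attrib["id"]),             # sentence_pair_id
              float(pair.attrib["similarity"]),   # relatedness_score
              pair.attrib["entailment"].upper(),  # entailment_judgment
              pair.find(".//t").text,             # premise
              pair.find(".//h").text)             # hypothesis
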