Datasets:

Modalities:
Text
Formats:
parquet
Sub-tasks:
extractive-qa
Libraries:
Datasets
Dask
License:
albertvillanova HF staff committed on
Commit
843ef53
1 Parent(s): 3b1697c

Delete loading script

Browse files
Files changed (1) hide show
  1. tydiqa.py +0 -268
tydiqa.py DELETED
@@ -1,268 +0,0 @@
1
- """TODO(tydiqa): Add a description here."""
2
-
3
-
4
- import json
5
- import textwrap
6
-
7
- import datasets
8
- from datasets.tasks import QuestionAnsweringExtractive
9
-
10
-
11
- # TODO(tydiqa): BibTeX citation
12
- _CITATION = """\
13
- @article{tydiqa,
14
- title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},
15
- author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki}
16
- year = {2020},
17
- journal = {Transactions of the Association for Computational Linguistics}
18
- }
19
- """
20
-
21
- # TODO(tydiqa):
22
- _DESCRIPTION = """\
23
- TyDi QA is a question answering dataset covering 11 typologically diverse languages with 204K question-answer pairs.
24
- The languages of TyDi QA are diverse with regard to their typology -- the set of linguistic features that each language
25
- expresses -- such that we expect models performing well on this set to generalize across a large number of the languages
26
- in the world. It contains language phenomena that would not be found in English-only corpora. To provide a realistic
27
- information-seeking task and avoid priming effects, questions are written by people who want to know the answer, but
28
- don’t know the answer yet, (unlike SQuAD and its descendents) and the data is collected directly in each language without
29
- the use of translation (unlike MLQA and XQuAD).
30
- """
31
-
32
- _URL = "https://storage.googleapis.com/tydiqa/"
33
- _PRIMARY_URLS = {
34
- "train": _URL + "v1.0/tydiqa-v1.0-train.jsonl.gz",
35
- "dev": _URL + "v1.0/tydiqa-v1.0-dev.jsonl.gz",
36
- }
37
- _SECONDARY_URLS = {
38
- "train": _URL + "v1.1/tydiqa-goldp-v1.1-train.json",
39
- "dev": _URL + "v1.1/tydiqa-goldp-v1.1-dev.json",
40
- }
41
-
42
-
43
class TydiqaConfig(datasets.BuilderConfig):
    """BuilderConfig for TyDi QA.

    Pins the dataset version to ``1.0.0`` and forwards all other keyword
    arguments to :class:`datasets.BuilderConfig`.
    """

    def __init__(self, **kwargs):
        """Create a config.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
54
-
55
-
56
class Tydiqa(datasets.GeneratorBasedBuilder):
    """Builder for the TyDi QA dataset.

    Two configurations are exposed:

    * ``primary_task``   -- passage selection (SelectP) and minimal answer
      span (MinSpan) over full Wikipedia articles, read from gzipped
      JSON-lines files.
    * ``secondary_task`` -- gold passage (GoldP) extractive QA in SQuAD 1.1
      format.
    """

    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        TydiqaConfig(
            name="primary_task",
            description=textwrap.dedent(
                """\
                Passage selection task (SelectP): Given a list of the passages in the article, return either (a) the index of
                the passage that answers the question or (b) NULL if no such passage exists.
                Minimal answer span task (MinSpan): Given the full text of an article, return one of (a) the start and end
                byte indices of the minimal span that completely answers the question; (b) YES or NO if the question requires
                a yes/no answer and we can draw a conclusion from the passage; (c) NULL if it is not possible to produce a
                minimal answer for this question."""
            ),
        ),
        TydiqaConfig(
            name="secondary_task",
            description=textwrap.dedent(
                """Gold passage task (GoldP): Given a passage that is guaranteed to contain the
                answer, predict the single contiguous span of characters that answers the question. This is more similar to
                existing reading comprehension datasets (as opposed to the information-seeking task outlined above).
                This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing
                a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,
                XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:
                only the gold answer passage is provided rather than the entire Wikipedia article;
                unanswerable questions have been discarded, similar to MLQA and XQuAD;
                we evaluate with the SQuAD 1.1 metrics like XQuAD; and
                Thai and Japanese are removed since the lack of whitespace breaks some tools.
                """
            ),
        ),
    ]

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` for the active config."""
        if self.config.name == "primary_task":
            return datasets.DatasetInfo(
                description=_DESCRIPTION,
                features=datasets.Features(
                    {
                        # Byte offsets of each candidate passage within
                        # ``document_plaintext``.
                        "passage_answer_candidates": datasets.features.Sequence(
                            {
                                "plaintext_start_byte": datasets.Value("int32"),
                                "plaintext_end_byte": datasets.Value("int32"),
                            }
                        ),
                        "question_text": datasets.Value("string"),
                        "document_title": datasets.Value("string"),
                        "language": datasets.Value("string"),
                        "annotations": datasets.features.Sequence(
                            {
                                "passage_answer_candidate_index": datasets.Value("int32"),
                                "minimal_answers_start_byte": datasets.Value("int32"),
                                "minimal_answers_end_byte": datasets.Value("int32"),
                                "yes_no_answer": datasets.Value("string"),
                            }
                        ),
                        "document_plaintext": datasets.Value("string"),
                        "document_url": datasets.Value("string"),
                    }
                ),
                # Both question and context are needed as model input, so no
                # default (input, target) pair is declared.
                supervised_keys=None,
                homepage="https://github.com/google-research-datasets/tydiqa",
                citation=_CITATION,
            )
        elif self.config.name == "secondary_task":
            return datasets.DatasetInfo(
                description=_DESCRIPTION,
                features=datasets.Features(
                    {
                        "id": datasets.Value("string"),
                        "title": datasets.Value("string"),
                        "context": datasets.Value("string"),
                        "question": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    }
                ),
                # No default supervised_keys (as we have to pass both question
                # and context as input).
                supervised_keys=None,
                homepage="https://github.com/google-research-datasets/tydiqa",
                citation=_CITATION,
                task_templates=[
                    QuestionAnsweringExtractive(
                        question_column="question", context_column="context", answers_column="answers"
                    )
                ],
            )

    def _split_generators(self, dl_manager):
        """Download the data and define the train/validation splits.

        Only the files required by the active configuration are downloaded;
        the original implementation fetched both the primary and secondary
        archives unconditionally, wasting bandwidth and disk space.

        Raises:
            ValueError: if the config name is not one of the declared configs.
        """
        if self.config.name == "primary_task":
            downloaded = dl_manager.download_and_extract(_PRIMARY_URLS)
        elif self.config.name == "secondary_task":
            downloaded = dl_manager.download_and_extract(_SECONDARY_URLS)
        else:
            raise ValueError(f"Unknown config name: {self.config.name}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"filepath": downloaded["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded["dev"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` tuples read from ``filepath``.

        For ``primary_task`` the file is JSON-lines (one record per line) and
        the key is the line index; for ``secondary_task`` the file is a single
        SQuAD-format JSON document and the key is the question id.
        """
        if self.config.name == "primary_task":
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    passages = data["passage_answer_candidates"]
                    annotations = data["annotations"]
                    yield id_, {
                        "passage_answer_candidates": {
                            "plaintext_start_byte": [p["plaintext_start_byte"] for p in passages],
                            "plaintext_end_byte": [p["plaintext_end_byte"] for p in passages],
                        },
                        "question_text": data["question_text"],
                        "document_title": data["document_title"],
                        "language": data["language"],
                        "annotations": {
                            "passage_answer_candidate_index": [
                                a["passage_answer"]["candidate_index"] for a in annotations
                            ],
                            "minimal_answers_start_byte": [
                                a["minimal_answer"]["plaintext_start_byte"] for a in annotations
                            ],
                            "minimal_answers_end_byte": [
                                a["minimal_answer"]["plaintext_end_byte"] for a in annotations
                            ],
                            "yes_no_answer": [a["yes_no_answer"] for a in annotations],
                        },
                        "document_plaintext": data["document_plaintext"],
                        "document_url": data["document_url"],
                    }
        elif self.config.name == "secondary_task":
            with open(filepath, encoding="utf-8") as f:
                data = json.load(f)
                for article in data["data"]:
                    title = article.get("title", "").strip()
                    for paragraph in article["paragraphs"]:
                        context = paragraph["context"].strip()
                        for qa in paragraph["qas"]:
                            id_ = qa["id"]
                            # Features currently used are "context",
                            # "question", and "answers"; the rest are kept for
                            # ease of future expansion.
                            yield id_, {
                                "title": title,
                                "context": context,
                                "question": qa["question"].strip(),
                                "id": id_,
                                "answers": {
                                    "answer_start": [a["answer_start"] for a in qa["answers"]],
                                    "text": [a["text"].strip() for a in qa["answers"]],
                                },
                            }