Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
albertvillanova (HF staff) committed on
Commit 92f7ed7
Parent: 86db073

Delete loading script

Files changed (1):
  1. ms_marco.py +0 -204
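
With the loading script deleted, the Hub serves this dataset from its auto-converted parquet files, so it loads without executing any repository code. A minimal sketch, assuming the canonical repository id ms_marco and the config names defined in the deleted script below:

    from datasets import load_dataset

    # Loads the parquet-backed data directly; no loading script runs.
    # "v1.1" and "v2.1" match the BUILDER_CONFIGS in the deleted script.
    ds = load_dataset("ms_marco", "v2.1")
    print(ds["train"][0]["query"])
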
ms_marco.py DELETED
@@ -1,204 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """MS MARCO dataset."""
-
-
- import json
-
- import datasets
-
-
- _CITATION = """
- @article{DBLP:journals/corr/NguyenRSGTMD16,
-   author = {Tri Nguyen and
-     Mir Rosenberg and
-     Xia Song and
-     Jianfeng Gao and
-     Saurabh Tiwary and
-     Rangan Majumder and
-     Li Deng},
-   title = {{MS} {MARCO:} {A} Human Generated MAchine Reading COmprehension Dataset},
-   journal = {CoRR},
-   volume = {abs/1611.09268},
-   year = {2016},
-   url = {http://arxiv.org/abs/1611.09268},
-   archivePrefix = {arXiv},
-   eprint = {1611.09268},
-   timestamp = {Mon, 13 Aug 2018 16:49:03 +0200},
-   biburl = {https://dblp.org/rec/journals/corr/NguyenRSGTMD16.bib},
-   bibsource = {dblp computer science bibliography, https://dblp.org}
- }
- """
-
- _DESCRIPTION = """
- Starting with a paper released at NIPS 2016, MS MARCO is a collection of datasets focused on deep learning in search.
-
- The first dataset was a question answering dataset featuring 100,000 real Bing questions and a human generated answer.
- Since then we have released a 1,000,000 question dataset, a natural language generation dataset, a passage ranking dataset,
- a keyphrase extraction dataset, a crawling dataset, and a conversational search dataset.
-
- There have been 277 submissions: 20 KeyPhrase Extraction submissions, 87 passage ranking submissions, 0 document ranking
- submissions, 73 QnA V2 submissions, 82 NLGEN submissions, and 15 QnA V1 submissions.
-
- This data comes in three tasks/forms: the original QnA dataset (v1.1), Question Answering (v2.1), and Natural Language Generation (v2.1).
-
- The original question answering dataset featured 100,000 examples and was released in 2016. Its leaderboard is now closed, but the data is available below.
-
- The current competitive tasks are Question Answering and Natural Language Generation. Question Answering features over 1,000,000 queries and
- is much like the original QnA dataset but bigger and with higher quality. The Natural Language Generation dataset features 180,000 examples and
- builds upon the QnA dataset to deliver answers that could be spoken by a smart speaker.
- """
- _V2_URLS = {
-     "train": "https://msmarco.blob.core.windows.net/msmarco/train_v2.1.json.gz",
-     "dev": "https://msmarco.blob.core.windows.net/msmarco/dev_v2.1.json.gz",
-     "test": "https://msmarco.blob.core.windows.net/msmarco/eval_v2.1_public.json.gz",
- }
-
- _V1_URLS = {
-     "train": "https://msmarco.blob.core.windows.net/msmsarcov1/train_v1.1.json.gz",
-     "dev": "https://msmarco.blob.core.windows.net/msmsarcov1/dev_v1.1.json.gz",
-     "test": "https://msmarco.blob.core.windows.net/msmsarcov1/test_hidden_v1.1.json",
- }
-
-
- class MsMarcoConfig(datasets.BuilderConfig):
-     """BuilderConfig for MS MARCO."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for MS MARCO.
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(MsMarcoConfig, self).__init__(**kwargs)
-
-
- class MsMarco(datasets.GeneratorBasedBuilder):
-
-     BUILDER_CONFIGS = [
-         MsMarcoConfig(
-             name="v1.1",
-             description="""version v1.1""",
-             version=datasets.Version("1.1.0", ""),
-         ),
-         MsMarcoConfig(
-             name="v2.1",
-             description="""version v2.1""",
-             version=datasets.Version("2.1.0", ""),
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION + "\n" + self.config.description,
-             features=datasets.Features(
-                 {
-                     "answers": datasets.features.Sequence(datasets.Value("string")),
-                     "passages": datasets.features.Sequence(
-                         {
-                             "is_selected": datasets.Value("int32"),
-                             "passage_text": datasets.Value("string"),
-                             "url": datasets.Value("string"),
-                         }
-                     ),
-                     "query": datasets.Value("string"),
-                     "query_id": datasets.Value("int32"),
-                     "query_type": datasets.Value("string"),
-                     "wellFormedAnswers": datasets.features.Sequence(datasets.Value("string")),
-                 }
-             ),
-             homepage="https://microsoft.github.io/msmarco/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         if self.config.name == "v2.1":
-             dl_path = dl_manager.download_and_extract(_V2_URLS)
-         else:
-             dl_path = dl_manager.download_and_extract(_V1_URLS)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"filepath": dl_path["dev"]},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": dl_path["train"]},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"filepath": dl_path["test"]},
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-         with open(filepath, encoding="utf-8") as f:
-             if self.config.name == "v2.1":
-                 data = json.load(f)
-                 questions = data["query"]
-                 answers = data.get("answers", {})
-                 passages = data["passages"]
-                 query_ids = data["query_id"]
-                 query_types = data["query_type"]
-                 wellFormedAnswers = data.get("wellFormedAnswers", {})
-                 for key in questions:
-                     is_selected = [passage.get("is_selected", -1) for passage in passages[key]]
-                     passage_text = [passage["passage_text"] for passage in passages[key]]
-                     urls = [passage["url"] for passage in passages[key]]
-                     question = questions[key]
-                     answer = answers.get(key, [])
-                     query_id = query_ids[key]
-                     query_type = query_types[key]
-                     wellFormedAnswer = wellFormedAnswers.get(key, [])
-                     if wellFormedAnswer == "[]":
-                         wellFormedAnswer = []
-                     yield query_id, {
-                         "answers": answer,
-                         "passages": {"is_selected": is_selected, "passage_text": passage_text, "url": urls},
-                         "query": question,
-                         "query_id": query_id,
-                         "query_type": query_type,
-                         "wellFormedAnswers": wellFormedAnswer,
-                     }
-             if self.config.name == "v1.1":
-                 for row in f:
-                     data = json.loads(row)
-                     question = data["query"]
-                     answer = data.get("answers", [])
-                     passages = data["passages"]
-                     query_id = data["query_id"]
-                     query_type = data["query_type"]
-                     wellFormedAnswer = data.get("wellFormedAnswers", [])
-                     is_selected = [passage.get("is_selected", -1) for passage in passages]
-                     passage_text = [passage["passage_text"] for passage in passages]
-                     urls = [passage["url"] for passage in passages]
-                     if wellFormedAnswer == "[]":
-                         wellFormedAnswer = []
-                     yield query_id, {
-                         "answers": answer,
-                         "passages": {"is_selected": is_selected, "passage_text": passage_text, "url": urls},
-                         "query": question,
-                         "query_id": query_id,
-                         "query_type": query_type,
-                         "wellFormedAnswers": wellFormedAnswer,
-                     }
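
For anyone who still needs the raw files, the v1.1 splits parsed above are gzipped JSON lines. A standalone sketch of the same per-record parsing, assuming a local copy of the train file from _V1_URLS (the local filename is hypothetical):

    import gzip
    import json

    # Hypothetical local copy of the v1.1 train file from _V1_URLS.
    path = "train_v1.1.json.gz"

    with gzip.open(path, "rt", encoding="utf-8") as f:
        for row in f:
            data = json.loads(row)
            # The same fields the deleted script yielded per example.
            example = {
                "query": data["query"],
                "query_id": data["query_id"],
                "query_type": data["query_type"],
                "answers": data.get("answers", []),
                # Each passage is a dict with is_selected, passage_text, url.
                "passages": data["passages"],
            }
            print(example["query_id"], example["query"])
            break  # first record only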