Modalities: Text
Libraries: Datasets

afaji committed
Commit 4b8ca92
1 Parent(s): 44ab977

Mintaka first upload

Files changed (2)
  1. mintaka.py +178 -0
  2. test_mintaka.py +16 -0
mintaka.py ADDED
@@ -0,0 +1,178 @@
+# coding=utf-8
+
+"""Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering"""
+
+import json
+
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+_DESCRIPTION = """\
+Mintaka is a complex, natural, and multilingual dataset designed for experimenting with end-to-end
+question-answering models. Mintaka is composed of 20,000 question-answer pairs collected in English,
+annotated with Wikidata entities, and translated into Arabic, French, German, Hindi, Italian,
+Japanese, Portuguese, and Spanish for a total of 180,000 samples.
+Mintaka includes 8 types of complex questions, including superlative, intersection, and multi-hop questions,
+which were naturally elicited from crowd workers.
+"""
+
+_CITATION = """\
+@inproceedings{sen-etal-2022-mintaka,
+    title = "Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering",
+    author = "Sen, Priyanka and
+      Aji, Alham Fikri and
+      Saffari, Amir",
+    booktitle = "Proceedings of the 29th International Conference on Computational Linguistics",
+    month = oct,
+    year = "2022",
+    address = "Gyeongju, Republic of Korea",
+    publisher = "International Committee on Computational Linguistics",
+    url = "https://aclanthology.org/2022.coling-1.138",
+    pages = "1604--1619",
+}
+"""
+
+_LICENSE = """\
+Copyright Amazon.com Inc. or its affiliates.
+Attribution 4.0 International
+"""
+
+_TRAIN_URL = "https://raw.githubusercontent.com/amazon-science/mintaka/main/data/mintaka_train.json"
+_DEV_URL = "https://raw.githubusercontent.com/amazon-science/mintaka/main/data/mintaka_dev.json"
+_TEST_URL = "https://raw.githubusercontent.com/amazon-science/mintaka/main/data/mintaka_test.json"
+
+_LANGUAGES = ["en", "ar", "de", "ja", "hi", "pt", "es", "it", "fr"]
+
+_ALL = "all"
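+
+# Each language code above is exposed as its own config; the special "all"
+# config fans each question out into one row per language, which is how
+# 20,000 questions become 180,000 samples.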
+
+
+class Mintaka(datasets.GeneratorBasedBuilder):
+    """Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering"""
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name=name,
+            version=datasets.Version("1.0.0"),
+            description=f"Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering for {name}",
+        )
+        for name in _LANGUAGES
+    ]
+
+    BUILDER_CONFIGS.append(
+        datasets.BuilderConfig(
+            name=_ALL,
+            version=datasets.Version("1.0.0"),
+            description="Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering",
+        )
+    )
+
+    DEFAULT_CONFIG_NAME = "en"
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "lang": datasets.Value("string"),
+                    "question": datasets.Value("string"),
+                    "answerText": datasets.Value("string"),
+                    "category": datasets.Value("string"),
+                    "complexityType": datasets.Value("string"),
+                    "questionEntity": [
+                        {
+                            "name": datasets.Value("string"),
+                            "entityType": datasets.Value("string"),
+                            "label": datasets.Value("string"),
+                            "mention": datasets.Value("string"),
+                            "span": [datasets.Value("int32")],
+                        }
+                    ],
+                    "answerEntity": [
+                        {
+                            "name": datasets.Value("string"),
+                            "label": datasets.Value("string"),
+                        }
+                    ],
+                }
+            ),
+            supervised_keys=None,
+            citation=_CITATION,
+            license=_LICENSE,
+        )
+
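+    # One SplitGenerator per Mintaka JSON file; the config name is passed
+    # through so _generate_examples knows which language(s) to emit.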
+    def _split_generators(self, dl_manager):
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "file": dl_manager.download_and_extract(_TRAIN_URL),
+                    "lang": self.config.name,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "file": dl_manager.download_and_extract(_DEV_URL),
+                    "lang": self.config.name,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "file": dl_manager.download_and_extract(_TEST_URL),
+                    "lang": self.config.name,
+                },
+            ),
+        ]
+
+    def _generate_examples(self, file, lang):
+        if lang == _ALL:
+            langs = _LANGUAGES
+        else:
+            langs = [lang]
+
+        key_ = 0
+
+        logger.info("⏳ Generating examples for: %s", ", ".join(langs))
+
+        with open(file, encoding="utf-8") as json_file:
+            data = json.load(json_file)
+            for sample in data:
+                for lang in langs:
+                    questionEntity = [
+                        {
+                            "name": str(qe["name"]),
+                            "entityType": qe["entityType"],
+                            "label": qe["label"] if "label" in qe else "",
+                            "mention": qe["mention"],
+                            "span": qe["span"],
+                        }
+                        for qe in sample["questionEntity"]
+                    ]
+
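+                    # Entity-type answers list answer entities directly;
+                    # numerical answers may instead carry supporting entities.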
+                    answers = []
+                    if sample["answer"]["answerType"] == "entity" and sample["answer"]["answer"] is not None:
+                        answers = sample["answer"]["answer"]
+                    elif sample["answer"]["answerType"] == "numerical" and "supportingEnt" in sample["answer"]:
+                        answers = sample["answer"]["supportingEnt"]
+
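+                    # Labels are keyed by language; fall back to the English
+                    # label, then to an empty string, when the requested
+                    # language has no label.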
+                    def get_label(labels, lang):
+                        if lang in labels:
+                            return labels[lang]
+                        if "en" in labels:
+                            return labels["en"]
+                        return ""
+
+                    answerEntity = [
+                        {
+                            "name": str(ae["name"]),
+                            "label": get_label(ae["label"], lang),
+                        }
+                        for ae in answers
+                    ]
+
+                    yield key_, {
+                        "id": sample["id"],
+                        "lang": lang,
+                        "question": sample["question"] if lang == "en" else sample["translations"][lang],
+                        "answerText": sample["answer"]["mention"],
+                        "category": sample["category"],
+                        "complexityType": sample["complexityType"],
+                        "questionEntity": questionEntity,
+                        "answerEntity": answerEntity,
+                    }
+
+                    key_ += 1
test_mintaka.py ADDED
@@ -0,0 +1,16 @@
+from datasets import load_dataset
+
+source = "AmazonScience/mintaka"
+
+# dataset = load_dataset(source, "all", download_mode="force_redownload")
+dataset = load_dataset(source, "all")
+
+print(dataset)
+print(dataset["train"][0])
+print(dataset["train"][0:10]["question"])
+
+
+dataset = load_dataset(source, "en")
+dataset = load_dataset(source, "ar")
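+
+# A couple of extra sanity checks (a sketch; assumes the features declared
+# in mintaka.py above).
+print(dataset["train"].features)
+print(dataset["train"][0]["answerEntity"])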