Datasets: quail

Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
License:
albertvillanova (HF staff) committed
Commit: da6d020
Parent: ca5c18f

Delete loading script

Files changed (1)
  1. quail.py +0 -142
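With the loading script removed, the dataset is served directly from the auto-converted Parquet files, so `datasets.load_dataset` no longer downloads or executes repository code. A minimal sketch of loading it after this change, assuming the repository id `quail` and that the conversion kept the original split names (`train`, `validation`, `challenge`):

from datasets import load_dataset

# Loads the Parquet-backed copy of QuAIL; no loading script is executed.
quail = load_dataset("quail")

print(quail)  # expected: a DatasetDict with train / validation / challenge splits

example = quail["train"][0]
print(example["question"])            # question text
print(example["answers"])             # list of answer options
print(example["correct_answer_id"])   # index of the correct option in `answers`

# The splits are also easy to hand off to pandas for inspection.
df = quail["train"].to_pandas()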
quail.py DELETED
@@ -1,142 +0,0 @@
-import xml.etree.ElementTree as ET
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@inproceedings{DBLP:conf/aaai/RogersKDR20,
-  author    = {Anna Rogers and
-               Olga Kovaleva and
-               Matthew Downey and
-               Anna Rumshisky},
-  title     = {Getting Closer to {AI} Complete Question Answering: {A} Set of Prerequisite
-               Real Tasks},
-  booktitle = {The Thirty-Fourth {AAAI} Conference on Artificial Intelligence, {AAAI}
-               2020, The Thirty-Second Innovative Applications of Artificial Intelligence
-               Conference, {IAAI} 2020, The Tenth {AAAI} Symposium on Educational
-               Advances in Artificial Intelligence, {EAAI} 2020, New York, NY, USA,
-               February 7-12, 2020},
-  pages     = {8722--8731},
-  publisher = {{AAAI} Press},
-  year      = {2020},
-  url       = {https://aaai.org/ojs/index.php/AAAI/article/view/6398},
-  timestamp = {Thu, 04 Jun 2020 13:18:48 +0200},
-  biburl    = {https://dblp.org/rec/conf/aaai/RogersKDR20.bib},
-  bibsource = {dblp computer science bibliography, https://dblp.org}
-}
-"""
-
-_DESCRIPTION = """\
-QuAIL is a reading comprehension dataset. \
-QuAIL contains 15K multi-choice questions in texts 300-350 tokens \
-long from 4 domains (news, user stories, fiction, blogs). \
-QuAIL is balanced and annotated for question types.\
-"""
-
-
-class QuailConfig(datasets.BuilderConfig):
-    """BuilderConfig for QuAIL."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for QuAIL.
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(QuailConfig, self).__init__(**kwargs)
-
-
-class Quail(datasets.GeneratorBasedBuilder):
-    """QuAIL: a multi-domain multiple-choice reading comprehension dataset. Version 1.3."""
-
-    _CHALLENGE_SET = "https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_challenge_randomized.xml"
-    _DEV_SET = "https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_dev_randomized.xml"
-    _TRAIN_SET = "https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_train_randomized.xml"
-
-    BUILDER_CONFIGS = [
-        QuailConfig(
-            name="quail",
-            version=datasets.Version("1.3.0", ""),
-            description="Quail dataset 1.3.0",
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "context_id": datasets.Value("string"),
-                    "question_id": datasets.Value("string"),
-                    "domain": datasets.Value("string"),
-                    "metadata": {
-                        "author": datasets.Value("string"),
-                        "title": datasets.Value("string"),
-                        "url": datasets.Value("string"),
-                    },
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "question_type": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(
-                        datasets.Value("string"),
-                    ),
-                    "correct_answer_id": datasets.Value("int32"),
-                }
-            ),
-            # No default supervised_keys (as we have to pass both question
-            # and context as input).
-            supervised_keys=None,
-            homepage="https://text-machine-lab.github.io/blog/2020/quail/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        urls_to_download = {"train": self._TRAIN_SET, "dev": self._DEV_SET, "challenge": self._CHALLENGE_SET}
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name="challenge", gen_kwargs={"filepath": downloaded_files["challenge"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", filepath)
-        root = ET.parse(filepath).getroot()
-        for text_tag in root.iterfind("text"):
-            text_id = text_tag.get("id")
-            domain = text_tag.get("domain")
-            metadata_tag = text_tag.find("metadata")
-            author = metadata_tag.find("author").text.strip()
-            title = metadata_tag.find("title").text.strip()
-            url = metadata_tag.find("url").text.strip()
-            text_body = text_tag.find("text_body").text.strip()
-            questions_tag = text_tag.find("questions")
-            for q_tag in questions_tag.iterfind("q"):
-                question_type = q_tag.get("type", None)
-                question_text = q_tag.text.strip()
-                question_id = q_tag.get("id")
-                answers = []
-                answer_id = None
-                for i, a_tag in enumerate(q_tag.iterfind("a")):
-                    if a_tag.get("correct") == "True":
-                        answer_id = i
-                    answers.append(a_tag.text.strip())
-
-                id_ = f"{text_id}_{question_id}"
-                yield id_, {
-                    "id": id_,
-                    "context_id": text_id,
-                    "question_id": question_id,
-                    "question_type": question_type,
-                    "domain": domain,
-                    "metadata": {"author": author, "title": title, "url": url},
-                    "context": text_body,
-                    "question": question_text,
-                    "answers": answers,
-                    "correct_answer_id": answer_id,
-                }
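For reference, the deleted _generate_examples above assumed QuAIL's XML releases are shaped roughly like the sketch below. This is a reconstruction from the parsing code only: the element and attribute names (text, metadata, text_body, questions, q, a, id, domain, type, correct) come from the script, while all concrete values are invented placeholders.

import xml.etree.ElementTree as ET

# Placeholder document; only the element/attribute layout mirrors what the
# deleted script parsed, the values themselves are invented.
SAMPLE_XML = """\
<root>
  <text id="t001" domain="news">
    <metadata>
      <author>Jane Doe</author>
      <title>Example article</title>
      <url>https://example.com/article</url>
    </metadata>
    <text_body>A 300-350 token passage would go here.</text_body>
    <questions>
      <q id="q1" type="Belief_states">What did the author believe?
        <a>Option one</a>
        <a correct="True">Option two</a>
        <a>Option three</a>
        <a>Option four</a>
      </q>
    </questions>
  </text>
</root>
"""

# Re-running the same extraction logic as the deleted builder on the sample.
root = ET.fromstring(SAMPLE_XML)
for text_tag in root.iterfind("text"):
    for q_tag in text_tag.find("questions").iterfind("q"):
        answers = [a_tag.text.strip() for a_tag in q_tag.iterfind("a")]
        correct_id = None
        for i, a_tag in enumerate(q_tag.iterfind("a")):
            if a_tag.get("correct") == "True":
                correct_id = i
        print(q_tag.text.strip(), answers, correct_id)  # -> question, options, 1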