Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
Libraries:
Datasets
pandas
License:
cc-by-nc-sa-4.0
Convert dataset to Parquet

#3
opened by albertvillanova (HF staff)
README.md CHANGED
@@ -1,15 +1,14 @@
1
  ---
2
  annotations_creators:
3
  - crowdsourced
4
- language:
5
- - en
6
  language_creators:
7
  - found
 
 
8
  license:
9
  - cc-by-nc-sa-4.0
10
  multilinguality:
11
  - monolingual
12
- pretty_name: Question Answering for Artificial Intelligence (QuAIL)
13
  size_categories:
14
  - 10K<n<100K
15
  source_datasets:
@@ -19,7 +18,9 @@ task_categories:
19
  task_ids:
20
  - multiple-choice-qa
21
  paperswithcode_id: quail
 
22
  dataset_info:
 
23
  features:
24
  - name: id
25
  dtype: string
@@ -47,19 +48,28 @@ dataset_info:
47
  sequence: string
48
  - name: correct_answer_id
49
  dtype: int32
50
- config_name: quail
51
  splits:
52
  - name: train
53
- num_bytes: 23432697
54
  num_examples: 10246
55
  - name: validation
56
- num_bytes: 4989579
57
  num_examples: 2164
58
  - name: challenge
59
- num_bytes: 1199840
60
  num_examples: 556
61
- download_size: 6402933
62
- dataset_size: 29622116
 
 
 
 
 
 
 
 
 
 
63
  ---
64
 
65
  # Dataset Card for "quail"
 
1
  ---
2
  annotations_creators:
3
  - crowdsourced
 
 
4
  language_creators:
5
  - found
6
+ language:
7
+ - en
8
  license:
9
  - cc-by-nc-sa-4.0
10
  multilinguality:
11
  - monolingual
 
12
  size_categories:
13
  - 10K<n<100K
14
  source_datasets:
 
18
  task_ids:
19
  - multiple-choice-qa
20
  paperswithcode_id: quail
21
+ pretty_name: Question Answering for Artificial Intelligence (QuAIL)
22
  dataset_info:
23
+ config_name: quail
24
  features:
25
  - name: id
26
  dtype: string
 
48
  sequence: string
49
  - name: correct_answer_id
50
  dtype: int32
 
51
  splits:
52
  - name: train
53
+ num_bytes: 23432601
54
  num_examples: 10246
55
  - name: validation
56
+ num_bytes: 4989531
57
  num_examples: 2164
58
  - name: challenge
59
+ num_bytes: 1199792
60
  num_examples: 556
61
+ download_size: 2286403
62
+ dataset_size: 29621924
63
+ configs:
64
+ - config_name: quail
65
+ data_files:
66
+ - split: train
67
+ path: quail/train-*
68
+ - split: validation
69
+ path: quail/validation-*
70
+ - split: challenge
71
+ path: quail/challenge-*
72
+ default: true
73
  ---
74
 
75
  # Dataset Card for "quail"
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"quail": {"description": "QuAIL is a reading comprehension dataset. QuAIL contains 15K multi-choice questions in texts 300-350 tokens long 4 domains (news, user stories, fiction, blogs).QuAIL is balanced and annotated for question types.", "citation": "@inproceedings{DBLP:conf/aaai/RogersKDR20,\n author = {Anna Rogers and\n Olga Kovaleva and\n Matthew Downey and\n Anna Rumshisky},\n title = {Getting Closer to {AI} Complete Question Answering: {A} Set of Prerequisite\n Real Tasks},\n booktitle = {The Thirty-Fourth {AAAI} Conference on Artificial Intelligence, {AAAI}\n 2020, The Thirty-Second Innovative Applications of Artificial Intelligence\n Conference, {IAAI} 2020, The Tenth {AAAI} Symposium on Educational\n Advances in Artificial Intelligence, {EAAI} 2020, New York, NY, USA,\n February 7-12, 2020},\n pages = {8722--8731},\n publisher = {{AAAI} Press},\n year = {2020},\n url = {https://aaai.org/ojs/index.php/AAAI/article/view/6398},\n timestamp = {Thu, 04 Jun 2020 13:18:48 +0200},\n biburl = {https://dblp.org/rec/conf/aaai/RogersKDR20.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://text-machine-lab.github.io/blog/2020/quail/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "domain": {"dtype": "string", "id": null, "_type": "Value"}, "metadata": {"author": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_type": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, 
"correct_answer_id": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "quail", "config_name": "quail", "version": {"version_str": "1.3.0", "description": "", "major": 1, "minor": 3, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23432697, "num_examples": 10246, "dataset_name": "quail"}, "validation": {"name": "validation", "num_bytes": 4989579, "num_examples": 2164, "dataset_name": "quail"}, "challenge": {"name": "challenge", "num_bytes": 1199840, "num_examples": 556, "dataset_name": "quail"}}, "download_checksums": {"https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_train_randomized.xml": {"num_bytes": 5064067, "checksum": "faf7849a4397485fc6134919b9ce55e40ca623915be86bce16eccfb6c4186fac"}, "https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_dev_randomized.xml": {"num_bytes": 1075073, "checksum": "e39b848db1a13533ee264c9eaa21a309aebf3c0394967848acb89b83ae96b8c4"}, "https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_challenge_randomized.xml": {"num_bytes": 263793, "checksum": "1e140e6e6b9b820e70a75a79f8bc111db6472fd0027bbcfb760e6841045478ae"}}, "download_size": 6402933, "post_processing_size": null, "dataset_size": 29622116, "size_in_bytes": 36025049}}
 
 
quail.py DELETED
@@ -1,142 +0,0 @@
1
- import xml.etree.ElementTree as ET
2
-
3
- import datasets
4
-
5
-
6
- logger = datasets.logging.get_logger(__name__)
7
-
8
-
9
- _CITATION = """\
10
- @inproceedings{DBLP:conf/aaai/RogersKDR20,
11
- author = {Anna Rogers and
12
- Olga Kovaleva and
13
- Matthew Downey and
14
- Anna Rumshisky},
15
- title = {Getting Closer to {AI} Complete Question Answering: {A} Set of Prerequisite
16
- Real Tasks},
17
- booktitle = {The Thirty-Fourth {AAAI} Conference on Artificial Intelligence, {AAAI}
18
- 2020, The Thirty-Second Innovative Applications of Artificial Intelligence
19
- Conference, {IAAI} 2020, The Tenth {AAAI} Symposium on Educational
20
- Advances in Artificial Intelligence, {EAAI} 2020, New York, NY, USA,
21
- February 7-12, 2020},
22
- pages = {8722--8731},
23
- publisher = {{AAAI} Press},
24
- year = {2020},
25
- url = {https://aaai.org/ojs/index.php/AAAI/article/view/6398},
26
- timestamp = {Thu, 04 Jun 2020 13:18:48 +0200},
27
- biburl = {https://dblp.org/rec/conf/aaai/RogersKDR20.bib},
28
- bibsource = {dblp computer science bibliography, https://dblp.org}
29
- }
30
- """
31
-
32
- _DESCRIPTION = """\
33
- QuAIL is a reading comprehension dataset. \
34
- QuAIL contains 15K multi-choice questions in texts 300-350 tokens \
35
- long in 4 domains (news, user stories, fiction, blogs).\
36
- QuAIL is balanced and annotated for question types.\
37
- """
38
-
39
-
40
- class QuailConfig(datasets.BuilderConfig):
41
- """BuilderConfig for QuAIL."""
42
-
43
- def __init__(self, **kwargs):
44
- """BuilderConfig for QuAIL.
45
- Args:
46
- **kwargs: keyword arguments forwarded to super.
47
- """
48
- super(QuailConfig, self).__init__(**kwargs)
49
-
50
-
51
- class Quail(datasets.GeneratorBasedBuilder):
52
- """QuAIL: The Stanford Question Answering Dataset. Version 1.1."""
53
-
54
- _CHALLENGE_SET = "https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_challenge_randomized.xml"
55
- _DEV_SET = "https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_dev_randomized.xml"
56
- _TRAIN_SET = "https://raw.githubusercontent.com/text-machine-lab/quail/master/quail_v1.3/xml/randomized/quail_1.3_train_randomized.xml"
57
-
58
- BUILDER_CONFIGS = [
59
- QuailConfig(
60
- name="quail",
61
- version=datasets.Version("1.3.0", ""),
62
- description="Quail dataset 1.3.0",
63
- ),
64
- ]
65
-
66
- def _info(self):
67
- return datasets.DatasetInfo(
68
- description=_DESCRIPTION,
69
- features=datasets.Features(
70
- {
71
- "id": datasets.Value("string"),
72
- "context_id": datasets.Value("string"),
73
- "question_id": datasets.Value("string"),
74
- "domain": datasets.Value("string"),
75
- "metadata": {
76
- "author": datasets.Value("string"),
77
- "title": datasets.Value("string"),
78
- "url": datasets.Value("string"),
79
- },
80
- "context": datasets.Value("string"),
81
- "question": datasets.Value("string"),
82
- "question_type": datasets.Value("string"),
83
- "answers": datasets.features.Sequence(
84
- datasets.Value("string"),
85
- ),
86
- "correct_answer_id": datasets.Value("int32"),
87
- }
88
- ),
89
- # No default supervised_keys (as we have to pass both question
90
- # and context as input).
91
- supervised_keys=None,
92
- homepage="https://text-machine-lab.github.io/blog/2020/quail/",
93
- citation=_CITATION,
94
- )
95
-
96
- def _split_generators(self, dl_manager):
97
- urls_to_download = {"train": self._TRAIN_SET, "dev": self._DEV_SET, "challenge": self._CHALLENGE_SET}
98
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
99
-
100
- return [
101
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
102
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
103
- datasets.SplitGenerator(name="challenge", gen_kwargs={"filepath": downloaded_files["challenge"]}),
104
- ]
105
-
106
- def _generate_examples(self, filepath):
107
- """This function returns the examples in the raw (text) form."""
108
- logger.info("generating examples from = %s", filepath)
109
- root = ET.parse(filepath).getroot()
110
- for text_tag in root.iterfind("text"):
111
- text_id = text_tag.get("id")
112
- domain = text_tag.get("domain")
113
- metadata_tag = text_tag.find("metadata")
114
- author = metadata_tag.find("author").text.strip()
115
- title = metadata_tag.find("title").text.strip()
116
- url = metadata_tag.find("url").text.strip()
117
- text_body = text_tag.find("text_body").text.strip()
118
- questions_tag = text_tag.find("questions")
119
- for q_tag in questions_tag.iterfind("q"):
120
- question_type = q_tag.get("type", None)
121
- question_text = q_tag.text.strip()
122
- question_id = q_tag.get("id")
123
- answers = []
124
- answer_id = None
125
- for i, a_tag in enumerate(q_tag.iterfind("a")):
126
- if a_tag.get("correct") == "True":
127
- answer_id = i
128
- answers.append(a_tag.text.strip())
129
-
130
- id_ = f"{text_id}_{question_id}"
131
- yield id_, {
132
- "id": id_,
133
- "context_id": text_id,
134
- "question_id": question_id,
135
- "question_type": question_type,
136
- "domain": domain,
137
- "metadata": {"author": author, "title": title, "url": url},
138
- "context": text_body,
139
- "question": question_text,
140
- "answers": answers,
141
- "correct_answer_id": answer_id,
142
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
quail/challenge-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe47785617f8248c27318282891a55b4cebf8bd5f3fef13f1dbd107c2bfb5797
3
+ size 96956
quail/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e760cd0359e3aa59444bc8e551864d4fa518e06987380dabd50eed49179091ee
3
+ size 1791229
quail/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8ba2dd760c9c53a2642de0d8213ee66b6d252d5c279f0f0ac8e06602bf20bf1
3
+ size 398218