Dataset: commonsense_qa
Tasks: Question Answering
Modalities: Text
Formats: parquet
Sub-tasks: open-domain-qa
Languages: English
Size: 10K - 100K
ArXiv: 1811.00937
License: mit
Commit 94630fe (parent: 2cb02c2)
Convert dataset to Parquet (#4)
- Convert dataset to Parquet (34986e86317f29c06cc232315de682a88959f89f)
- Delete loading script (04646bb1beff89c6f31e7c0be00303f9370661d4)
- Delete legacy dataset_infos.json (39d3baf7e6177071430f84335a7085e2f429ff9e)
Files changed:
- README.md (+15 -6)
- commonsense_qa.py (+0 -102)
- data/test-00000-of-00001.parquet (+3 -0)
- data/train-00000-of-00001.parquet (+3 -0)
- data/validation-00000-of-00001.parquet (+3 -0)
- dataset_infos.json (+0 -1)
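The commit records only the results of the conversion, not the code that produced it. A minimal sketch of how the Parquet shards could be regenerated, assuming the legacy loading script (deleted below) is still available locally and a `datasets` version that still supports loading scripts:

# Hypothetical reproduction of the conversion: materialize each split with the
# legacy loading script, then write it out as a single Parquet shard whose name
# matches the files added in this commit.
import datasets

ds = datasets.load_dataset("./commonsense_qa.py")  # legacy script, deleted in this commit
for split_name, split in ds.items():
    split.to_parquet(f"data/{split_name}-00000-of-00001.parquet")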
README.md (CHANGED)
@@ -9,7 +9,6 @@ license:
 - mit
 multilinguality:
 - monolingual
-pretty_name: CommonsenseQA
 size_categories:
 - 1K<n<10K
 source_datasets:
@@ -19,6 +18,7 @@ task_categories:
 task_ids:
 - open-domain-qa
 paperswithcode_id: commonsenseqa
+pretty_name: CommonsenseQA
 dataset_info:
   features:
   - name: id
@@ -37,16 +37,25 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 2207794
     num_examples: 9741
   - name: validation
-    num_bytes:
+    num_bytes: 273848
     num_examples: 1221
   - name: test
-    num_bytes:
+    num_bytes: 257842
     num_examples: 1140
-  download_size:
-  dataset_size:
+  download_size: 1558570
+  dataset_size: 2739484
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---

 # Dataset Card for "commonsense_qa"
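The new configs block is what lets the Hub serve the dataset without any loading script: each split maps straight to its Parquet files. A minimal sketch of loading through that mapping, assuming the repository has been cloned so the data/ files are present locally:

# Sketch: read the splits exactly as the new configs.data_files mapping
# describes them, using the generic parquet builder (no loading script needed).
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={
        "train": "data/train-*.parquet",
        "validation": "data/validation-*.parquet",
        "test": "data/test-*.parquet",
    },
)
print(ds)  # expected counts: 9741 train, 1221 validation, 1140 test
example = ds["train"][0]
print(example["question"], example["choices"]["text"], example["answerKey"])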
commonsense_qa.py (DELETED, 102 lines)

"""CommonsenseQA dataset."""


import json

import datasets


_HOMEPAGE = "https://www.tau-nlp.org/commonsenseqa"

_DESCRIPTION = """\
CommonsenseQA is a new multiple-choice question answering dataset that requires different types of commonsense knowledge
to predict the correct answers . It contains 12,102 questions with one correct answer and four distractor answers.
The dataset is provided in two major training/validation/testing set splits: "Random split" which is the main evaluation
split, and "Question token split", see paper for details.
"""

_CITATION = """\
@inproceedings{talmor-etal-2019-commonsenseqa,
    title = "{C}ommonsense{QA}: A Question Answering Challenge Targeting Commonsense Knowledge",
    author = "Talmor, Alon and
      Herzig, Jonathan and
      Lourie, Nicholas and
      Berant, Jonathan",
    booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
    month = jun,
    year = "2019",
    address = "Minneapolis, Minnesota",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/N19-1421",
    doi = "10.18653/v1/N19-1421",
    pages = "4149--4158",
    archivePrefix = "arXiv",
    eprint = "1811.00937",
    primaryClass = "cs",
}
"""

_URL = "https://s3.amazonaws.com/commensenseqa"
_URLS = {
    "train": f"{_URL}/train_rand_split.jsonl",
    "validation": f"{_URL}/dev_rand_split.jsonl",
    "test": f"{_URL}/test_rand_split_no_answers.jsonl",
}


class CommonsenseQa(datasets.GeneratorBasedBuilder):
    """CommonsenseQA dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "question": datasets.Value("string"),
                "question_concept": datasets.Value("string"),
                "choices": datasets.features.Sequence(
                    {
                        "label": datasets.Value("string"),
                        "text": datasets.Value("string"),
                    }
                ),
                "answerKey": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        filepaths = dl_manager.download_and_extract(_URLS)
        splits = [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": filepaths[split],
                },
            )
            for split in splits
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            for uid, row in enumerate(f):
                data = json.loads(row)
                choices = data["question"]["choices"]
                labels = [label["label"] for label in choices]
                texts = [text["text"] for text in choices]
                yield uid, {
                    "id": data["id"],
                    "question": data["question"]["stem"],
                    "question_concept": data["question"]["question_concept"],
                    "choices": {"label": labels, "text": texts},
                    "answerKey": data.get("answerKey", ""),
                }
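The core of the deleted script is _generate_examples, which flattens the nested question.choices list of each jsonl row into parallel label/text columns. A self-contained illustration of that reshaping on a made-up record (the record below is invented for this example, not taken from the dataset):

# Illustration of the flattening done by the deleted _generate_examples.
# Real rows come from the *_rand_split.jsonl files the script used to download.
raw = {
    "id": "example-0",
    "answerKey": "A",
    "question": {
        "stem": "Where would you usually keep a book you are reading?",
        "question_concept": "book",
        "choices": [
            {"label": "A", "text": "nightstand"},
            {"label": "B", "text": "oven"},
            {"label": "C", "text": "bathtub"},
        ],
    },
}

choices = raw["question"]["choices"]
flattened = {
    "id": raw["id"],
    "question": raw["question"]["stem"],
    "question_concept": raw["question"]["question_concept"],
    "choices": {
        "label": [c["label"] for c in choices],
        "text": [c["text"] for c in choices],
    },
    "answerKey": raw.get("answerKey", ""),  # empty for the unlabeled test split
}
print(flattened)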
data/test-00000-of-00001.parquet (ADDED, Git LFS pointer)
version https://git-lfs.github.com/spec/v1
oid sha256:19efe93223d1712397aaa44b3adc07f4fa50349206a611ca4f33afbec661fa5e
size 151227
data/train-00000-of-00001.parquet (ADDED, Git LFS pointer)
version https://git-lfs.github.com/spec/v1
oid sha256:b0449767ed986bfc2ca52b1244a46ef12f732756727f3cb0a4ab69ac8b3d282b
size 1247103
data/validation-00000-of-00001.parquet (ADDED, Git LFS pointer)
version https://git-lfs.github.com/spec/v1
oid sha256:bdbd9bf9cc4d2349b24901038b2ab2f58e10e4e507ad2fd425dca55cd3cb6660
size 160240
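The three data/*.parquet entries above are Git LFS pointers rather than the files themselves. A small sketch for checking a locally downloaded shard against the pointer's recorded size and oid (the local path is an assumption of this example):

# Verify a downloaded Parquet shard against its Git LFS pointer: the file's
# byte size and SHA-256 digest should match the pointer's `size` and
# `oid sha256:` fields.
import hashlib
from pathlib import Path

shard = Path("data/test-00000-of-00001.parquet")  # assumed local path
data = shard.read_bytes()
print(len(data))                          # pointer says: size 151227
print(hashlib.sha256(data).hexdigest())   # pointer says: oid sha256:19efe932...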
dataset_infos.json (DELETED)
{"default": {"description": "CommonsenseQA is a new multiple-choice question answering dataset that requires different types of commonsense knowledge\nto predict the correct answers . It contains 12,102 questions with one correct answer and four distractor answers.\nThe dataset is provided in two major training/validation/testing set splits: \"Random split\" which is the main evaluation\nsplit, and \"Question token split\", see paper for details.\n", "citation": "@inproceedings{talmor-etal-2019-commonsenseqa,\n title = \"{C}ommonsense{QA}: A Question Answering Challenge Targeting Commonsense Knowledge\",\n author = \"Talmor, Alon and\n Herzig, Jonathan and\n Lourie, Nicholas and\n Berant, Jonathan\",\n booktitle = \"Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)\",\n month = jun,\n year = \"2019\",\n address = \"Minneapolis, Minnesota\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/N19-1421\",\n doi = \"10.18653/v1/N19-1421\",\n pages = \"4149--4158\",\n archivePrefix = \"arXiv\",\n eprint = \"1811.00937\",\n primaryClass = \"cs\",\n}\n", "homepage": "https://www.tau-nlp.org/commonsenseqa", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "question_concept": {"dtype": "string", "id": null, "_type": "Value"}, "choices": {"feature": {"label": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answerKey": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "commonsense_qa", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2209044, "num_examples": 9741, "dataset_name": "commonsense_qa"}, "validation": {"name": "validation", "num_bytes": 274033, "num_examples": 1221, "dataset_name": "commonsense_qa"}, "test": {"name": "test", "num_bytes": 258017, "num_examples": 1140, "dataset_name": "commonsense_qa"}}, "download_checksums": {"https://s3.amazonaws.com/commensenseqa/train_rand_split.jsonl": {"num_bytes": 3785890, "checksum": "58ffa3c8472410e24b8c43f423d89c8a003d8284698a6ed7874355dedd09a2fb"}, "https://s3.amazonaws.com/commensenseqa/dev_rand_split.jsonl": {"num_bytes": 471653, "checksum": "3210497fdaae614ac085d9eb873dd7f4d49b6f965a93adadc803e1229fd8a02a"}, "https://s3.amazonaws.com/commensenseqa/test_rand_split_no_answers.jsonl": {"num_bytes": 423148, "checksum": "b426896d71a9cd064cf01cfaf6e920817c51701ef66028883ac1af2e73ad5f29"}}, "download_size": 4680691, "post_processing_size": null, "dataset_size": 2741094, "size_in_bytes": 7421785}}