Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
system HF staff committed on
Commit
346a684
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
common_gen.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import absolute_import, division, print_function
2
+
3
+ import json
4
+ import os
5
+ import random
6
+
7
+ import datasets
8
+
9
+
10
+ random.seed(42) # This is important, to ensure the same order for concept sets as the official script.
11
+
12
+ _CITATION = """\
13
+ @article{lin2019comgen,
14
+ author = {Bill Yuchen Lin and Ming Shen and Wangchunshu Zhou and Pei Zhou and Chandra Bhagavatula and Yejin Choi and Xiang Ren},
15
+ title = {CommonGen: A Constrained Text Generation Challenge for Generative Commonsense Reasoning},
16
+ journal = {CoRR},
17
+ volume = {abs/1911.03705},
18
+ year = {2019}
19
+ }
20
+ """
21
+
22
+ _DESCRIPTION = """\
23
+ CommonGen is a constrained text generation task, associated with a benchmark dataset,
24
+ to explicitly test machines for the ability of generative commonsense reasoning. Given
25
+ a set of common concepts; the task is to generate a coherent sentence describing an
26
+ everyday scenario using these concepts.
27
+
28
+ CommonGen is challenging because it inherently requires 1) relational reasoning using
29
+ background commonsense knowledge, and 2) compositional generalization ability to work
30
+ on unseen concept combinations. Our dataset, constructed through a combination of
31
+ crowd-sourcing from AMT and existing caption corpora, consists of 30k concept-sets and
32
+ 50k sentences in total.
33
+ """
34
+ _URL = "https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip"
35
+
36
+
37
+ class CommonGen(datasets.GeneratorBasedBuilder):
38
+ VERSION = datasets.Version("2020.5.30")
39
+
40
+ def _info(self):
41
+ features = datasets.Features(
42
+ {
43
+ "concept_set_idx": datasets.Value("int32"),
44
+ "concepts": datasets.Sequence(datasets.Value("string")),
45
+ "target": datasets.Value("string"),
46
+ }
47
+ )
48
+ return datasets.DatasetInfo(
49
+ description=_DESCRIPTION,
50
+ features=features,
51
+ supervised_keys=datasets.info.SupervisedKeysData(input="concepts", output="target"),
52
+ homepage="https://inklab.usc.edu/CommonGen/index.html",
53
+ citation=_CITATION,
54
+ )
55
+
56
+ def _split_generators(self, dl_manager):
57
+ """Returns SplitGenerators."""
58
+
59
+ dl_dir = dl_manager.download_and_extract(_URL)
60
+
61
+ return [
62
+ datasets.SplitGenerator(
63
+ name=datasets.Split.TRAIN,
64
+ gen_kwargs={"filepath": os.path.join(dl_dir, "commongen.train.jsonl"), "split": "train"},
65
+ ),
66
+ datasets.SplitGenerator(
67
+ name=datasets.Split.VALIDATION,
68
+ gen_kwargs={"filepath": os.path.join(dl_dir, "commongen.dev.jsonl"), "split": "dev"},
69
+ ),
70
+ datasets.SplitGenerator(
71
+ name=datasets.Split.TEST,
72
+ gen_kwargs={"filepath": os.path.join(dl_dir, "commongen.test_noref.jsonl"), "split": "test"},
73
+ ),
74
+ ]
75
+
76
+ def _generate_examples(self, filepath, split):
77
+ """Yields examples."""
78
+ with open(filepath, encoding="utf-8") as f:
79
+ id_ = 0
80
+ for idx, row in enumerate(f):
81
+ row = row.replace(", }", "}") # Fix possible JSON format error
82
+ data = json.loads(row)
83
+
84
+ rand_order = [word for word in data["concept_set"].split("#")]
85
+ random.shuffle(rand_order)
86
+
87
+ if split == "test":
88
+ yield idx, {
89
+ "concept_set_idx": idx,
90
+ "concepts": rand_order,
91
+ "target": "",
92
+ }
93
+ else:
94
+ for scene in data["scene"]:
95
+ yield id_, {
96
+ "concept_set_idx": idx,
97
+ "concepts": rand_order,
98
+ "target": scene,
99
+ }
100
+ id_ += 1
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"default": {"description": "CommonGen is a constrained text generation task, associated with a benchmark dataset, \nto explicitly test machines for the ability of generative commonsense reasoning. Given \na set of common concepts; the task is to generate a coherent sentence describing an \neveryday scenario using these concepts.\n\nCommonGen is challenging because it inherently requires 1) relational reasoning using \nbackground commonsense knowledge, and 2) compositional generalization ability to work \non unseen concept combinations. Our dataset, constructed through a combination of \ncrowd-sourcing from AMT and existing caption corpora, consists of 30k concept-sets and \n50k sentences in total.\n", "citation": "@article{lin2019comgen,\n author = {Bill Yuchen Lin and Ming Shen and Wangchunshu Zhou and Pei Zhou and Chandra Bhagavatula and Yejin Choi and Xiang Ren},\n title = {CommonGen: A Constrained Text Generation Challenge for Generative Commonsense Reasoning},\n journal = {CoRR},\n volume = {abs/1911.03705},\n year = {2019}\n}\n", "homepage": "https://inklab.usc.edu/CommonGen/index.html", "license": "", "features": {"concept_set_idx": {"dtype": "int32", "id": null, "_type": "Value"}, "concepts": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "target": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "concepts", "output": "target"}, "builder_name": "common_gen", "config_name": "default", "version": {"version_str": "2020.5.30", "description": null, "datasets_version_to_prepare": null, "major": 2020, "minor": 5, "patch": 30}, "splits": {"train": {"name": "train", "num_bytes": 6724250, "num_examples": 67389, "dataset_name": "common_gen"}, "validation": {"name": "validation", "num_bytes": 408752, "num_examples": 4018, "dataset_name": "common_gen"}, "test": {"name": "test", "num_bytes": 77530, "num_examples": 1497, "dataset_name": 
"common_gen"}}, "download_checksums": {"https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip": {"num_bytes": 1845699, "checksum": "a3f19ca607da4e874fc5f2dd1f53c13a6788a497f883d74cc3f9a1fcda44c594"}}, "download_size": 1845699, "post_processing_size": null, "dataset_size": 7210532, "size_in_bytes": 9056231}}
dummy/2020.5.30/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:884c5253c1efbb9befc969a34ca74d0480de4233d709871d934ed3c6d296c5ea
3
+ size 2495