Datasets:
Sebastian Gehrmann committed
Commit 2b42c85 • 1 Parent(s): ee678f8
- wiki_cat_sum.py +84 -83
wiki_cat_sum.py
CHANGED
@@ -51,71 +51,71 @@ _LICENSE = "CC BY-SA 3.0"
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLs = {
     "animal": {
-        "train": "
-        "validation": "
-        "test": "
-        "cs_abs":[
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+        "train": "main_splits/train-animal.jsonl",
+        "validation": "main_splits/valid-animal.jsonl",
+        "test": "main_splits/test-animal.jsonl",
+        "cs_abs": [
+            "cs_abs/test-animal_nv_0.jsonl",
+            "cs_abs/test-animal_nv_1.jsonl",
+            "cs_abs/test-animal_nv_2.jsonl",
+            "cs_abs/test-animal_nv_3.jsonl",
+            "cs_abs/test-animal_nv_4.jsonl",
+            "cs_abs/test-animal_nv_6.jsonl",
+            "cs_abs/test-animal_nv_7.jsonl",
+            "cs_abs/test-animal_nv_8.jsonl",
+            "cs_abs/test-animal_nv_9.jsonl",
         ],
         "cs_tdiv": [
-            "
-            "
-            "
-            "
-        ]
+            "cs_tdiv/test-animal_tdiv_0.jsonl",
+            "cs_tdiv/test-animal_tdiv_1.jsonl",
+            "cs_tdiv/test-animal_tdiv_2.jsonl",
+            "cs_tdiv/test-animal_tdiv_3.jsonl",
+        ],
     },
     "company": {
-        "train": "
-        "validation": "
-        "test": "
-        "cs_abs":[
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+        "train": "main_splits/train-company.jsonl",
+        "validation": "main_splits/valid-company.jsonl",
+        "test": "main_splits/test-company.jsonl",
+        "cs_abs": [
+            "cs_abs/test-company_nv_0.jsonl",
+            "cs_abs/test-company_nv_1.jsonl",
+            "cs_abs/test-company_nv_2.jsonl",
+            "cs_abs/test-company_nv_3.jsonl",
+            "cs_abs/test-company_nv_4.jsonl",
+            "cs_abs/test-company_nv_6.jsonl",
+            "cs_abs/test-company_nv_7.jsonl",
+            "cs_abs/test-company_nv_8.jsonl",
+            "cs_abs/test-company_nv_9.jsonl",
        ],
         "cs_tdiv": [
-            "
-            "
-            "
-            "
-        ]
+            "cs_tdiv/test-company_tdiv_0.jsonl",
+            "cs_tdiv/test-company_tdiv_1.jsonl",
+            "cs_tdiv/test-company_tdiv_2.jsonl",
+            "cs_tdiv/test-company_tdiv_3.jsonl",
+        ],
     },
     "film": {
-        "train": "
-        "validation": "
-        "test": "
-        "cs_abs":[
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+        "train": "main_splits/train-film.jsonl",
+        "validation": "main_splits/valid-film.jsonl",
+        "test": "main_splits/test-film.jsonl",
+        "cs_abs": [
+            "cs_abs/test-film_nv_0.jsonl",
+            "cs_abs/test-film_nv_1.jsonl",
+            "cs_abs/test-film_nv_2.jsonl",
+            "cs_abs/test-film_nv_3.jsonl",
+            "cs_abs/test-film_nv_4.jsonl",
+            "cs_abs/test-film_nv_6.jsonl",
+            "cs_abs/test-film_nv_7.jsonl",
+            "cs_abs/test-film_nv_8.jsonl",
+            "cs_abs/test-film_nv_9.jsonl",
         ],
         "cs_tdiv": [
-            "
-            "
-            "
-            "
-        ]
-    }
+            "cs_tdiv/test-film_tdiv_0.jsonl",
+            "cs_tdiv/test-film_tdiv_1.jsonl",
+            "cs_tdiv/test-film_tdiv_2.jsonl",
+            "cs_tdiv/test-film_tdiv_3.jsonl",
+        ],
+    },
 }
 
 
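The new `_URLs` entries are paths relative to the dataset repository rather than absolute URLs. A minimal sketch of how `_split_generators` can consume this structure, assuming the usual `datasets.DownloadManager` behaviour of returning the same nested shape with local paths substituted; the function name and the `urls` parameter below are illustrative, not part of the commit:

import datasets

# Sketch only: "urls" stands for one _URLs[config_name] entry from the dict above.
def resolve_local_paths(dl_manager: datasets.DownloadManager, urls: dict):
    # download_and_extract accepts arbitrarily nested dicts/lists and returns the
    # same structure with every entry replaced by a local file path; relative paths
    # such as "main_splits/train-animal.jsonl" are resolved against the dataset repo.
    d_conf = dl_manager.download_and_extract(urls)
    return d_conf["train"], d_conf["cs_abs"], d_conf["cs_tdiv"]
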
@@ -125,7 +125,6 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("0.1.0")
 
-
     # If you need to make complex sub-parts in the datasets with configurable options
     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
     # BUILDER_CONFIG_CLASS = MyBuilderConfig
@@ -134,9 +133,13 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-
-
+        datasets.BuilderConfig(
+            name="animal", version=VERSION, description="Animal domain"
+        ),
+        datasets.BuilderConfig(
+            name="company", version=VERSION, description="Company domain"
+        ),
+        datasets.BuilderConfig(name="film", version=VERSION, description="Film domain"),
     ]
 
     DEFAULT_CONFIG_NAME = "animal" # It's not mandatory to have a default configuration. Just use one if it make sense.
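With the three configurations in place, each domain can be selected by name at load time. An illustrative call, assuming the script is published on the Hub; the repository id below is an assumption, not taken from this commit:

from datasets import load_dataset

# "GEM/wiki_cat_sum" is an assumed repository id; substitute the actual dataset path.
animal = load_dataset("GEM/wiki_cat_sum", "animal")  # same as the default config
film = load_dataset("GEM/wiki_cat_sum", "film")
print(animal)  # DatasetDict with train/validation/test plus the challenge splits
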
@@ -149,13 +152,13 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
                 "gem_parent_id": datasets.Value("string"),
                 "id": datasets.Value("string"),
                 "title": datasets.Value("string"),
-                "paragraphs": datasets.features.Sequence(
-                    datasets.Value("string")),
+                "paragraphs": datasets.features.Sequence(datasets.Value("string")),
                 "summary": datasets.features.Sequence(
                     {
                         "text": datasets.Value("string"),
                         "topic": datasets.Value("int16"),
-                    }
+                    }
+                )
                 # These are the features of your dataset like images, labels ...
             }
         )
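Because `summary` is declared as a `Sequence` over a dict, loaded examples expose it as a dict of aligned lists rather than a list of dicts. A hypothetical record shaped like the declared features; every value below is made up for illustration only:

example = {
    "gem_id": "animal-train-1",        # set later in _generate_examples
    "gem_parent_id": "animal-train-1",
    "id": "0",
    "title": "Example article title",
    "paragraphs": ["first input paragraph ...", "second input paragraph ..."],
    # Sequence over a dict is surfaced as a dict of equal-length lists:
    "summary": {
        "text": ["First summary sentence.", "Second summary sentence."],
        "topic": [0, 2],
    },
}
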
@@ -187,13 +190,12 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
         my_urls = _URLs[self.config.name]
         d_conf = dl_manager.download_and_extract(my_urls)
         challenge_sets = [
-            ("challenge_test_abstractivity_%d" % (lvl), fname)
-
+            ("challenge_test_abstractivity_%d" % (lvl), fname)
+            for lvl, fname in enumerate(d_conf["cs_abs"])
         ] + [
-            ("challenge_test_topic_diversity_%d" % (lvl), fname)
-
+            ("challenge_test_topic_diversity_%d" % (lvl), fname)
+            for lvl, fname in enumerate(d_conf["cs_abs"])
         ]
-
 
         return [
             datasets.SplitGenerator(
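A small sketch of what the two comprehensions evaluate to. Note that the committed code enumerates `d_conf["cs_abs"]` in both lists; the sketch below uses `d_conf["cs_tdiv"]` for the second list, which matches the four tdiv files and is assumed (not confirmed by the commit) to be the intent:

# Toy stand-in for the dict returned by download_and_extract.
d_conf = {"cs_abs": ["nv_0.jsonl", "nv_1.jsonl"], "cs_tdiv": ["tdiv_0.jsonl", "tdiv_1.jsonl"]}

challenge_sets = [
    ("challenge_test_abstractivity_%d" % lvl, fname)
    for lvl, fname in enumerate(d_conf["cs_abs"])
] + [
    ("challenge_test_topic_diversity_%d" % lvl, fname)  # the committed code reads cs_abs here
    for lvl, fname in enumerate(d_conf["cs_tdiv"])
]
# -> [('challenge_test_abstractivity_0', 'nv_0.jsonl'),
#     ('challenge_test_abstractivity_1', 'nv_1.jsonl'),
#     ('challenge_test_topic_diversity_0', 'tdiv_0.jsonl'),
#     ('challenge_test_topic_diversity_1', 'tdiv_1.jsonl')]
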
@@ -207,10 +209,7 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": d_conf["validation"],
-                    "split": "test"
-                },
+                gen_kwargs={"filepath": d_conf["validation"], "split": "test"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
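The builder forwards `gen_kwargs` verbatim as keyword arguments to `_generate_examples`, so the TEST generator above effectively results in the call sketched here; `builder` and `d_conf` are stand-ins, and note that the committed kwargs point the test split at the validation file:

def call_test_generator(builder, d_conf):
    # Equivalent call the library makes for the TEST SplitGenerator above;
    # "builder" is the WikiCatSum instance, "d_conf" the downloaded-path dict.
    return builder._generate_examples(filepath=d_conf["validation"], split="test")
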
@@ -221,20 +220,22 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
                 },
             ),
         ] + [
-
-
-
-
-
-
-
-
-
+            datasets.SplitGenerator(
+                name=challenge_split,
+                gen_kwargs={
+                    "filepath": filename,
+                    "split": challenge_split,
+                },
+            )
+            for challenge_split, filename in challenge_sets
+        ]
 
     def _generate_examples(
-        self,
+        self,
+        filepath,
+        split,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     ):
-        """
+        """Yields examples as (key, example) tuples."""
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
@@ -245,4 +246,4 @@ class WikiCatSum(datasets.GeneratorBasedBuilder):
             # data["gem_id"] = "GEM-wiki_cat_sum-%s-%d" % (split,data["id"]+1)
             data["gem_parent_id"] = f"{self.config.name}-{split}-{id_+1}"
             data["gem_id"] = f"{self.config.name}-{split}-{id_+1}"
-            yield id_,data
+            yield id_, data
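The body of `_generate_examples` is not shown in this diff beyond the GEM id fields and the final yield. A minimal sketch of a compatible implementation, assuming each split file is JSON Lines with one article object per line; the file-reading loop is an assumption, not taken from the commit:

import json

def _generate_examples(self, filepath, split):
    """Yields examples as (key, example) tuples."""
    with open(filepath, encoding="utf-8") as f:
        for id_, line in enumerate(f):
            data = json.loads(line)
            # The two GEM identifiers below are taken verbatim from the diff.
            data["gem_parent_id"] = f"{self.config.name}-{split}-{id_+1}"
            data["gem_id"] = f"{self.config.name}-{split}-{id_+1}"
            yield id_, data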