update per-language splits
wiki_lingua.py  CHANGED  (+46 −41)
@@ -17,7 +17,6 @@
 import os
 import glob
 import pickle
-import re
 import datasets
 
 
@@ -170,7 +169,7 @@ class WikiLingua(datasets.GeneratorBasedBuilder):
                     "target_language": datasets.Value("string"),
                     "source": datasets.Value("string"),
                     "target": datasets.Value("string"),
-                    "references": [datasets.Value("string")]
+                    "references": [datasets.Value("string")],
                 }
             ),
             supervised_keys=None,
@@ -181,59 +180,65 @@ class WikiLingua(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
+
         dl_dir = dl_manager.download_and_extract(_URL)
         data_dir = os.path.join(dl_dir, "GEM_V2")
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={…
+                gen_kwargs={
+                    "filepaths": glob.glob(
+                        os.path.join(data_dir, f"wikilingua_*.train.pk")
+                    )
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={…
+                gen_kwargs={
+                    "filepaths": glob.glob(
+                        os.path.join(data_dir, f"wikilingua_*lingual.val.pk")
+                    )
+                },
             ),
-        ] + [
             datasets.SplitGenerator(
-                name=…
-                gen_kwargs={…
-                …
-                …
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepaths": glob.glob(
+                        os.path.join(data_dir, f"wikilingua_*lingual.test.pk")
+                    )
+                },
+            ),
+            datasets.SplitGenerator(
+                name=f"sampled_{datasets.Split.VALIDATION}",
+                gen_kwargs={
+                    "filepaths": glob.glob(
+                        os.path.join(data_dir, f"wikilingua_*_sampled.val.pk")
+                    )
+                },
+            ),
+            datasets.SplitGenerator(
+                name=f"sampled_{datasets.Split.TEST}",
+                gen_kwargs={
+                    "filepaths": glob.glob(
+                        os.path.join(data_dir, f"wikilingua_*_sampled.test.pk")
+                    )
+                },
+            ),
         ]
 
-    def _generate_examples(self, …
+    def _generate_examples(self, filepaths):
         """Yields examples."""
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-                "source_language": d["source"],
-                "target_language": d["target"],
-                "source": d["document"].strip(),
-                "target": d["summary"].strip(),
-                "references": [d["summary"].strip()],
-            }
-        else:
-            # filter data as needed for train & validation sets
-            if split == "validation":
-                filepaths = glob.glob(os.path.join(filepath, "wikilingua_*.val.pk"))
+        for filepath in filepaths:
+            if (
+                self.config.name == WikilinguaModes.MULTILINGUAL
+                and WikilinguaModes.CROSSLINGUAL in filepath
+            ) or (
+                self.config.name == WikilinguaModes.CROSSLINGUAL
+                and WikilinguaModes.MULTILINGUAL in filepath
+            ):
+                yield from []
             else:
-                filepaths = glob.glob(os.path.join(filepath, "wikilingua_*.train.pk"))
-            for filepath in filepaths:
-                # skip files if they are irrelevant to task mode
-                if (
-                    self.config.mode == WikilinguaModes.MULTILINGUAL
-                    and "crosslingual" in filepath
-                ) or (
-                    self.config.mode == WikilinguaModes.CROSSLINGUAL
-                    and "multilingual" in filepath
-                ):
-                    yield from []
 
                 with open(filepath, "rb") as f:
                     data = pickle.load(f)
@@ -265,5 +270,5 @@ class WikiLingua(datasets.GeneratorBasedBuilder):
                 "target_language": tgt_lang,
                 "source": src,
                 "target": tgt,
-                "references": [tgt]
+                "references": [tgt],
             }
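Two details of the new split wiring are worth calling out. The wikilingua_*lingual.val.pk and wikilingua_*lingual.test.pk patterns deliberately match both the multilingual and the crosslingual dumps in one glob, while the wikilingua_*_sampled.* patterns pick up only the subsampled files (the f prefixes on these literals are vestigial, since none of them interpolate anything). A quick sketch of the matching behaviour, using hypothetical file names invented for illustration:

    import fnmatch

    # Hypothetical file names, for illustration only; the real dump's
    # naming is constrained only by the globs in the diff above.
    files = [
        "wikilingua_multilingual.val.pk",
        "wikilingua_crosslingual.val.pk",
        "wikilingua_multilingual_sampled.val.pk",
    ]

    print(fnmatch.filter(files, "wikilingua_*lingual.val.pk"))
    # -> ['wikilingua_multilingual.val.pk', 'wikilingua_crosslingual.val.pk']
    print(fnmatch.filter(files, "wikilingua_*_sampled.val.pk"))
    # -> ['wikilingua_multilingual_sampled.val.pk']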
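The rewritten filter in _generate_examples compares self.config.name against WikilinguaModes constants and also tests those constants as substrings of the pickle file names, so it only works if they are plain strings that occur verbatim in the names. The class is defined outside this diff; a minimal sketch of what it is assumed to look like:

    class WikilinguaModes:
        # Assumed values: both the config.name comparison and the
        # `in filepath` substring tests above rely on these strings
        # appearing verbatim in the .pk file names.
        MULTILINGUAL = "multilingual"
        CROSSLINGUAL = "crosslingual"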