albertvillanova (HF staff) committed
Commit 581c8ee (parent: 1d7bb2a)

Delete loading script
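With the loading script deleted, the Hub serves this dataset from its auto-converted Parquet files, so it loads without executing any Python from the repository. A minimal usage sketch, assuming the repository id europarl_bilingual and the lang1-lang2 config naming from the removed script are unchanged:

# Hedged sketch: load one language-pair config now that the data is
# backed by Parquet files instead of this script.
from datasets import load_dataset

ds = load_dataset("europarl_bilingual", "en-fr", split="train")
print(ds[0]["translation"]["en"])  # English side of the first aligned pair
print(ds[0]["translation"]["fr"])  # French side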

Files changed (1): europarl_bilingual.py (+0, -232)
europarl_bilingual.py DELETED
@@ -1,232 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import itertools
- import os
- import xml.etree.ElementTree as ET
-
- import datasets
-
-
- # Find, for instance, the citation on arXiv or on the dataset repo/website
- _CITATION = """\
- @inproceedings{koehn-2005-europarl,
-     title = "{E}uroparl: A Parallel Corpus for Statistical Machine Translation",
-     author = "Koehn, Philipp",
-     booktitle = "Proceedings of Machine Translation Summit X: Papers",
-     month = sep # " 13-15",
-     year = "2005",
-     address = "Phuket, Thailand",
-     url = "https://aclanthology.org/2005.mtsummit-papers.11",
-     pages = "79--86",
- }
- @inproceedings{tiedemann-2012-parallel,
-     title = "Parallel Data, Tools and Interfaces in {OPUS}",
-     author = {Tiedemann, J{\\"o}rg},
-     editor = "Calzolari, Nicoletta and
-         Choukri, Khalid and
-         Declerck, Thierry and
-         Do{\\u{g}}an, Mehmet U{\\u{g}}ur and
-         Maegaard, Bente and
-         Mariani, Joseph and
-         Moreno, Asuncion and
-         Odijk, Jan and
-         Piperidis, Stelios",
-     booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)",
-     month = may,
-     year = "2012",
-     address = "Istanbul, Turkey",
-     publisher = "European Language Resources Association (ELRA)",
-     url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf",
-     pages = "2214--2218",
- }"""
-
- # You can copy an official description
- _DESCRIPTION = """\
- A parallel corpus extracted from the European Parliament web site by Philipp Koehn (University of Edinburgh). The main intended use is to aid statistical machine translation research.
- """
-
- # Add a link to an official homepage for the dataset here
- _HOMEPAGE = "https://opus.nlpl.eu/Europarl/corpus/version/Europarl"
-
- # Add the licence for the dataset here if you can find it
- _LICENSE = """\
- The data set comes with the same license
- as the original sources.
- Please, check the information about the source
- that is given on
- https://opus.nlpl.eu/Europarl/corpus/version/Europarl
- """
-
- # The HuggingFace datasets library doesn't host the datasets; it only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- LANGUAGES = [
-     "bg",
-     "cs",
-     "da",
-     "de",
-     "el",
-     "en",
-     "es",
-     "et",
-     "fi",
-     "fr",
-     "hu",
-     "it",
-     "lt",
-     "lv",
-     "nl",
-     "pl",
-     "pt",
-     "ro",
-     "sk",
-     "sl",
-     "sv",
- ]
-
- LANGUAGE_PAIRS = list(itertools.combinations(LANGUAGES, 2))
-
- _VERSION = "8.0.0"
- _BASE_URL_DATASET = "https://object.pouta.csc.fi/OPUS-Europarl/v8/raw/{}.zip"
- _BASE_URL_RELATIONS = "https://object.pouta.csc.fi/OPUS-Europarl/v8/xml/{}-{}.xml.gz"
-
-
- class EuroparlBilingualConfig(datasets.BuilderConfig):
-     """Slightly custom config to require source and target languages."""
-
-     def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-         super().__init__(
-             *args,
-             name=f"{lang1}-{lang2}",
-             **kwargs,
-         )
-         self.lang1 = lang1
-         self.lang2 = lang2
-
-     def _lang_pair(self):
-         return (self.lang1, self.lang2)
-
-     def _is_valid(self):
-         return self._lang_pair() in LANGUAGE_PAIRS
-
-
- class EuroparlBilingual(datasets.GeneratorBasedBuilder):
-     """Europarl contains aligned sentences in multiple European language pairs."""
-
-     VERSION = datasets.Version(_VERSION)
-
-     BUILDER_CONFIG_CLASS = EuroparlBilingualConfig
-     BUILDER_CONFIGS = [
-         EuroparlBilingualConfig(lang1=lang1, lang2=lang2, version=datasets.Version(_VERSION))
-         for lang1, lang2 in LANGUAGE_PAIRS
-     ]
-
-     def _info(self):
-         """Specifies the datasets.DatasetInfo object, which contains the information and typings for the dataset."""
-         features = datasets.Features(
-             {
-                 "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-
-         if not self.config._is_valid():
-             raise ValueError(
-                 f"{self.config._lang_pair()} is not a supported language pair. Choose among: {LANGUAGE_PAIRS}"
-             )
-
-         # download data files
-         path_datafile_1 = dl_manager.download_and_extract(_BASE_URL_DATASET.format(self.config.lang1))
-         path_datafile_2 = dl_manager.download_and_extract(_BASE_URL_DATASET.format(self.config.lang2))
-
-         # download relations file
-         path_relation_file = dl_manager.download_and_extract(
-             _BASE_URL_RELATIONS.format(self.config.lang1, self.config.lang2)
-         )
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "path_datafiles": (path_datafile_1, path_datafile_2),
-                     "path_relation_file": path_relation_file,
-                 },
-             )
-         ]
-
-     @staticmethod
-     def _parse_xml_datafile(filepath):
-         """Parse a language data file and return a Dict[sentence_id, text]."""
-         document = ET.parse(filepath).getroot()
-         return {tag.attrib["id"]: tag.text for tag in document.iter("s")}
-
-     def _generate_examples(self, path_datafiles, path_relation_file):
-         """Yields examples.
-         Useful attributes are given in parentheses.
-
-         Lang files XML:
-         - document
-           - CHAPTER ('ID')
-             - P ('id')
-               - s ('id')
-
-         Relation file XML:
-         - cesAlign
-           - linkGrp ('fromDoc', 'toDoc')
-             - link ('xtargets': '1;1')
-         """
-
-         # running counter used as the example id
-         _id = 0
207
- relations_root = ET.parse(path_relation_file).getroot()
208
-
209
- for linkGroup in relations_root:
210
- # retrieve files and remove .gz extension because 'datasets' library already decompress them
211
- from_doc_dict = EuroparlBilingual._parse_xml_datafile(
212
- os.path.splitext(os.path.join(path_datafiles[0], "Europarl", "raw", linkGroup.attrib["fromDoc"]))[0]
213
- )
214
-
215
- to_doc_dict = EuroparlBilingual._parse_xml_datafile(
216
- os.path.splitext(os.path.join(path_datafiles[1], "Europarl", "raw", linkGroup.attrib["toDoc"]))[0]
217
- )
218
-
219
- for link in linkGroup:
220
- from_sentence_ids, to_sentence_ids = link.attrib["xtargets"].split(";")
221
- from_sentence_ids = [i for i in from_sentence_ids.split(" ") if i]
222
- to_sentence_ids = [i for i in to_sentence_ids.split(" ") if i]
223
-
224
- if not len(from_sentence_ids) or not len(to_sentence_ids):
225
- continue
226
-
-                 # in rare cases, there is no entry for some keys
-                 sentence_lang1 = " ".join(from_doc_dict[i] for i in from_sentence_ids if i in from_doc_dict)
-                 sentence_lang2 = " ".join(to_doc_dict[i] for i in to_sentence_ids if i in to_doc_dict)
-
-                 yield _id, {"translation": {self.config.lang1: sentence_lang1, self.config.lang2: sentence_lang2}}
-                 _id += 1
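
For reference, the alignment step the deleted script implemented: each link element's xtargets attribute holds space-separated sentence ids on either side of a semicolon (for example "1 2;1" aligns source sentences 1 and 2 with target sentence 1), and one-sided links are skipped. A standalone sketch of that parsing, where parse_xtargets is an illustrative helper name rather than anything from the script:

# Standalone sketch of the xtargets handling from _generate_examples above.
def parse_xtargets(xtargets):
    from_part, to_part = xtargets.split(";")
    from_ids = [i for i in from_part.split(" ") if i]
    to_ids = [i for i in to_part.split(" ") if i]
    return from_ids, to_ids

assert parse_xtargets("1 2;1") == (["1", "2"], ["1"])
assert parse_xtargets(";3") == ([], ["3"])  # one-sided link: the script skipped these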