Datasets
Modalities: Text
Formats: parquet
Libraries: Datasets, pandas
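Since the data now ships as Parquet, the files can also be read directly with pandas. A minimal sketch, assuming pandas with the huggingface_hub/fsspec backend installed; the shard path below is a guess at the Hub layout, not confirmed from this page:

import pandas as pd

# Hypothetical shard path: the actual Parquet file names on the Hub may differ.
df = pd.read_parquet("hf://datasets/opus_books/en-fr/train-00000-of-00001.parquet")
print(df.columns)  # expected: "id" and "translation", per the dataset features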
albertvillanova (HF staff) committed
Commit 10a7c2a
1 Parent(s): de8108b

Delete loading script

Files changed (1)
  1. opus_books.py +0 -187
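With the data converted to Parquet, the loading script below is redundant: datasets.load_dataset reads the Parquet shards directly. A minimal usage sketch; config names follow the "{lang1}-{lang2}" pattern defined in the deleted script:

from datasets import load_dataset

# Config names follow the "{lang1}-{lang2}" pattern from the deleted script.
books = load_dataset("opus_books", "en-fr", split="train")

print(books[0])
# e.g. {"id": "0", "translation": {"en": "...", "fr": "..."}}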
opus_books.py DELETED
@@ -1,187 +0,0 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- import os
-
- import datasets
-
-
- _DESCRIPTION = """\
- This is a collection of copyright free books aligned by Andras Farkas, which are available from http://www.farkastranslations.com/bilingual_books.php
- Note that the texts are rather dated due to copyright issues and that some of them are manually reviewed (check the meta-data at the top of the corpus files in XML). The source is multilingually aligned, which is available from http://www.farkastranslations.com/bilingual_books.php. In OPUS, the alignment is formally bilingual but the multilingual alignment can be recovered from the XCES sentence alignment files. Note also that the alignment units from the original source may include multi-sentence paragraphs, which are split and sentence-aligned in OPUS.
- All texts are freely available for personal, educational and research use. Commercial use (e.g. reselling as parallel books) and mass redistribution without explicit permission are not granted. Please acknowledge the source when using the data!
-
- 16 languages, 64 bitexts
- total number of files: 158
- total number of tokens: 19.50M
- total number of sentence fragments: 0.91M
- """
- _HOMEPAGE_URL = "http://opus.nlpl.eu/Books.php"
- _CITATION = """\
- @InProceedings{TIEDEMANN12.463,
-     author = {Jörg Tiedemann},
-     title = {Parallel Data, Tools and Interfaces in OPUS},
-     booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
-     year = {2012},
-     month = {may},
-     date = {23-25},
-     address = {Istanbul, Turkey},
-     editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
-     publisher = {European Language Resources Association (ELRA)},
-     isbn = {978-2-9517408-7-7},
-     language = {english}
- }
- """
-
- _VERSION = "1.0.0"
- _BASE_NAME = "Books.{}.{}"
- _BASE_URL = "https://object.pouta.csc.fi/OPUS-Books/v1/moses/{}-{}.txt.zip"
-
- _LANGUAGE_PAIRS = [
-     ("ca", "de"),
-     ("ca", "en"),
-     ("de", "en"),
-     ("el", "en"),
-     ("de", "eo"),
-     ("en", "eo"),
-     ("de", "es"),
-     ("el", "es"),
-     ("en", "es"),
-     ("eo", "es"),
-     ("en", "fi"),
-     ("es", "fi"),
-     ("de", "fr"),
-     ("el", "fr"),
-     ("en", "fr"),
-     ("eo", "fr"),
-     ("es", "fr"),
-     ("fi", "fr"),
-     ("ca", "hu"),
-     ("de", "hu"),
-     ("el", "hu"),
-     ("en", "hu"),
-     ("eo", "hu"),
-     ("fr", "hu"),
-     ("de", "it"),
-     ("en", "it"),
-     ("eo", "it"),
-     ("es", "it"),
-     ("fr", "it"),
-     ("hu", "it"),
-     ("ca", "nl"),
-     ("de", "nl"),
-     ("en", "nl"),
-     ("es", "nl"),
-     ("fr", "nl"),
-     ("hu", "nl"),
-     ("it", "nl"),
-     ("en", "no"),
-     ("es", "no"),
-     ("fi", "no"),
-     ("fr", "no"),
-     ("hu", "no"),
-     ("en", "pl"),
-     ("fi", "pl"),
-     ("fr", "pl"),
-     ("hu", "pl"),
-     ("de", "pt"),
-     ("en", "pt"),
-     ("eo", "pt"),
-     ("es", "pt"),
-     ("fr", "pt"),
-     ("hu", "pt"),
-     ("it", "pt"),
-     ("de", "ru"),
-     ("en", "ru"),
-     ("es", "ru"),
-     ("fr", "ru"),
-     ("hu", "ru"),
-     ("it", "ru"),
-     ("en", "sv"),
-     ("fr", "sv"),
-     ("it", "sv"),
- ]
-
-
- class OpusBooksConfig(datasets.BuilderConfig):
-     def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-         super().__init__(
-             *args,
-             name=f"{lang1}-{lang2}",
-             **kwargs,
-         )
-         self.lang1 = lang1
-         self.lang2 = lang2
-
-
- class OpusBooks(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         OpusBooksConfig(
-             lang1=lang1,
-             lang2=lang2,
-             description=f"Translating {lang1} to {lang2} or vice versa",
-             version=datasets.Version(_VERSION),
-         )
-         for lang1, lang2 in _LANGUAGE_PAIRS
-     ]
-     BUILDER_CONFIG_CLASS = OpusBooksConfig
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-                 },
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE_URL,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         def _base_url(lang1, lang2):
-             return _BASE_URL.format(lang1, lang2)
-
-         download_url = _base_url(self.config.lang1, self.config.lang2)
-         path = dl_manager.download_and_extract(download_url)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"datapath": path},
-             )
-         ]
-
-     def _generate_examples(self, datapath):
-         l1, l2 = self.config.lang1, self.config.lang2
-         folder = l1 + "-" + l2
-         l1_file = _BASE_NAME.format(folder, l1)
-         l2_file = _BASE_NAME.format(folder, l2)
-         l1_path = os.path.join(datapath, l1_file)
-         l2_path = os.path.join(datapath, l2_file)
-         with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
-             for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                 x = x.strip()
-                 y = y.strip()
-                 result = (
-                     sentence_counter,
-                     {
-                         "id": str(sentence_counter),
-                         "translation": {l1: x, l2: y},
-                     },
-                 )
-                 yield result
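For reference, _generate_examples above simply zipped the two Moses-format text files line by line. A standalone sketch of the same pairing logic, assuming two already sentence-aligned plain-text files; the local file names follow the script's "Books.{pair}.{lang}" pattern but are illustrative here:

def aligned_pairs(src_path, tgt_path, src_lang, tgt_lang):
    # Yield records shaped like the deleted _generate_examples output:
    # (key, {"id": ..., "translation": {lang1: ..., lang2: ...}})
    with open(src_path, encoding="utf-8") as f1, open(tgt_path, encoding="utf-8") as f2:
        for i, (x, y) in enumerate(zip(f1, f2)):
            yield i, {"id": str(i), "translation": {src_lang: x.strip(), tgt_lang: y.strip()}}

# Illustrative usage with file names matching the script's _BASE_NAME pattern:
# for _, record in aligned_pairs("Books.en-fr.en", "Books.en-fr.fr", "en", "fr"):
#     print(record)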