holylovenia committed on
Commit
25bc40f
1 Parent(s): f524bef

Upload hplt.py with huggingface_hub

Files changed (1)
  1. hplt.py +225 -0
hplt.py ADDED
@@ -0,0 +1,225 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import io
+ import itertools
+ import json
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import requests
+ import zstandard as zstd
+
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import SCHEMA_TO_FEATURES, TASK_TO_SCHEMA, Licenses, Tasks
+
+ _CITATION = r"""\
+ @inproceedings{aulamo-etal-2023-hplt,
+     title = "{HPLT}: High Performance Language Technologies",
+     author = {Aulamo, Mikko and
+       Bogoychev, Nikolay and
+       Ji, Shaoxiong and
+       Nail, Graeme and
+       Ram{\'\i}rez-S{\'a}nchez, Gema and
+       Tiedemann, J{\"o}rg and
+       van der Linde, Jelmer and
+       Zaragoza, Jaume},
+     editor = "Nurminen, Mary and
+       Brenner, Judith and
+       Koponen, Maarit and
+       Latomaa, Sirkku and
+       Mikhailov, Mikhail and
+       Schierl, Frederike and
+       Ranasinghe, Tharindu and
+       Vanmassenhove, Eva and
+       Vidal, Sergi Alvarez and
+       Aranberri, Nora and
+       Nunziatini, Mara and
+       Escart{\'\i}n, Carla Parra and
+       Forcada, Mikel and
+       Popovic, Maja and
+       Scarton, Carolina and
+       Moniz, Helena",
+     booktitle = "Proceedings of the 24th Annual Conference of the European
+       Association for Machine Translation",
+     month = jun,
+     year = "2023",
+     address = "Tampere, Finland",
+     publisher = "European Association for Machine Translation",
+     url = "https://aclanthology.org/2023.eamt-1.61",
+     pages = "517--518",
+
+     abstract = "We describe the High Performance Language Technologies project
+       (HPLT), a 3-year EU-funded project started in September 2022. HPLT will
+       build a space combining petabytes of natural language data with large-scale
+       model training. It will derive monolingual and bilingual datasets from the
+       Internet Archive and CommonCrawl and build efficient and solid machine
+       translation (MT) as well as large language models (LLMs). HPLT aims at
+       providing free, sustainable and reusable datasets, models and workflows at
+       scale using high-performance computing (HPC).",
+ }
+ """
+
+ _DATASETNAME = "hplt"
+
+ _DESCRIPTION = """\
+ The dataset is part of the High Performance Language Technologies project
+ (HPLT), a 3-year EU-funded project started in September 2022. HPLT derives
+ monolingual and bilingual datasets from the Internet Archive and CommonCrawl and
+ builds efficient and solid machine translation (MT) as well as large language
+ models (LLMs). HPLT aims at providing free, sustainable and reusable datasets,
+ models and workflows at scale using high-performance computing (HPC).
+ """
+
+ _HOMEPAGE = "https://hplt-project.org/datasets/v1.2"
+
+ _LANGUAGES = {
+     "ind": "id",
+     "zlm": "ms",
+     "tha": "th",
+     "mya": "my",
+     "fil": "tl",
+     "vie": "vi",
+ }
+
+ _LICENSE = Licenses.CC0_1_0.value
+
+ _LOCAL = False
+
+ _URLS = {
+     "raw": "https://data.hplt-project.org/one/monotext/{lang}_map.txt",
+     "deduplicated": "https://data.hplt-project.org/one/monotext/deduplicated/{lang}_map.txt",
+     "cleaned": "https://data.hplt-project.org/one/monotext/cleaned/{lang}_map.txt",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
+ _SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # ssp
+
+ _SOURCE_VERSION = "1.2.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class HpltDataset(datasets.GeneratorBasedBuilder):
+     """HPLT derives monolingual and bilingual datasets from the Internet Archive and CommonCrawl"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     SUBSETS = ["raw", "deduplicated", "cleaned"]
+
+     BUILDER_CONFIGS = []
+     for lang, subset in list(itertools.product(_LANGUAGES.keys(), SUBSETS)):
+         subset_id = f"{lang}_{subset}"
+         BUILDER_CONFIGS += [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{subset_id}_source",
+                 version=SOURCE_VERSION,
+                 description=f"{_DATASETNAME} {subset_id} source schema",
+                 schema="source",
+                 subset_id=subset_id,
+             ),
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{subset_id}_{_SEACROWD_SCHEMA}",
+                 version=SEACROWD_VERSION,
+                 description=f"{_DATASETNAME} {subset_id} SEACrowd schema",
+                 schema=_SEACROWD_SCHEMA,
+                 subset_id=subset_id,
+             ),
+         ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_mya_cleaned_source"  # smallest w.r.t. size
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "document_lang": datasets.Value("string"),
+                     "scores": datasets.Sequence(datasets.Value("float")),
+                     "langs": datasets.Sequence(datasets.Value("string")),
+                     "text": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "collection": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == _SEACROWD_SCHEMA:
+             features = SCHEMA_TO_FEATURES[TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]]  # ssp_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators. Shards are only downloaded here; decompression is deferred to generation."""
+         lang, subset = self.config.subset_id.split("_")
+         lang = _LANGUAGES[lang]
+         map_url = _URLS[subset].format(lang=lang)
+
+         # The map file is a plain-text list of URLs pointing to the .jsonl.zst shards of this subset.
+         response = requests.get(map_url, timeout=10)
+         if response:
+             data_urls = response.text.strip().split("\n")
+             data_urls = [url for url in data_urls if url.endswith(".jsonl.zst")]
+         else:
+             raise requests.exceptions.HTTPError(f"Non-success status code: {response.status_code}")
+
+         data_paths = list(map(Path, dl_manager.download(data_urls)))
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_paths": data_paths,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_paths: List[Path]) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         key = 0
+         for data_path in data_paths:
+             with open(data_path, "rb") as f:
+                 # Zstandard decompression: stream-decode the shard without extracting it to disk
+                 dctx = zstd.ZstdDecompressor()
+                 reader = dctx.stream_reader(f)
+                 text_io = io.TextIOWrapper(reader, encoding="utf-8")
+
+                 # read the jsonl shard line by line and yield one example per document
+                 for line in text_io:
+                     data = json.loads(line)
+                     if self.config.schema == "source":
+                         yield key, {
+                             "id": key,
+                             "document_lang": data["document_lang"],
+                             "scores": data["scores"],
+                             "langs": data["langs"],
+                             "text": data["text"],
+                             "url": data["url"],
+                             "collection": data["collection"],
+                         }
+                     elif self.config.schema == _SEACROWD_SCHEMA:
+                         yield key, {
+                             "id": str(key),
+                             "text": data["text"],
+                         }
+                     key += 1
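
For reference, a minimal usage sketch (not part of the commit): assuming the script above is saved locally as hplt.py and a reasonably recent version of the datasets library, a config built by this loader can typically be loaded as follows. The local path and the trust_remote_code flag are assumptions, not something this diff defines; config names follow the f"{_DATASETNAME}_{lang}_{subset}_{schema}" pattern from BUILDER_CONFIGS.

from datasets import load_dataset

# Hypothetical local load of the script above.
# Default source config: Burmese, "cleaned" subset.
ds = load_dataset("hplt.py", name="hplt_mya_cleaned_source", split="train", trust_remote_code=True)

# SEACrowd ssp schema for the Indonesian "deduplicated" subset.
ds_ssp = load_dataset("hplt.py", name="hplt_ind_deduplicated_seacrowd_ssp", split="train", trust_remote_code=True)

print(ds[0]["url"])
print(ds_ssp[0]["text"][:100])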