holylovenia committed
Commit
994dad2
1 Parent(s): d0607a7

Upload culturay.py with huggingface_hub

Files changed (1)
  1. culturay.py +177 -0
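
The commit message points at the huggingface_hub upload API; a minimal sketch of how such an upload could be reproduced, assuming write access to the target repo (the repo_id below is illustrative, not taken from this commit):

    from huggingface_hub import HfApi

    # Upload the dataset script to a Hub dataset repo.
    api = HfApi()  # uses the token stored by `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="culturay.py",
        path_in_repo="culturay.py",
        repo_id="SEACrowd/culturay",  # hypothetical target repo
        repo_type="dataset",
    )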
culturay.py ADDED
@@ -0,0 +1,177 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import json
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import zstandard as zstd
from huggingface_hub import HfFileSystem

from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (SCHEMA_TO_FEATURES, TASK_TO_SCHEMA,
                                      Licenses, Tasks)

_CITATION = """\
@misc{nguyen2024culturay,
    title={CulturaY: A Large Cleaned Multilingual Dataset of 75 Languages},
    author={Thuat Nguyen and Huu Nguyen and Thien Nguyen},
    year={2024},
}
"""

_DATASETNAME = "culturay"

_DESCRIPTION = """\
CulturaY: A Large Cleaned Multilingual Dataset of 75 Languages. From the team
that brought you CulturaX, we present CulturaY, another substantial multilingual
dataset of 15TB (uncompressed)/3TB (zstd-compressed) that applies the same
dataset cleaning methodology to the HPLT v1.1 dataset. Please note that HPLT
v1.2 has also been released and is an alternative version with different
cleaning methodologies. This data was used in part to train our SOTA Vietnamese
model: Vistral-7B-Chat.

Before using this dataloader, please accept the acknowledgement at
https://huggingface.co/datasets/ontocord/CulturaY and use huggingface-cli login
for authentication.
"""

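# A minimal usage sketch (not executed by this script): after accepting the
# dataset terms on the Hub and running `huggingface-cli login`, one config
# can be loaded by name; the config name below follows the naming scheme
# defined in BUILDER_CONFIGS further down.
#
#   import datasets
#   ds = datasets.load_dataset(
#       "culturay.py",               # path to this script
#       name="culturay_vie_source",  # e.g. the Vietnamese source config
#       split="train",
#   )
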
_HOMEPAGE = "https://huggingface.co/datasets/ontocord/CulturaY"

_LANGUAGES = ["mya", "fil", "zlm", "vie", "ind", "tha"]

_LICENSE = Licenses.CC_BY_4_0.value

_LOCAL = False

_BASE_URL = "https://huggingface.co/datasets/ontocord/CulturaY/resolve/main/{lang}/"

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
_SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # ssp

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class CulturaYDataset(datasets.GeneratorBasedBuilder):
    """A substantial multilingual dataset built by cleaning HPLT v1.1 (Internet Archive) data."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = []
    for subset in _LANGUAGES:
        BUILDER_CONFIGS += [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{subset}_source",
                version=SOURCE_VERSION,
                description=f"{_DATASETNAME} {subset} source schema",
                schema="source",
                subset_id=subset,
            ),
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{subset}_{_SEACROWD_SCHEMA}",
                version=SEACROWD_VERSION,
                description=f"{_DATASETNAME} {subset} SEACrowd schema",
                schema=_SEACROWD_SCHEMA,
                subset_id=subset,
            ),
        ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_mya_source"  # smallest wrt n_doc

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "document_lang": datasets.Value("string"),
                    "scores": datasets.Sequence(datasets.Value("float64")),
                    "langs": datasets.Sequence(datasets.Value("string")),
                    "text": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "collection": datasets.Value("string"),
                }
            )
        elif self.config.schema == _SEACROWD_SCHEMA:
            features = SCHEMA_TO_FEATURES[TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]]  # ssp_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators. Data is not yet extracted for efficient generation."""
        # map the ISO 639-3 subset ids to the language folder names used on the Hub
        lang_dict = {"mya": "my", "fil": "tl", "zlm": "ms", "vie": "vi", "ind": "id", "tha": "th"}
        subset = lang_dict[self.config.subset_id]
        base_path = _BASE_URL.format(lang=subset)

        # list the repository's language folder to collect all shard filenames
        fs = HfFileSystem(token=dl_manager.download_config.token)
        file_list = fs.ls(f"datasets/ontocord/CulturaY/{subset}", detail=False)

        data_urls = [
            f"{base_path}{filename.split('/')[-1]}"
            for filename in file_list
            if filename.endswith(".jsonl.zst")
        ]

        data_paths = list(map(Path, dl_manager.download(data_urls)))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_paths": data_paths,
                },
            ),
        ]

    def _generate_examples(self, data_paths: List[Path]) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        key = 0
        for data_path in data_paths:
            with open(data_path, "rb") as f:
                # Zstandard decompression
                dctx = zstd.ZstdDecompressor()
                reader = dctx.stream_reader(f)
                text_io = io.TextIOWrapper(reader, encoding="utf-8")

                # read jsonl file by line and yield
                for line in text_io:
                    data = json.loads(line)
                    if self.config.schema == "source":
                        yield key, {
                            "id": data["id"],
                            "document_lang": data["document_lang"],
                            "scores": data["scores"],
                            "langs": data["langs"],
                            "text": data["text"],
                            "url": data["url"],
                            "collection": data["collection"],
                        }
                    elif self.config.schema == _SEACROWD_SCHEMA:
                        yield key, {
                            "id": str(data["id"]),
                            "text": data["text"],
                        }
                    key += 1
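
For reference, the zstd-streaming pattern used in _generate_examples can be exercised on a single shard outside the loader; a minimal sketch, where "shard.jsonl.zst" is a placeholder name (real shard names come from the Hub folder listing):

    import io
    import json

    import zstandard as zstd

    # Stream-decompress one CulturaY shard and print the first record's URL.
    with open("shard.jsonl.zst", "rb") as f:
        dctx = zstd.ZstdDecompressor()
        with dctx.stream_reader(f) as reader:
            text_io = io.TextIOWrapper(reader, encoding="utf-8")
            for line in text_io:
                record = json.loads(line)
                print(record["url"])
                break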