Datasets:

ArXiv:
License:
holylovenia committed on
Commit
b670bb4
1 Parent(s): efcf1b9

Upload mozilla_pontoon.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. mozilla_pontoon.py +171 -0
mozilla_pontoon.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
from typing import Dict, Iterator, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks
22
+
23
+ # Keep blank; dataset has no associated paper
24
+ _CITATION = """\
25
+ @article{,
26
+ author = {},
27
+ title = {},
28
+ journal = {},
29
+ volume = {},
30
+ year = {},
31
+ url = {},
32
+ doi = {},
33
+ biburl = {},
34
+ bibsource = {}
35
+ }
36
+ """
37
+
38
+ _LOCAL = False
39
+ _LANGUAGES = ["mya", "ceb", "gor", "hil", "ilo", "ind", "jav", "khm", "lao", "zlm", "nia", "tgl", "tha", "vie"]
40
+
41
+ _DATASETNAME = "mozilla_pontoon"
42
+ _DESCRIPTION = """
43
+ This dataset contains crowdsource translations of more than 200 languages for
44
+ different Mozilla open-source projects from Mozilla's Pontoon localization platform.
45
+ Source sentences are in English.
46
+ """
47
+
48
+ _HOMEPAGE = "https://huggingface.co/datasets/ayymen/Pontoon-Translations"
49
+ _LICENSE = Licenses.BSD_3_CLAUSE.value
50
+ _URL = "https://huggingface.co/datasets/ayymen/Pontoon-Translations"
51
+
52
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
53
+ _SOURCE_VERSION = "1.0.0"
54
+ _SEACROWD_VERSION = "2024.06.20"
55
+
56
+
57
class MozillaPontoonDataset(datasets.GeneratorBasedBuilder):
    """Dataset of translations from Mozilla's Pontoon platform.

    Exposes English -> Southeast Asian language translation pairs from the
    ``ayymen/Pontoon-Translations`` dataset on the HuggingFace Hub, in either
    the original "source" schema or the SEACrowd "seacrowd_t2t" schema.
    """

    # Maps the ISO 639-3 codes used by SEACrowd to the codes used upstream:
    # the two-letter ISO code when one is available, otherwise the
    # three-letter code is kept unchanged.
    LANG_CODE_MAPPER = {
        "mya": "my",
        "ceb": "ceb",
        "gor": "gor",
        "hil": "hil",
        "ilo": "ilo",
        "ind": "id",
        "jav": "jv",
        "khm": "km",
        "lao": "lo",
        "zlm": "ms",
        "nia": "nia",
        "tgl": "tl",
        "tha": "th",
        "vie": "vi",
    }

    # Per-language configs: one "source" and one "seacrowd_t2t" config each.
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_eng_{lang}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema for {lang} language",
            schema="source",
            subset_id=f"{_DATASETNAME}_eng_{lang}",
        )
        for lang in _LANGUAGES
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_eng_{lang}_seacrowd_t2t",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema for {lang} language",
            schema="seacrowd_t2t",
            subset_id=f"{_DATASETNAME}_eng_{lang}",
        )
        for lang in _LANGUAGES
    ]

    # Aggregate configs that load every language at once.
    BUILDER_CONFIGS.extend(
        [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_source",
                version=datasets.Version(_SOURCE_VERSION),
                description=f"{_DATASETNAME} source schema for all languages",
                schema="source",
                subset_id=_DATASETNAME,
            ),
            SEACrowdConfig(
                name=f"{_DATASETNAME}_seacrowd_t2t",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"{_DATASETNAME} SEACrowd schema for all languages",
                schema="seacrowd_t2t",
                subset_id=_DATASETNAME,
            ),
        ]
    )

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata whose features depend on the config schema.

        Raises:
            ValueError: If the config schema is neither "source" nor
                "seacrowd_t2t".
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "source_string": datasets.Value("string"),
                    "target_string": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features
        else:
            # Fail fast with a clear message instead of hitting a NameError
            # on `features` below for an unrecognized schema.
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators; only a train split is available."""
        # dl_manager is unused since the data is fetched with HF
        # `load_dataset` in `_load_hf_data_from_remote`.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train"},
            ),
        ]

    def _load_hf_data_from_remote(self, language: str) -> datasets.Dataset:
        """Load the train split for one language from the HuggingFace Hub.

        Returns a single split (`split="train"` yields a `Dataset`, not a
        `DatasetDict` as the original annotation claimed).
        """
        hf_lang_code = self.LANG_CODE_MAPPER[language]
        # Derive the "<owner>/<name>" Hub reference from the dataset URL.
        hf_remote_ref = "/".join(_URL.split("/")[-2:])
        return datasets.load_dataset(hf_remote_ref, f"en-{hf_lang_code}", split="train")

    def _generate_examples(self, split: str) -> Iterator[Tuple[int, Dict]]:
        """Yields examples as (key, example) tuples.

        A per-language config's subset_id ends in a language code, in which
        case only that language is loaded; the aggregate config's subset_id
        ("mozilla_pontoon") does not end in a language code, so every
        language in _LANGUAGES is loaded.
        """
        lang = self.config.subset_id.split("_")[-1]
        languages = [lang] if lang in _LANGUAGES else list(_LANGUAGES)
        pontoon_datasets = [self._load_hf_data_from_remote(code) for code in languages]

        index = 0
        for lang, lang_subset in zip(languages, pontoon_datasets):
            for row in lang_subset:
                if self.config.schema == "source":
                    # Source schema passes the upstream row through unchanged
                    # ({"source_string": ..., "target_string": ...}).
                    example = row
                elif self.config.schema == "seacrowd_t2t":
                    example = {
                        "id": str(index),
                        "text_1": row["source_string"],
                        "text_2": row["target_string"],
                        "text_1_name": "eng",
                        "text_2_name": lang,
                    }
                yield index, example
                index += 1