holylovenia committed on
Commit
eefaeff
1 Parent(s): 6a3ebaf

Upload ud_jv_csui.py with huggingface_hub

Files changed (1)
  1. ud_jv_csui.py +256 -0
ud_jv_csui.py ADDED
@@ -0,0 +1,256 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.common_parser import load_ud_data, load_ud_data_as_seacrowd_kb
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @unpublished{Alfina2023,
+     author = {Alfina, Ika and Yuliawati, Arlisa and Tanaya, Dipta and Dinakaramani, Arawinda and Zeman, Daniel},
+     title = {{A Gold Standard Dataset for Javanese Tokenization, POS Tagging, Morphological Feature Tagging, and Dependency Parsing}},
+     year = {2023}
+ }
+ """
+
+ _DATASETNAME = "ud_jv_csui"
+
+ _DESCRIPTION = """\
+ UD Javanese-CSUI is a dependency treebank of Javanese, a regional language of Indonesia with more than 68 million speakers.
+ It was developed by Alfina et al. from the Faculty of Computer Science, Universitas Indonesia.
+ The newest version has 1000 sentences and 14K words with manual annotation.
+
+ The sentences use the Latin script and do not use the original writing system of Javanese (Hanacaraka).
+
+ The original sentences were taken from several resources:
+ 1. Javanese reference grammar books (125 sentences)
+ 2. OPUS, especially the Javanese section of the WikiMatrix v1 corpus (150 sentences)
+ 3. Online news (Solopos) (725 sentences)
+
+ Javanese has several language levels (registers), such as Ngoko, Krama, Krama Inggil, and Krama Andhap.
+ In this treebank, the sentences predominantly use Ngoko words, while some also contain Krama words.
+ """
+
+ _HOMEPAGE = "https://github.com/UniversalDependencies/UD_Javanese-CSUI"
+
+ _LANGUAGES = ["jav"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+
+ _LICENSE = Licenses.CC_BY_SA_4_0.value
+
+ _LOCAL = False
+
+ _URLS = {
+     _DATASETNAME: "https://raw.githubusercontent.com/UniversalDependencies/UD_Javanese-CSUI/master/jv_csui-ud-test.conllu",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.DEPENDENCY_PARSING, Tasks.MACHINE_TRANSLATION, Tasks.POS_TAGGING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ def _resolve_misannotation_(dataset):
+     """Resolve mis-annotations in the raw data, in place."""
+     for d in dataset:
+         # Typos in the metadata
+         if d["sent_id"] == "opus-wiki-5":  # From the raw file; thrown away during parsing because the field has no name.
+             d.setdefault("text_en", "Prior to World War II, 14 commercial and 12 public radios could be operated in France.")
+         if d["sent_id"] == "wedhawati-2001-66":  # empty string
+             d.setdefault("text_en", "Reading can expand knowledge.")
+         if d["sent_id"] == "opus-wiki-72":
+             d["text_en"] = d.pop("text-en")  # metadata key mistitled
+         if d["sent_id"] == "opus-wiki-27":
+             d["text_id"] = d.pop("tex_id")  # metadata key mistitled
+
+         # Problems in the annotation itself
+         if d["sent_id"] == "solopos-2022-42":  # The POS tag is also wrong. Proceed with caution.
+             d["form"][1] = d["form"][1].replace("tresnane", "tresna")  # tresna + e
+         if d["sent_id"] == "solopos-2022-93":  # wrong annotation
+             d["form"][10] = d["form"][10].replace("tengene", "tengen")  # tengen + e
+         if d["sent_id"] == "solopos-2022-506":  # annotation inconsistency across occurrences of the word "sedina"
+             d["form"][3] = d["form"][3].replace("siji", "se")
+         if d["sent_id"] == "solopos-2022-711":  # annotation inconsistency on the word "rasah", formed from "ra" and "usah"
+             d["form"][11] = d["form"][11].replace("usah", "sah")
+
+     return dataset
+
+
+ class UdJvCsuiDataset(datasets.GeneratorBasedBuilder):
+     """Javanese dependency treebank comprising 1030 sentences and 14K words with manual annotation."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     # source: https://universaldependencies.org/u/pos/
+     UPOS_TAGS = ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"]
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_kb",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd KB schema",
+             schema="seacrowd_kb",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_t2t",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd Text-to-Text schema",
+             schema="seacrowd_t2t",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_seq_label",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd Seq Label schema",
+             schema="seacrowd_seq_label",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     # metadata
+                     "sent_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "text_id": datasets.Value("string"),
+                     "text_en": datasets.Value("string"),
+                     # tokens
+                     "id": [datasets.Value("string")],
+                     "form": [datasets.Value("string")],
+                     "lemma": [datasets.Value("string")],
+                     "upos": [datasets.Value("string")],
+                     "xpos": [datasets.Value("string")],
+                     "feats": [datasets.Value("string")],
+                     "head": [datasets.Value("string")],
+                     "deprel": [datasets.Value("string")],
+                     "deps": [datasets.Value("string")],
+                     "misc": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.schema == "seacrowd_kb":
+             features = schemas.kb_features
+
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+
+         elif self.config.schema == "seacrowd_seq_label":
+             features = schemas.seq_label_features(self.UPOS_TAGS)
+
+         else:
+             raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_path = dl_manager.download(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,  # https://github.com/UniversalDependencies/UD_Javanese-CSUI#split
+                 gen_kwargs={"filepath": data_path},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         # Note from hudi_f:
+         # Other than 3 sentences with a multi-span of length 3, the data format seems fine.
+         # Thus, it is safe to ignore the assertion (as of 2024/02/14).
+         dataset = list(
+             load_ud_data(
+                 filepath,
+                 filter_kwargs={"id": lambda i: isinstance(i, int)},
+                 # assert_fn=assert_multispan_range_is_one
+             )
+         )
+         _resolve_misannotation_(dataset)
+
+         for d in dataset:
+             if "text_id" not in d or "text_en" not in d:
+                 print(d)
+
+         if self.config.schema == "source":
+             pass
+
+         elif self.config.schema == "seacrowd_kb":
+             dataset = load_ud_data_as_seacrowd_kb(
+                 filepath,
+                 dataset,
+                 morph_exceptions=[
+                     # Exceptions due to inconsistencies in the raw data annotation
+                     ("ne", "e"),
+                     ("nipun", "ipun"),
+                     ("me", "e"),  # occurs in "Esemme" = "Esem" + "e"; the original text has a double 'm'.
+                 ],
+             )
+
+         elif self.config.schema == "seacrowd_t2t":
+             dataset = list(
+                 map(
+                     lambda d: {
+                         "id": d["sent_id"],
+                         "text_1": d["text"],
+                         "text_2": d["text_id"],
+                         "text_1_name": "jav",
+                         "text_2_name": "ind",
+                     },
+                     dataset,
+                 )
+             )
+
+         elif self.config.schema == "seacrowd_seq_label":
+             dataset = list(
+                 map(
+                     lambda d: {
+                         "id": d["sent_id"],
+                         "tokens": d["form"],
+                         "labels": d["upos"],
+                     },
+                     dataset,
+                 )
+             )
+
+         else:
+             raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
+         for key, example in enumerate(dataset):
+             yield key, example
+ yield key, example