holylovenia committed
Commit 92a71cc (parent: c261e33)

Upload postag_su.py with huggingface_hub
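For context: an upload like this is typically done with huggingface_hub's HfApi.upload_file. A minimal sketch, assuming a placeholder repo id (the exact call behind this commit is not shown):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="postag_su.py",   # local loading script
    path_in_repo="postag_su.py",      # destination path inside the repo
    repo_id="<namespace>/postag_su",  # placeholder; substitute the real dataset repo id
    repo_type="dataset",
    commit_message="Upload postag_su.py with huggingface_hub",
)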

Files changed (1):
postag_su.py  (+271, −0)
postag_su.py ADDED
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks
_CITATION = """\
@data{FK2/VTAHRH_2022,
  author    = {ARDIYANTI SURYANI, ARIE and Widyantoro, Dwi Hendratmo and Purwarianti, Ayu and Sudaryat, Yayat},
  publisher = {Telkom University Dataverse},
  title     = {{PoSTagged Sundanese Monolingual Corpus}},
  year      = {2022},
  version   = {DRAFT VERSION},
  doi       = {10.34820/FK2/VTAHRH},
  url       = {https://doi.org/10.34820/FK2/VTAHRH}
}

@INPROCEEDINGS{7437678,
  author={Suryani, Arie Ardiyanti and Widyantoro, Dwi Hendratmo and Purwarianti, Ayu and Sudaryat, Yayat},
  booktitle={2015 International Conference on Information Technology Systems and Innovation (ICITSI)},
  title={Experiment on a phrase-based statistical machine translation using PoS Tag information for Sundanese into Indonesian},
  year={2015},
  volume={},
  number={},
  pages={1-6},
  doi={10.1109/ICITSI.2015.7437678}
}
"""
_LANGUAGES = ["sun"]  # We follow the ISO 639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False

_DATASETNAME = "postag_su"

_DESCRIPTION = """\
This dataset contains 3616 lines of Sundanese sentences taken from several online magazines (Mangle, Dewan Dakwah Jabar, and Balebat), \
annotated with PoS labels by several undergraduates of the Sundanese Language Education Study Program (PPBS), UPI Bandung.
"""

_HOMEPAGE = "https://dataverse.telkomuniversity.ac.id/dataset.xhtml?persistentId=doi:10.34820/FK2/VTAHRH"

_LICENSE = 'CC0 - "Public Domain Dedication"'

_URLS = {
    _DATASETNAME: "https://dataverse.telkomuniversity.ac.id/api/access/datafile/:persistentId?persistentId=doi:10.34820/FK2/VTAHRH/WQIFK8",
}

_SUPPORTED_TASKS = [Tasks.POS_TAGGING]

_SOURCE_VERSION = "1.1.0"

_NUSANTARA_VERSION = "1.0.0"


class PosSunMonoDataset(datasets.GeneratorBasedBuilder):
    """PoSTagged Sundanese Monolingual Corpus"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    # Based on Wicaksono, A. F., & Purwarianti, A. (2010). HMM Based Part-of-Speech Tagger for Bahasa Indonesia.
    # In Proceedings of the 4th International MALINDO (Malay and Indonesian Language) Workshop.
    POS_TAGS = [
        "",
        "!",
        '"',
        "'",
        ")",
        ",",
        "-",
        ".",
        "...",
        "....",
        "/",
        ":",
        ";",
        "?",
        "C",
        "CBI",
        "CC",
        "CDC",
        "CDI",
        "CDO",
        "CDP",
        "CDT",
        "CP",
        "CRB",
        "CS",
        "DC",
        "DT",
        "FE",
        "FW",
        "GM",
        "IN",
        "J",
        "JJ",
        "KA",
        "KK",
        "MD",
        "MG",
        "MN",
        "N",
        "NEG",
        "NN",
        "NNA",
        "NNG",
        "NNN",
        "NNO",
        "NNP",
        "NNPP",
        "NP",
        "NPP",
        "OP",
        "PB",
        "PCDP",
        "PR",
        "PRL",
        "PRL|IN",
        "PRN",
        "PRP",
        "RB",
        "RBT",
        "RB|RP",
        "RN",
        "RP",
        "SC",
        "SCC",
        "SC|IN",
        "SYM",
        "UH",
        "VB",
        "VBI",
        "VBT",
        "VRB",
        "W",
        "WH",
        "WHP",
        "WRP",
        "`",
        "–",
        "—",
        "‘",
        "’",
        "“",
        "”",
    ]

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        NusantaraConfig(
            name=f"{_DATASETNAME}_nusantara_seq_label",
            version=NUSANTARA_VERSION,
            description=f"{_DATASETNAME} Nusantara Seq Label schema",
            schema="nusantara_seq_label",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            # The source schema keeps each line as one PoS-labeled sentence string.
            features = datasets.Features({"labeled_sentence": datasets.Value("string")})
        elif self.config.schema == "nusantara_seq_label":
            features = schemas.seq_label_features(self.POS_TAGS)
        else:
            raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_path = dl_manager.download(urls)

        # The corpus ships as a single file, so only a TRAIN split is produced.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        def __hotfix(line):
            # Patch three known annotation errors in the raw file: a token with a
            # missing tag, a duplicated tag, and a tag separator splitting a word.
            if line.endswith(" taun|NN 1953.|."):
                return line.replace(" taun|NN 1953.|.", " taun|NN 1953|CDP .|.")
            elif line.endswith(" jeung|CC|CC sasab|RB .|."):
                return line.replace(" jeung|CC|CC sasab|RB .|.", " jeung|CC sasab|RB .|.")
            elif line.startswith("Kagiatan|NN éta|DT dihadiran|VBT kira|-kira "):
                return line.replace("Kagiatan|NN éta|DT dihadiran|VBT kira|-kira ", "Kagiatan|NN éta|DT dihadiran|VBT kira-kira|DT ")
            return line

        with open(filepath, "r", encoding="utf8") as ipt:
            raw = list(map(lambda l: __hotfix(l.rstrip("\n ")), ipt))

        # Pad punctuation tokens such as ",|," or "?|?" with spaces so they
        # become separate tokens.
        pat_0 = r"(,\|,|\?\|\?|-\|-|!\|!)"
        repl_spc = r" \1 "

        # Split a sentence-final ".|." token that is fused to a preceding capital
        # letter or closing quote, or to a following character.
        pat_1 = r"([A-Z”])(\.\|\.)"
        pat_2 = r"(\.\|\.)([^. ])"
        repl_spl = r"\1 \2"

        # Drop a stray trailing "|" after a complete "token|tag" pair.
        pat_3 = r"([^ ]+\|[^ ]+)\| "
        repl_del = r"\1 "

        # Collapse a doubled "||" separator into a single "|".
        pat_4 = r"\|\|"
        repl_dup = r"|"

        def __apply_regex(txt):
            for pat, repl in [(pat_0, repl_spc), (pat_1, repl_spl), (pat_2, repl_spl), (pat_3, repl_del), (pat_4, repl_dup)]:
                txt = re.sub(pat, repl, txt)
            return txt

        def __cleanse_label(token):
            # Uppercase the tag and strip any ".", "," or ")" glued onto it.
            text, label = token
            return text, re.sub(r"([A-Z]+)[.,)]", r"\1", label.upper())

        if self.config.schema == "source":
            for key, example in enumerate(raw):
                yield key, {"labeled_sentence": example}

        elif self.config.schema == "nusantara_seq_label":
            # Each line is a sequence of space-separated "word|TAG" tokens; split
            # every token on its first "|" into a (word, label) pair.
            spaced = list(map(__apply_regex, raw))
            data = list(map(lambda l: [__cleanse_label(tok.split("|", 1)) for tok in filter(None, l.split(" "))], spaced))

            for key, example in enumerate(data):
                tokens, labels = zip(*example)
                yield key, {"id": str(key), "tokens": tokens, "labels": labels}

        else:
            raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")


if __name__ == "__main__":
    datasets.load_dataset(__file__)
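
Once the script is in place, the dataset can be loaded through either config. A minimal usage sketch (assumes the nusacrowd utilities imported at the top of the script are installed):

import datasets

# Source schema: one PoS-labeled sentence string per example.
source = datasets.load_dataset("postag_su.py", name="postag_su_source", split="train")

# Seq-label schema: parallel "tokens" and "labels" lists per example.
seq = datasets.load_dataset("postag_su.py", name="postag_su_nusantara_seq_label", split="train")

print(seq[0]["tokens"][:5], seq[0]["labels"][:5])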