Datasets:

Languages:
Khmer
ArXiv:
License:
holylovenia committed on
Commit
61098d8
1 Parent(s): 68fa23f

Upload khpos.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. khpos.py +212 -0
khpos.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The khPOS Corpus (Khmer POS Corpus) is a 12,000 sentences (25,626 words) manually word segmented and POS tagged corpus
developed for Khmer language NLP research and developments. We collected Khmer sentences from websites that include
various area such as economics, news, politics. Moreover it is also contained some student list and voter list of
national election committee of Cambodia. The average number of words per sentence in the whole corpus is 10.75.
Here, some symbols such as "។" (Khmer sign Khan), "៖" (Khmer sign Camnuc pii kuuh), "-", "?", "[", "]" etc. also
counted as words. The shortest sentence contained only 1 word and longest sentence contained 169 words. This dataset contains
A validation set and a test set, each containing 1000 sentences.
"""
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks, Licenses

# BibTeX entry for the paper that introduced the khPOS corpus.
_CITATION = """\
@inproceedings{kyaw2017comparison,
title={Comparison of Six POS Tagging Methods on 12K Sentences Khmer Language POS Tagged Corpus},
author={Ye Kyaw Thu and Vichet Chea and Yoshinori Sagisaka},
booktitle={Proceedings of the first Regional Conference on Optical character recognition and Natural language processing technologies for ASEAN languages (ONA 2017)},
year={2017},
month={December 7-8},
address={Phnom Penh, Cambodia}
}
"""

# Canonical dataset identifier; also keys into _URLS below.
_DATASETNAME = "khpos"

_DESCRIPTION = """\
The khPOS Corpus (Khmer POS Corpus) is a 12,000 sentences (25,626 words) manually word segmented and POS tagged corpus
developed for Khmer language NLP research and developments. We collected Khmer sentences from websites that include
various area such as economics, news, politics. Moreover it is also contained some student list and voter list of
national election committee of Cambodia. The average number of words per sentence in the whole corpus is 10.75.
Here, some symbols such as "។" (Khmer sign Khan), "៖" (Khmer sign Camnuc pii kuuh), "-", "?", "[", "]" etc. also
counted as words. The shortest sentence contained only 1 word and longest sentence contained 169 words. This dataset contains
A validation set and a test set, each containing 1000 sentences.
"""

_HOMEPAGE = "https://github.com/ye-kyaw-thu/khPOS/tree/master"

_LANGUAGES = ['khm']  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)

_LICENSE = Licenses.CC_BY_NC_SA_4_0.value

# Data is downloaded from GitHub, not supplied by the user from a local path.
_LOCAL = False

# Raw corpus files in the upstream repo:
#   train.all2  -> train split, OPEN-TEST -> validation split, CLOSE-TEST -> test split.
_URLS = {
    _DATASETNAME: {
        'train': "https://raw.githubusercontent.com/ye-kyaw-thu/khPOS/master/corpus-draft-ver-1.0/data/after-replace/train.all2",
        'validation': "https://raw.githubusercontent.com/ye-kyaw-thu/khPOS/master/corpus-draft-ver-1.0/data/OPEN-TEST",
        'test': "https://raw.githubusercontent.com/ye-kyaw-thu/khPOS/master/corpus-draft-ver-1.0/data/CLOSE-TEST"
    }
}

_SUPPORTED_TASKS = [Tasks.POS_TAGGING]

# Version of the upstream corpus draft.
_SOURCE_VERSION = "1.0.0"

# Version of the SEACrowd schema wrapper.
_SEACROWD_VERSION = "2024.06.20"
80
class KhPOS(datasets.GeneratorBasedBuilder):
    """SEACrowd dataloader for the khPOS (Khmer POS) corpus.

    12,000 manually word-segmented and POS-tagged Khmer sentences
    (25,626 words); tag descriptions are listed at
    https://github.com/ye-kyaw-thu/khPOS/tree/master. The validation and
    test splits each contain 1000 sentences.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # Tag order follows corpus-draft-ver-1.0/data/after-replace/train.all2.tag.freq.
    # Single source of truth for both the source and seacrowd schemas
    # (the original script duplicated this list).
    POS_TAG_NAMES = [
        "AB", "AUX", "CC", "CD",
        "DBL", "DT", "ETC", "IN",
        "JJ", "KAN", "M", "NN",
        "PA", "PN", "PRO", "QT",
        "RB", "RPN", "SYM", "UH",
        "VB", "VB_JJ", "VCOM",
    ]

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="khpos_source",
            version=SOURCE_VERSION,
            description="khpos source schema",
            schema="source",
            subset_id="khpos",
        ),
        SEACrowdConfig(
            name="khpos_seacrowd_seq_label",
            version=SEACROWD_VERSION,
            description="khpos SEACrowd schema",
            schema="seacrowd_seq_label",
            subset_id="khpos",
        ),
    ]

    DEFAULT_CONFIG_NAME = "khpos_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return DatasetInfo with features matching the selected schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(datasets.features.ClassLabel(names=self.POS_TAG_NAMES)),
                }
            )
        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label.features(self.POS_TAG_NAMES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download all three raw files at once and map them onto splits."""
        # download_and_extract accepts the nested dict and returns it with
        # each URL replaced by a local path — one call instead of three.
        paths = dl_manager.download_and_extract(_URLS[_DATASETNAME])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": paths["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": paths["validation"], "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": paths["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) tuples from one "token/TAG token/TAG ..." file.

        Each non-empty line is a sentence of space-separated ``token/TAG``
        groups.

        Bug fix vs. the original: the line is stripped *before* splitting,
        so the trailing newline no longer sticks to the last group's POS tag
        (previously the final tag of every sentence was e.g. ``"NN\\n"``,
        which is not a valid ClassLabel name).
        """
        with open(filepath, encoding="utf-8") as file:
            counter = 0
            for raw_line in file:
                line = raw_line.strip()
                if not line:
                    continue
                tokens = []
                pos_tags = []
                for group in line.split(" "):
                    # rsplit keeps tokens that themselves contain "/" intact;
                    # only the text after the last slash is the tag.
                    token, pos_tag = group.rsplit("/", 1)
                    tokens.append(token)
                    pos_tags.append(pos_tag)
                if self.config.schema == "source":
                    yield counter, {
                        "id": str(counter),
                        "tokens": tokens,
                        "pos_tags": pos_tags,
                    }
                elif self.config.schema == "seacrowd_seq_label":
                    yield counter, {
                        "id": str(counter),
                        "tokens": tokens,
                        "labels": pos_tags,
                    }
                counter += 1