Languages: Vietnamese
holylovenia committed 1ff780a (1 parent: 656cce4)

Upload vndt.py with huggingface_hub

Files changed (1):
  1. vndt.py (+197, -0)
vndt.py ADDED
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List, Tuple

import conllu
import datasets

from seacrowd.sea_datasets.vndt.utils import parse_token_and_impute_metadata
from seacrowd.utils import schemas
from seacrowd.utils.common_parser import (load_ud_data,
                                          load_ud_data_as_seacrowd_kb)
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@InProceedings{Nguyen2014NLDB,
    author    = {Nguyen, Dat Quoc and Nguyen, Dai Quoc and Pham, Son Bao and Nguyen, Phuong-Thai and Nguyen, Minh Le},
    title     = {{From Treebank Conversion to Automatic Dependency Parsing for Vietnamese}},
    booktitle = {{Proceedings of the 19th International Conference on Application of Natural Language to Information Systems}},
    year      = {2014},
    pages     = {196-207},
    url       = {https://github.com/datquocnguyen/VnDT},
}
"""

_DATASETNAME = "vndt"

_DESCRIPTION = """\
VnDT is a Vietnamese dependency treebank consisting of 10K+ sentences (219K words). The VnDT treebank was automatically
converted from the input Vietnamese Treebank.
"""

_HOMEPAGE = "https://github.com/datquocnguyen/VnDT"

_LANGUAGES = {"vie": "vi"}

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

_URLS = {
    "gold-dev": "https://raw.githubusercontent.com/datquocnguyen/VnDT/master/VnDTv1.1-gold-POS-tags-dev.conll",
    "gold-test": "https://raw.githubusercontent.com/datquocnguyen/VnDT/master/VnDTv1.1-gold-POS-tags-test.conll",
    "gold-train": "https://raw.githubusercontent.com/datquocnguyen/VnDT/master/VnDTv1.1-gold-POS-tags-train.conll",
    "predicted-dev": "https://raw.githubusercontent.com/datquocnguyen/VnDT/master/VnDTv1.1-predicted-POS-tags-dev.conll",
    "predicted-test": "https://raw.githubusercontent.com/datquocnguyen/VnDT/master/VnDTv1.1-predicted-POS-tags-test.conll",
    "predicted-train": "https://raw.githubusercontent.com/datquocnguyen/VnDT/master/VnDTv1.1-predicted-POS-tags-train.conll",
}

_SUPPORTED_TASKS = [Tasks.DEPENDENCY_PARSING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class VnDTDataset(datasets.GeneratorBasedBuilder):
    """
    VnDT is a Vietnamese dependency treebank from https://github.com/datquocnguyen/VnDT.
    """

    # Override conllu.parse_token_and_metadata via monkey patching, so that
    # missing sentence-level metadata is imputed while parsing the CoNLL files.
    conllu.parse_token_and_metadata = parse_token_and_impute_metadata

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_gold_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} gold standard source schema",
            schema="source",
            subset_id="gold",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_gold_seacrowd_kb",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} gold standard SEACrowd schema",
            schema="seacrowd_kb",
            subset_id="gold",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_predicted_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} predicted source schema",
            schema="source",
            subset_id="predicted",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_predicted_seacrowd_kb",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} predicted SEACrowd schema",
            schema="seacrowd_kb",
            subset_id="predicted",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            # The ten standard CoNLL columns, stored as parallel sequences per sentence.
            features = datasets.Features(
                {
                    "id": datasets.Sequence(datasets.Value("int8")),
                    "form": datasets.Sequence(datasets.Value("string")),
                    "lemma": datasets.Sequence(datasets.Value("string")),
                    "upos": datasets.Sequence(datasets.Value("string")),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("int8")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.schema == "seacrowd_kb":
            features = schemas.kb_features
        else:
            raise ValueError(f"Invalid schema: '{self.config.schema}'")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """
        Returns SplitGenerators.
        """

        paths = {key: dl_manager.download_and_extract(value) for key, value in _URLS.items()}

        # Keep only the files that belong to the requested subset (gold or predicted POS tags).
        if self.config.subset_id == "gold":
            filtered_paths = {key: value for key, value in paths.items() if "gold" in key}
        elif self.config.subset_id == "predicted":
            filtered_paths = {key: value for key, value in paths.items() if "predicted" in key}
        else:
            raise NotImplementedError(f"Invalid subset: '{self.config.subset_id}'.")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": [value for key, value in filtered_paths.items() if "dev" in key],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": [value for key, value in filtered_paths.items() if "test" in key],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": [value for key, value in filtered_paths.items() if "train" in key],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepaths: List[Path], split: str) -> Tuple[int, Dict]:
        """
        Yields examples as (key, example) tuples.
        """

        dataset = None
        for file in filepaths:
            if self.config.schema == "source":
                dataset = list(load_ud_data(file))
            elif self.config.schema == "seacrowd_kb":
                dataset = list(load_ud_data_as_seacrowd_kb(file, dataset))
            else:
                raise ValueError(f"Invalid config: '{self.config.name}'")

        for idx, example in enumerate(dataset):
            if self.config.schema == "source":
                # Drop sentence-level metadata keys that are not part of the source features.
                example.pop("sent_id", None)
                example.pop("text", None)
            yield idx, example
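
A minimal usage sketch, not part of the commit: it assumes the script above is saved locally as vndt.py and that the seacrowd package it imports from is installed. Config names come from BUILDER_CONFIGS; in the source schema each example is one sentence with the ten CoNLL columns (id, form, lemma, upos, xpos, feats, head, deprel, deps, misc) as parallel sequences, so dependency arcs can be read off directly.

    import datasets

    # Load the gold-POS-tag subset in the source (CoNLL) schema. Recent
    # `datasets` releases require trust_remote_code=True for script-based datasets.
    vndt = datasets.load_dataset("vndt.py", name="vndt_gold_source", trust_remote_code=True)

    # `form`, `head`, and `deprel` are aligned token-wise; `head` is a 1-based
    # index into the sentence, with 0 marking the root.
    sentence = vndt["train"][0]
    for form, head, deprel in zip(sentence["form"], sentence["head"], sentence["deprel"]):
        governor = sentence["form"][head - 1] if head > 0 else "ROOT"
        print(f"{form} --{deprel}--> {governor}")

Swapping name="vndt_gold_source" for one of the other three config names selects the predicted-POS-tag files or the SEACrowd knowledge-base schema instead.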