Datasets:

ArXiv:
License:
holylovenia committed on
Commit
e92ecac
1 Parent(s): 807848c

Upload up2.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. up2.py +189 -0
up2.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Southeast Asian language subsets from Universal Propositions (UP) 2.0 dataset.
18
+ Semantic role labeling (SRL) is a shallow semantic parsing task that identifies “who did what to whom when, where etc” for each predicate in a sentence.
19
+ It provides an intermediate (shallow) level of a semantic representation that helps the map from syntactic parse structures to more fully-specified representations of meaning.
20
+ """
21
+ from pathlib import Path
22
+ from typing import Dict, List, Tuple
23
+
24
+ import datasets
25
+
26
+ from seacrowd.utils.common_parser import load_ud_data
27
+ from seacrowd.utils.configs import SEACrowdConfig
28
+ from seacrowd.utils.constants import Licenses
29
+
30
# BibTeX entry for Universal Proposition Bank 2.0 (Jindal et al., LREC 2022).
# Fixed vs. the original script: removed a stray second closing brace ("}}")
# that made the BibTeX entry malformed, and restored the backslash in
# "Micha{\l}" (Michał) per the ACL Anthology entry.
_CITATION = """\
@inproceedings{jindal-etal-2022-universal,
    title = "Universal {P}roposition {B}ank 2.0",
    author = "Jindal, Ishan  and
      Rademaker, Alexandre  and
      Ulewicz, Micha{\\l}  and
      Linh, Ha  and
      Nguyen, Huyen  and
      Tran, Khoi-Nguyen  and
      Zhu, Huaiyu  and
      Li, Yunyao",
    booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
    month = jun,
    year = "2022",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2022.lrec-1.181",
    pages = "1700--1711",
}
"""

_DATASETNAME = "up2"

_DESCRIPTION = """\
Southeast Asian language subsets from Universal Propositions (UP) 2.0 dataset.
Semantic role labeling (SRL) is a shallow semantic parsing task that identifies “who did what to whom when, where etc” for each predicate in a sentence.
It provides an intermediate (shallow) level of a semantic representation that helps the map from syntactic parse structures to more fully-specified representations of meaning.
"""

_HOMEPAGE = "https://universalpropositions.github.io/"

_LANGUAGES = ["ind", "vie"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)

_LICENSE = Licenses.CDLA_SHARING_1_0.value

_LOCAL = False

# For each split ("train"/"test"/"dev") and each language, a pair of URLs:
#   [0] the UP .conllup file carrying the SRL annotations, and
#   [1] the matching UD .conllu file carrying the full source text.
# The commented-out alternatives were rejected by the original author because
# their sentence ids or text do not line up with the UP annotations.
_URLS = {
    split: {
        "ind": [
            f"https://raw.githubusercontent.com/UniversalPropositions/UP_Indonesian-GSD/main/id_gsd-up-{split}.conllup",
            f"https://raw.githubusercontent.com/UniversalDependencies/UD_Indonesian-GSD/master/id_gsd-ud-{split}.conllu",
            # f"https://raw.githubusercontent.com/indolem/indolem/main/dependency_parsing/UD_Indonesian_GSD/id_gsd-ud-{split}.conllu",  # there are missing sent_id from the IndoLEM's dataset.
        ],
        "vie": [
            f"https://raw.githubusercontent.com/UniversalPropositions/UP_Vietnamese-VTB/main/vi_vtb-up-{split}.conllup",
            # f"https://raw.githubusercontent.com/UniversalDependencies/UD_Vietnamese-VTB/master/vi_vtb-ud-{split}.conllu",  # new data => mismatch.
            f"https://raw.githubusercontent.com/UniversalDependencies/UD_Vietnamese-VTB/0edef6d63df949aea0494c6d4ff4f91bb1959019/vi_vtb-ud-{split}.conllu",  # r2.8
        ],
    }
    for split in ["train", "test", "dev"]
}

_SUPPORTED_TASKS = []

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
88
+
89
+
90
class UP2Dataset(datasets.GeneratorBasedBuilder):
    """
    Southeast Asian language subsets from Universal Propositions (UP) 2.0 dataset.
    Semantic role labeling (SRL) is a shallow semantic parsing task that identifies “who did what to whom when, where etc” for each predicate in a sentence.
    It provides an intermediate (shallow) level of a semantic representation that helps the map from syntactic parse structures to more fully-specified representations of meaning.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # One "source" config per language, plus one aggregate config built from
    # _LANG == "" (named "up2_source", subset_id "up2") that loads every
    # language in _LANGUAGES.
    BUILDER_CONFIGS = [
        *[
            SEACrowdConfig(
                name=f"{_DATASETNAME}{'_' if _LANG else ''}{_LANG}_source",
                version=datasets.Version(_SOURCE_VERSION),
                description=f"{_DATASETNAME} source schema",
                schema="source",
                subset_id=f"{_DATASETNAME}{'_' if _LANG else ''}{_LANG}",
            )
            for _LANG in ["", *_LANGUAGES]
        ],
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_{_LANGUAGES[0]}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata (features, homepage, license, citation)."""

        # NOTE(review): `features` is only assigned for the "source" schema —
        # the only schema declared in BUILDER_CONFIGS — so any other schema
        # would hit a NameError in the return below.
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "lang": datasets.Value("string"),
                    "source_sent_id": datasets.Value("string"),
                    "sent_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    # List-valued columns coming from the .conllup file;
                    # one entry per token/predicate row.
                    "id": [datasets.Value("string")],
                    "up:pred": [datasets.Value("string")],
                    "up:argheads": [datasets.Value("string")],
                    "up:argspans": [datasets.Value("string")],
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # subset_id is either "up2" (aggregate: all languages) or
        # "up2_<lang>"; for the latter, narrow _URLS to that language only
        # before downloading.
        _subset_id = self.config.subset_id.split("_")
        if len(_subset_id) > 1:
            _lang = _subset_id[1]
            urls = {split: {_lang: urls_up_ud[_lang]} for split, urls_up_ud in _URLS.items()}
        else:
            urls = _URLS
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": data_dir["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": data_dir["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": data_dir["dev"],
                },
            ),
        ]

    def _generate_examples(self, filepaths: Dict[str, List[Path]]) -> Tuple[str, Dict]:
        """Yields examples as (key, example) tuples.

        `filepaths` maps language code -> [UP .conllup path, UD .conllu path]
        for one split, as downloaded by `_split_generators`.
        """

        # Same subset_id convention as _split_generators: "up2" means all
        # languages, "up2_<lang>" means just that one.
        _subset_id = self.config.subset_id.split("_")
        _langs = [_subset_id[1]] if (len(_subset_id) > 1) else _LANGUAGES

        for _lang in _langs:
            # filepaths[_lang][0] -> UP .conllup (SRL annotations);
            # filepaths[_lang][1] -> UD .conllu (full source sentences),
            # used only to build a sent_id -> text lookup.
            data = list(load_ud_data(filepaths[_lang][0]))
            sentid2text = {_b["sent_id"]: _b["text"] for _b in load_ud_data(filepaths[_lang][1])}

            for cur_data in data:
                txt_src = sentid2text[cur_data["sent_id"]]
                # The UP file's "text" metadata appears to be elided for long
                # sentences, ending in a ".........." filler (possibly padded
                # with spaces/dashes). Strip that tail, verify the remainder is
                # a prefix of the UD text, then substitute the full UD text.
                txt_up = cur_data["text"].rsplit("..........", 1)[0].rstrip(" -")
                assert txt_up == txt_src[: len(txt_up)], f"Text mismatch. Found '{txt_up}' in conllup but source is '{txt_src[:len(txt_up)]}'"
                cur_data["text"] = txt_src
                cur_data["lang"] = _lang

            if self.config.schema == "source":
                # Keys are made unique across languages by prefixing the
                # language code.
                for key, example in enumerate(data):
                    yield f"{_lang}_{key}", example