holylovenia committed
Commit b659778
1 Parent(s): 5059909

Upload singgalang.py with huggingface_hub

Files changed (1)
  1. singgalang.py +155 -0
singgalang.py ADDED
@@ -0,0 +1,155 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.common_parser import load_conll_data
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @INPROCEEDINGS{8355036,
+   author={Alfina, Ika and Savitri, Septiviana and Fanany, Mohamad Ivan},
+   title={Modified DBpedia entities expansion for tagging automatically NER dataset},
+   booktitle={2017 International Conference on Advanced Computer Science and Information Systems (ICACSIS)},
+   pages={216-221},
+   year={2017},
+   url={https://ieeexplore.ieee.org/document/8355036},
+   doi={10.1109/ICACSIS.2017.8355036}}
+
+ @INPROCEEDINGS{7872784,
+   author={Alfina, Ika and Manurung, Ruli and Fanany, Mohamad Ivan},
+   booktitle={2016 International Conference on Advanced Computer Science and Information Systems (ICACSIS)},
+   title={DBpedia entities expansion in automatically building dataset for Indonesian NER},
+   year={2016},
+   pages={335-340},
+   doi={10.1109/ICACSIS.2016.7872784}}
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _DATASETNAME = "singgalang"
+
+ _DESCRIPTION = """\
+ Rule-based annotated Indonesian NER dataset of 48,957 sentences (1,478,286 tokens).
+ The annotation conforms to the Stanford-NER format (https://stanfordnlp.github.io/CoreNLP/ner.html), with 3 NER tags: Person, Organisation, and Place.
+ The dataset contains 41,297 Person, 14,770 Organisation, and 82,179 Place entity tokens, produced by 14, 6, and 5 rules, respectively.
+ """
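+
+ # NOTE: the raw SINGGALANG.tsv is parsed with load_conll_data below. It is assumed here
+ # to follow the usual CoNLL-style layout of one token and its tag per line ("token\ttag"),
+ # with blank lines separating sentences; illustrative rows (not taken from the file):
+ #   Indonesia\tPlace
+ #   adalah\tO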
+
+ _HOMEPAGE = "https://github.com/ir-nlp-csui/singgalang"
+
+ _LICENSE = """\
+ You can use this dataset for free. You don't need our permission to use it. Please cite our paper if your work uses our data in your publication.
+ Please note that you are not allowed to create a copy of this dataset and share it publicly in your own repository without our permission.\
+ """
+
+ _URLS = {
+     _DATASETNAME: "https://raw.githubusercontent.com/ir-nlp-csui/singgalang/main/SINGGALANG.tsv",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class SinggalangDataset(datasets.GeneratorBasedBuilder):
+     """Rule-based annotated Indonesian NER dataset of 48,957 sentences with 3 NER tags."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     label_classes = [
+         "O",
+         "Person",
+         "Organisation",
+         "Place",
+     ]
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         NusantaraConfig(
+             name=f"{_DATASETNAME}_nusantara_seq_label",
+             version=NUSANTARA_VERSION,
+             description=f"{_DATASETNAME} Nusantara schema",
+             schema="nusantara_seq_label",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "sentence": [datasets.Value("string")],
+                     "label": [datasets.Value("string")],
+                 }
+             )
+
+         elif self.config.schema == "nusantara_seq_label":
+             features = schemas.seq_label_features(self.label_classes)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         url = _URLS[_DATASETNAME]
+         data_path = dl_manager.download(url)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_path,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         dataset = load_conll_data(filepath)
+
+         if self.config.schema == "source":
+             for key, ex in enumerate(dataset):
+                 yield key, ex
+
+         elif self.config.schema == "nusantara_seq_label":
+             for key, ex in enumerate(dataset):
+                 yield key, {
+                     "id": str(key),
+                     "tokens": ex["sentence"],
+                     "labels": ex["label"],
+                 }
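
For reference, a minimal usage sketch (not part of this commit): assuming the nusacrowd package is importable and this script is saved locally as singgalang.py, the two configs declared in BUILDER_CONFIGS can be loaded through the standard datasets API (recent datasets releases may additionally require trust_remote_code=True):

import datasets

# Source schema: parallel "sentence" (token) and "label" (tag) lists per example.
source = datasets.load_dataset("singgalang.py", name="singgalang_source", split="train")

# Nusantara sequence-labeling schema: "id", "tokens", "labels".
seq_label = datasets.load_dataset("singgalang.py", name="singgalang_nusantara_seq_label", split="train")

print(source[0]["sentence"][:5], seq_label[0]["labels"][:5])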