holylovenia committed on
Commit
a54b736
1 Parent(s): 5346539

Upload nergrit.py with huggingface_hub

Files changed (1)
  1. nergrit.py +189 -0
nergrit.py ADDED
@@ -0,0 +1,189 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """NERGrit Dataset"""
+
+ from pathlib import Path
+ from typing import List
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.common_parser import load_conll_data
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @misc{Fahmi_NERGRIT_CORPUS_2019,
+     author = {Fahmi, Husni and Wibisono, Yudi and Kusumawati, Riyanti},
+     title = {{NERGRIT CORPUS}},
+     url = {https://github.com/grit-id/nergrit-corpus},
+     year = {2019}
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+ _DATASETNAME = "nergrit"
+ _DESCRIPTION = """\
+ Nergrit Corpus is a dataset collection of Indonesian Named Entity Recognition (NER), Statement Extraction,
+ and Sentiment Analysis developed by PT Gria Inovasi Teknologi (GRIT).
+ The Named Entity Recognition data covers 19 entity types, as follows:
+ 'CRD': Cardinal
+ 'DAT': Date
+ 'EVT': Event
+ 'FAC': Facility
+ 'GPE': Geopolitical Entity
+ 'LAW': Law Entity (such as Undang-Undang)
+ 'LOC': Location
+ 'MON': Money
+ 'NOR': Political Organization
+ 'ORD': Ordinal
+ 'ORG': Organization
+ 'PER': Person
+ 'PRC': Percent
+ 'PRD': Product
+ 'QTY': Quantity
+ 'REG': Religion
+ 'TIM': Time
+ 'WOA': Work of Art
+ 'LAN': Language
+ """
+
+ _HOMEPAGE = "https://github.com/grit-id/nergrit-corpus"
+ _LICENSE = "MIT"
+ _URL = "https://github.com/cahya-wirawan/indonesian-language-models/raw/master/data/nergrit-corpus_20190726_corrected.tgz"
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class NergritDataset(datasets.GeneratorBasedBuilder):
+     """Indonesian Named Entity Recognition from https://github.com/grit-id/nergrit-corpus."""
+
+     label_classes = {
+         "ner": [
+             "B-CRD",
+             "B-DAT",
+             "B-EVT",
+             "B-FAC",
+             "B-GPE",
+             "B-LAN",
+             "B-LAW",
+             "B-LOC",
+             "B-MON",
+             "B-NOR",
+             "B-ORD",
+             "B-ORG",
+             "B-PER",
+             "B-PRC",
+             "B-PRD",
+             "B-QTY",
+             "B-REG",
+             "B-TIM",
+             "B-WOA",
+             "I-CRD",
+             "I-DAT",
+             "I-EVT",
+             "I-FAC",
+             "I-GPE",
+             "I-LAN",
+             "I-LAW",
+             "I-LOC",
+             "I-MON",
+             "I-NOR",
+             "I-ORD",
+             "I-ORG",
+             "I-PER",
+             "I-PRC",
+             "I-PRD",
+             "I-QTY",
+             "I-REG",
+             "I-TIM",
+             "I-WOA",
+             "O",
+         ],
+         "sentiment": ["B-POS", "B-NEG", "B-NET", "I-POS", "I-NEG", "I-NET", "O"],
+         "statement": ["B-BREL", "B-FREL", "B-STAT", "B-WHO", "I-BREL", "I-FREL", "I-STAT", "I-WHO", "O"],
+     }
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name=f"nergrit_{task}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description="NERGrit source schema",
+             schema="source",
+             subset_id=f"nergrit_{task}",
+         )
+         for task in label_classes
+     ]
+     BUILDER_CONFIGS += [
+         NusantaraConfig(
+             name=f"nergrit_{task}_nusantara_seq_label",
+             version=datasets.Version(_NUSANTARA_VERSION),
+             description="NERGrit Nusantara schema",
+             schema="nusantara_seq_label",
+             subset_id=f"nergrit_{task}",
+         )
+         for task in label_classes
+     ]
+
+     DEFAULT_CONFIG_NAME = "nergrit_ner_source"
+
+     def _info(self):
+         features = None
+         task = self.config.subset_id.split("_")[-1]
+         if self.config.schema == "source":
+             features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ner_tag": [datasets.Value("string")]})
+         elif self.config.schema == "nusantara_seq_label":
+             features = schemas.seq_label_features(self.label_classes[task])
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         task = self.config.subset_id.split("_")[-1]
+         archive = Path(dl_manager.download_and_extract(_URL))
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": archive / f"nergrit-corpus/{task}/data/train_corrected.txt"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": archive / f"nergrit-corpus/{task}/data/test_corrected.txt"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": archive / f"nergrit-corpus/{task}/data/valid_corrected.txt"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path):
+         conll_dataset = load_conll_data(filepath)
+
+         if self.config.schema == "source":
+             for index, row in enumerate(conll_dataset):
+                 ex = {"index": str(index), "tokens": row["sentence"], "ner_tag": row["label"]}
+                 yield index, ex
+         elif self.config.schema == "nusantara_seq_label":
+             for index, row in enumerate(conll_dataset):
+                 ex = {"id": str(index), "tokens": row["sentence"], "labels": row["label"]}
+                 yield index, ex
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
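
A minimal usage sketch, assuming the script is saved locally as nergrit.py and the nusacrowd package it imports is installed; the config names come from BUILDER_CONFIGS above:

from datasets import load_dataset

# "nergrit_ner_source" is the default config; swap "ner" for "sentiment"
# or "statement" to select the other two subsets.
ds = load_dataset("nergrit.py", name="nergrit_ner_source")

sample = ds["train"][0]
print(sample["tokens"])   # list of tokens in one sentence
print(sample["ner_tag"])  # parallel list of IOB2 tags, e.g. "B-PER", "O"

The nergrit_{task}_nusantara_seq_label configs expose the same splits under the Nusantara sequence-labeling schema, with "id", "tokens", and "labels" fields instead.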