holylovenia
committed on
Commit
•
d84bba0
1
Parent(s):
a93dddd
Upload nergrit.py with huggingface_hub
Browse files- nergrit.py +11 -11
nergrit.py
CHANGED
@@ -19,10 +19,10 @@ from typing import List
|
|
19 |
|
20 |
import datasets
|
21 |
|
22 |
-
from
|
23 |
-
from
|
24 |
-
from
|
25 |
-
from
|
26 |
|
27 |
_CITATION = """\
|
28 |
@misc{Fahmi_NERGRIT_CORPUS_2019,
|
@@ -66,7 +66,7 @@ _LICENSE = "MIT"
|
|
66 |
_URL = "https://github.com/cahya-wirawan/indonesian-language-models/raw/master/data/nergrit-corpus_20190726_corrected.tgz"
|
67 |
_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
|
68 |
_SOURCE_VERSION = "1.0.0"
|
69 |
-
|
70 |
|
71 |
|
72 |
class NergritDataset(datasets.GeneratorBasedBuilder):
|
@@ -118,7 +118,7 @@ class NergritDataset(datasets.GeneratorBasedBuilder):
|
|
118 |
"statement": ["B-BREL", "B-FREL", "B-STAT", "B-WHO", "I-BREL", "I-FREL", "I-STAT", "I-WHO", "O"],
|
119 |
}
|
120 |
BUILDER_CONFIGS = [
|
121 |
-
|
122 |
name=f"nergrit_{task}_source",
|
123 |
version=datasets.Version(_SOURCE_VERSION),
|
124 |
description="NERGrit source schema",
|
@@ -128,11 +128,11 @@ class NergritDataset(datasets.GeneratorBasedBuilder):
|
|
128 |
for task in label_classes
|
129 |
]
|
130 |
BUILDER_CONFIGS += [
|
131 |
-
|
132 |
-
name=f"nergrit_{task}
|
133 |
version=datasets.Version(_SOURCE_VERSION),
|
134 |
description="NERGrit Nusantara schema",
|
135 |
-
schema="
|
136 |
subset_id=f"nergrit_{task}",
|
137 |
)
|
138 |
for task in label_classes
|
@@ -145,7 +145,7 @@ class NergritDataset(datasets.GeneratorBasedBuilder):
|
|
145 |
task = self.config.subset_id.split("_")[-1]
|
146 |
if self.config.schema == "source":
|
147 |
features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ner_tag": [datasets.Value("string")]})
|
148 |
-
elif self.config.schema == "
|
149 |
features = schemas.seq_label_features(self.label_classes[task])
|
150 |
|
151 |
return datasets.DatasetInfo(
|
@@ -181,7 +181,7 @@ class NergritDataset(datasets.GeneratorBasedBuilder):
|
|
181 |
for index, row in enumerate(conll_dataset):
|
182 |
ex = {"index": str(index), "tokens": row["sentence"], "ner_tag": row["label"]}
|
183 |
yield index, ex
|
184 |
-
elif self.config.schema == "
|
185 |
for index, row in enumerate(conll_dataset):
|
186 |
ex = {"id": str(index), "tokens": row["sentence"], "labels": row["label"]}
|
187 |
yield index, ex
|
|
|
19 |
|
20 |
import datasets
|
21 |
|
22 |
+
from seacrowd.utils import schemas
|
23 |
+
from seacrowd.utils.common_parser import load_conll_data
|
24 |
+
from seacrowd.utils.configs import SEACrowdConfig
|
25 |
+
from seacrowd.utils.constants import Tasks
|
26 |
|
27 |
_CITATION = """\
|
28 |
@misc{Fahmi_NERGRIT_CORPUS_2019,
|
|
|
66 |
_URL = "https://github.com/cahya-wirawan/indonesian-language-models/raw/master/data/nergrit-corpus_20190726_corrected.tgz"
|
67 |
_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
|
68 |
_SOURCE_VERSION = "1.0.0"
|
69 |
+
_SEACROWD_VERSION = "2024.06.20"
|
70 |
|
71 |
|
72 |
class NergritDataset(datasets.GeneratorBasedBuilder):
|
|
|
118 |
"statement": ["B-BREL", "B-FREL", "B-STAT", "B-WHO", "I-BREL", "I-FREL", "I-STAT", "I-WHO", "O"],
|
119 |
}
|
120 |
BUILDER_CONFIGS = [
|
121 |
+
SEACrowdConfig(
|
122 |
name=f"nergrit_{task}_source",
|
123 |
version=datasets.Version(_SOURCE_VERSION),
|
124 |
description="NERGrit source schema",
|
|
|
128 |
for task in label_classes
|
129 |
]
|
130 |
BUILDER_CONFIGS += [
|
131 |
+
SEACrowdConfig(
|
132 |
+
name=f"nergrit_{task}_seacrowd_seq_label",
|
133 |
version=datasets.Version(_SOURCE_VERSION),
|
134 |
description="NERGrit Nusantara schema",
|
135 |
+
schema="seacrowd_seq_label",
|
136 |
subset_id=f"nergrit_{task}",
|
137 |
)
|
138 |
for task in label_classes
|
|
|
145 |
task = self.config.subset_id.split("_")[-1]
|
146 |
if self.config.schema == "source":
|
147 |
features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ner_tag": [datasets.Value("string")]})
|
148 |
+
elif self.config.schema == "seacrowd_seq_label":
|
149 |
features = schemas.seq_label_features(self.label_classes[task])
|
150 |
|
151 |
return datasets.DatasetInfo(
|
|
|
181 |
for index, row in enumerate(conll_dataset):
|
182 |
ex = {"index": str(index), "tokens": row["sentence"], "ner_tag": row["label"]}
|
183 |
yield index, ex
|
184 |
+
elif self.config.schema == "seacrowd_seq_label":
|
185 |
for index, row in enumerate(conll_dataset):
|
186 |
ex = {"id": str(index), "tokens": row["sentence"], "labels": row["label"]}
|
187 |
yield index, ex
|