Update wikineural.py

wikineural.py: CHANGED (+23 -34)
@@ -4,43 +4,32 @@ from itertools import chain
 import datasets
 
 logger = datasets.logging.get_logger(__name__)
-_DESCRIPTION = """[
-_NAME = "
-_VERSION = "1.
+_DESCRIPTION = """[wikineural](https://aclanthology.org/2021.findings-emnlp.215/)"""
+_NAME = "wikineural"
+_VERSION = "1.0.0"
 _CITATION = """
-@inproceedings{
-title = "
-author = "
-
-
-
-
-
-
-
-
-address = "Vancouver, Canada",
+@inproceedings{tedeschi-etal-2021-wikineural-combined,
+title = "{W}iki{NE}u{R}al: {C}ombined Neural and Knowledge-based Silver Data Creation for Multilingual {NER}",
+author = "Tedeschi, Simone and
+Maiorca, Valentino and
+Campolungo, Niccol{\`o} and
+Cecconi, Francesco and
+Navigli, Roberto",
+booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
+month = nov,
+year = "2021",
+address = "Punta Cana, Dominican Republic",
 publisher = "Association for Computational Linguistics",
-url = "https://aclanthology.org/
-doi = "10.18653/v1/
-pages = "
-abstract = "
+url = "https://aclanthology.org/2021.findings-emnlp.215",
+doi = "10.18653/v1/2021.findings-emnlp.215",
+pages = "2521--2533",
+abstract = "Multilingual Named Entity Recognition (NER) is a key intermediate task which is needed in many areas of NLP. In this paper, we address the well-known issue of data scarcity in NER, especially relevant when moving to a multilingual scenario, and go beyond current approaches to the creation of multilingual silver data for the task. We exploit the texts of Wikipedia and introduce a new methodology based on the effective combination of knowledge-based approaches and neural models, together with a novel domain adaptation technique, to produce high-quality training corpora for NER. We evaluate our datasets extensively on standard benchmarks for NER, yielding substantial improvements up to 6 span-based F1-score points over previous state-of-the-art systems for data creation.",
 }
 """
 
 _HOME_PAGE = "https://github.com/asahi417/tner"
 _URL = f'https://huggingface.co/datasets/tner/{_NAME}/resolve/main/dataset'
-_LANGUAGE = [
-    "rm", "sv", "vls", "als", "bn", "diq", "ga", "is", "ln", "nds", "ro", "sw", "vo", "am", "bo", "dv", "gan", "it",
-    "lt", "ne", "ru", "szl", "wa", "an", "br", "el", "gd", "ja", "lv", "nl", "rw", "ta", "war", "ang", "bs", "eml",
-    "gl", "jbo", "map-bms", "nn", "sa", "te", "wuu", "ar", "ca", "en", "gn", "jv", "mg", "no", "sah", "tg", "xmf",
-    "arc", "cbk-zam", "eo", "gu", "ka", "mhr", "nov", "scn", "th", "yi", "arz", "cdo", "es", "hak", "kk", "mi",
-    "oc", "sco", "tk", "yo", "as", "ce", "et", "he", "km", "min", "or", "sd", "tl", "zea", "ast", "ceb", "eu", "hi",
-    "kn", "mk", "os", "sh", "tr", "zh-classical", "ay", "ckb", "ext", "hr", "ko", "ml", "pa", "si", "tt",
-    "zh-min-nan", "az", "co", "fa", "hsb", "ksh", "mn", "pdc", "simple", "ug", "zh-yue", "ba", "crh", "fi", "hu",
-    "ku", "mr", "pl", "sk", "uk", "zh", "bar", "cs", "fiu-vro", "hy", "ky", "ms", "pms", "sl", "ur", "bat-smg",
-    "csb", "fo", "ia", "la", "mt", "pnb", "so", "uz", "be-x-old", "cv", "fr", "id", "lb", "mwl", "ps", "sq", "vec",
-    "be", "cy", "frr", "ig", "li", "my", "pt", "sr", "vep"]
+_LANGUAGE = ['de', 'en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ru']
 _URLS = {
     l: {
         str(datasets.Split.TEST): [f'{_URL}/{l}/test.jsonl'],
@@ -50,7 +39,7 @@ _URLS = {
 }
 
 
-class
+class WikiNeuralConfig(datasets.BuilderConfig):
     """BuilderConfig"""
 
     def __init__(self, **kwargs):
@@ -59,14 +48,14 @@ class WikiAnnConfig(datasets.BuilderConfig):
         Args:
             **kwargs: keyword arguments forwarded to super.
         """
-        super(
+        super(WikiNeuralConfig, self).__init__(**kwargs)
 
 
-class
+class WikiNeural(datasets.GeneratorBasedBuilder):
     """Dataset."""
 
     BUILDER_CONFIGS = [
-
+        WikiNeuralConfig(name=l, version=datasets.Version(_VERSION), description=f"{_DESCRIPTION} (language: {l})") for l in _LANGUAGE
     ]
 
     def _split_generators(self, dl_manager):