asahi417 committed
Commit bb5992e
1 Parent(s): 49249de

Update wikineural.py

Files changed (1)
  1. wikineural.py +23 -34
wikineural.py CHANGED
@@ -4,43 +4,32 @@ from itertools import chain
 import datasets
 
 logger = datasets.logging.get_logger(__name__)
-_DESCRIPTION = """[WikiAnn](https://aclanthology.org/P17-1178/)"""
-_NAME = "wikiann"
-_VERSION = "1.1.0"
+_DESCRIPTION = """[wikineural](https://aclanthology.org/2021.findings-emnlp.215/)"""
+_NAME = "wikineural"
+_VERSION = "1.0.0"
 _CITATION = """
-@inproceedings{pan-etal-2017-cross,
-    title = "Cross-lingual Name Tagging and Linking for 282 Languages",
-    author = "Pan, Xiaoman and
-      Zhang, Boliang and
-      May, Jonathan and
-      Nothman, Joel and
-      Knight, Kevin and
-      Ji, Heng",
-    booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
-    month = jul,
-    year = "2017",
-    address = "Vancouver, Canada",
+@inproceedings{tedeschi-etal-2021-wikineural-combined,
+    title = "{W}iki{NE}u{R}al: {C}ombined Neural and Knowledge-based Silver Data Creation for Multilingual {NER}",
+    author = "Tedeschi, Simone and
+      Maiorca, Valentino and
+      Campolungo, Niccol{\`o} and
+      Cecconi, Francesco and
+      Navigli, Roberto",
+    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
+    month = nov,
+    year = "2021",
+    address = "Punta Cana, Dominican Republic",
     publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/P17-1178",
-    doi = "10.18653/v1/P17-1178",
-    pages = "1946--1958",
-    abstract = "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating {``}silver-standard{''} annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and on-Wikipedia data.",
+    url = "https://aclanthology.org/2021.findings-emnlp.215",
+    doi = "10.18653/v1/2021.findings-emnlp.215",
+    pages = "2521--2533",
+    abstract = "Multilingual Named Entity Recognition (NER) is a key intermediate task which is needed in many areas of NLP. In this paper, we address the well-known issue of data scarcity in NER, especially relevant when moving to a multilingual scenario, and go beyond current approaches to the creation of multilingual silver data for the task. We exploit the texts of Wikipedia and introduce a new methodology based on the effective combination of knowledge-based approaches and neural models, together with a novel domain adaptation technique, to produce high-quality training corpora for NER. We evaluate our datasets extensively on standard benchmarks for NER, yielding substantial improvements up to 6 span-based F1-score points over previous state-of-the-art systems for data creation.",
 }
 """
 
 _HOME_PAGE = "https://github.com/asahi417/tner"
 _URL = f'https://huggingface.co/datasets/tner/{_NAME}/resolve/main/dataset'
-_LANGUAGE = ["ace", "bg", "da", "fur", "ilo", "lij", "mzn", "qu", "su", "vi", "af", "bh", "de", "fy", "io", "lmo", "nap",
-             "rm", "sv", "vls", "als", "bn", "diq", "ga", "is", "ln", "nds", "ro", "sw", "vo", "am", "bo", "dv", "gan", "it",
-             "lt", "ne", "ru", "szl", "wa", "an", "br", "el", "gd", "ja", "lv", "nl", "rw", "ta", "war", "ang", "bs", "eml",
-             "gl", "jbo", "map-bms", "nn", "sa", "te", "wuu", "ar", "ca", "en", "gn", "jv", "mg", "no", "sah", "tg", "xmf",
-             "arc", "cbk-zam", "eo", "gu", "ka", "mhr", "nov", "scn", "th", "yi", "arz", "cdo", "es", "hak", "kk", "mi",
-             "oc", "sco", "tk", "yo", "as", "ce", "et", "he", "km", "min", "or", "sd", "tl", "zea", "ast", "ceb", "eu", "hi",
-             "kn", "mk", "os", "sh", "tr", "zh-classical", "ay", "ckb", "ext", "hr", "ko", "ml", "pa", "si", "tt",
-             "zh-min-nan", "az", "co", "fa", "hsb", "ksh", "mn", "pdc", "simple", "ug", "zh-yue", "ba", "crh", "fi", "hu",
-             "ku", "mr", "pl", "sk", "uk", "zh", "bar", "cs", "fiu-vro", "hy", "ky", "ms", "pms", "sl", "ur", "bat-smg",
-             "csb", "fo", "ia", "la", "mt", "pnb", "so", "uz", "be-x-old", "cv", "fr", "id", "lb", "mwl", "ps", "sq", "vec",
-             "be", "cy", "frr", "ig", "li", "my", "pt", "sr", "vep"]
+_LANGUAGE = ['de', 'en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ru']
 _URLS = {
     l: {
         str(datasets.Split.TEST): [f'{_URL}/{l}/test.jsonl'],
@@ -50,7 +39,7 @@ _URLS = {
 }
 
 
-class WikiAnnConfig(datasets.BuilderConfig):
+class WikiNeuralConfig(datasets.BuilderConfig):
     """BuilderConfig"""
 
     def __init__(self, **kwargs):
@@ -59,14 +48,14 @@ class WikiAnnConfig(datasets.BuilderConfig):
         Args:
           **kwargs: keyword arguments forwarded to super.
         """
-        super(WikiAnnConfig, self).__init__(**kwargs)
+        super(WikiNeuralConfig, self).__init__(**kwargs)
 
 
-class WikiAnn(datasets.GeneratorBasedBuilder):
+class WikiNeural(datasets.GeneratorBasedBuilder):
     """Dataset."""
 
     BUILDER_CONFIGS = [
-        WikiAnnConfig(name=l, version=datasets.Version(_VERSION), description=f"{_DESCRIPTION} (language: {l})") for l in _LANGUAGE
+        WikiNeuralConfig(name=l, version=datasets.Version(_VERSION), description=f"{_DESCRIPTION} (language: {l})") for l in _LANGUAGE
     ]
 
     def _split_generators(self, dl_manager):
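
For reference, a minimal sketch of how the renamed builder is consumed downstream, assuming a `datasets` version that still executes dataset loading scripts; the config name selects one entry of the new `_LANGUAGE` list, and "en" below is just an example:

from datasets import load_dataset

# Each entry of _LANGUAGE becomes one BuilderConfig, so the config
# name picks the language subset of WikiNEuRal.
dataset = load_dataset("tner/wikineural", "en")

# Splits resolve to the per-language JSONL files under _URL; e.g. the
# test split is built from {_URL}/en/test.jsonl per the _URLS mapping.
print(dataset["test"][0])

Keeping one config per language mirrors the WikiAnn loader this script was adapted from, so callers only swap the repository name and pick from the smaller language list.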