update

- .gitattributes +27 -0
- README.md +248 -0
- dataset/de/dev.jsonl +3 -0
- dataset/de/test.jsonl +3 -0
- dataset/de/train.jsonl +3 -0
- dataset/en/dev.jsonl +3 -0
- dataset/en/test.jsonl +3 -0
- dataset/en/train.jsonl +3 -0
- dataset/es/dev.jsonl +3 -0
- dataset/es/test.jsonl +3 -0
- dataset/es/train.jsonl +3 -0
- dataset/fr/dev.jsonl +3 -0
- dataset/fr/test.jsonl +3 -0
- dataset/fr/train.jsonl +3 -0
- dataset/it/dev.jsonl +3 -0
- dataset/it/test.jsonl +3 -0
- dataset/it/train.jsonl +3 -0
- dataset/label.json +1 -0
- dataset/nl/dev.jsonl +3 -0
- dataset/nl/test.jsonl +3 -0
- dataset/nl/train.jsonl +3 -0
- dataset/pl/dev.jsonl +3 -0
- dataset/pl/test.jsonl +3 -0
- dataset/pl/train.jsonl +3 -0
- dataset/pt/dev.jsonl +3 -0
- dataset/pt/test.jsonl +3 -0
- dataset/pt/train.jsonl +3 -0
- dataset/ru/dev.jsonl +3 -0
- dataset/ru/test.jsonl +3 -0
- dataset/ru/train.jsonl +3 -0
- wikineural.py +100 -0
.gitattributes CHANGED
@@ -49,3 +49,30 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+dataset/nl/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/pl/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/pt/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/de/train.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/en/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/nl/train.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/ru/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/ru/train.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/de/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/pt/train.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/es/train.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/fr/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/pl/train.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/pt/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/ru/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/en/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/it/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/it/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/it/train.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/nl/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/pl/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/en/train.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/es/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/fr/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/de/test.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/es/dev.jsonl filter=lfs diff=lfs merge=lfs -text
+dataset/fr/train.jsonl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,248 @@
+---
+language:
+- ace
+- bg
+- da
+- fur
+- ilo
+- lij
+- mzn
+- qu
+- su
+- vi
+- af
+- bh
+- de
+- fy
+- io
+- lmo
+- nap
+- rm
+- sv
+- vls
+- als
+- bn
+- diq
+- ga
+- is
+- ln
+- nds
+- ro
+- sw
+- vo
+- am
+- bo
+- dv
+- gan
+- it
+- lt
+- ne
+- ru
+- szl
+- wa
+- an
+- br
+- el
+- gd
+- ja
+- lv
+- nl
+- rw
+- ta
+- war
+- ang
+- bs
+- eml
+- gl
+- jbo
+- nn
+- sa
+- te
+- wuu
+- ar
+- ca
+- en
+- gn
+- jv
+- mg
+- no
+- sah
+- tg
+- xmf
+- arc
+- eo
+- gu
+- ka
+- mhr
+- nov
+- scn
+- th
+- yi
+- arz
+- cdo
+- es
+- hak
+- kk
+- mi
+- oc
+- sco
+- tk
+- yo
+- as
+- ce
+- et
+- he
+- km
+- min
+- or
+- sd
+- tl
+- zea
+- ast
+- ceb
+- eu
+- hi
+- kn
+- mk
+- os
+- sh
+- tr
+- ay
+- ckb
+- ext
+- hr
+- ko
+- ml
+- pa
+- si
+- tt
+- az
+- co
+- fa
+- hsb
+- ksh
+- mn
+- pdc
+- ug
+- ba
+- crh
+- fi
+- hu
+- ku
+- mr
+- pl
+- sk
+- uk
+- zh
+- bar
+- cs
+- hy
+- ky
+- ms
+- pms
+- sl
+- ur
+- csb
+- fo
+- ia
+- la
+- mt
+- pnb
+- so
+- uz
+- cv
+- fr
+- id
+- lb
+- mwl
+- ps
+- sq
+- vec
+- be
+- cy
+- frr
+- ig
+- li
+- my
+- pt
+- sr
+multilinguality:
+- multilingual
+size_categories:
+- 10K<n<100K
+task_categories:
+- token-classification
+task_ids:
+- named-entity-recognition
+pretty_name: WikiAnn
+---
+
+# Dataset Card for "tner/wikiann"
+
+## Dataset Description
+
+- **Repository:** [T-NER](https://github.com/asahi417/tner)
+- **Paper:** [https://aclanthology.org/P17-1178/](https://aclanthology.org/P17-1178/)
+- **Dataset:** WikiAnn
+- **Domain:** Wikipedia
+- **Number of Entity Types:** 3
+
+
+### Dataset Summary
+WikiAnn NER dataset formatted as part of the [TNER](https://github.com/asahi417/tner) project.
+- Entity Types: `LOC`, `ORG`, `PER`
+
+## Dataset Structure
+
+### Data Instances
+An example of `train` looks as follows.
+
+```
+{
+    'tokens': ['I', 'hate', 'the', 'words', 'chunder', ',', 'vomit', 'and', 'puke', '.', 'BUUH', '.'],
+    'tags': [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6]
+}
+```
+
+### Label ID
+The label2id dictionary can be found [here](https://huggingface.co/datasets/tner/btc/raw/main/dataset/label.json).
+```python
+{
+    "B-LOC": 0,
+    "B-ORG": 1,
+    "B-PER": 2,
+    "I-LOC": 3,
+    "I-ORG": 4,
+    "I-PER": 5,
+    "O": 6
+}
+```
+
+### Data Splits
+
+| name |train|validation|test|
+|---------|----:|---------:|---:|
+|btc | 6338| 1001|2000|
+
+### Citation Information
+
+```
+@inproceedings{pan-etal-2017-cross,
+    title = "Cross-lingual Name Tagging and Linking for 282 Languages",
+    author = "Pan, Xiaoman and
+      Zhang, Boliang and
+      May, Jonathan and
+      Nothman, Joel and
+      Knight, Kevin and
+      Ji, Heng",
+    booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
+    month = jul,
+    year = "2017",
+    address = "Vancouver, Canada",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/P17-1178",
+    doi = "10.18653/v1/P17-1178",
+    pages = "1946--1958",
+    abstract = "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating {``}silver-standard{''} annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and on-Wikipedia data.",
+}
+```
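As a quick orientation for readers of the card above, here is a minimal, hypothetical sketch of loading one language configuration with the `datasets` library. It assumes the repository id named in the card (`tner/wikiann`) and the `en` configuration; adjust both to match the repository this commit actually lives in.

```python
# Minimal sketch: load one language split described in the dataset card.
# The repository id and configuration name below are assumptions taken from
# the card text, not verified against the Hub.
from datasets import load_dataset

dataset = load_dataset("tner/wikiann", "en")
example = dataset["train"][0]
print(example["tokens"])  # list of token strings
print(example["tags"])    # list of integer label ids
```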
dataset/de/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2305fdf6ed0576f5e9e58fddc7168d93191e170bb9b033b8f5c14091a70633a2
+size 2956643

dataset/de/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ba59411ad88a8d91cae04b80b9ffc72d68fd1c34f6498e19ca91269fd45f775
+size 2955992

dataset/de/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7550cdddfa9fca35342db069ed3294bac8682236aaea613a395ce4055762b855
+size 24287841

dataset/en/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b91e7fab275357a64833084dff97110e6e1dac70f61ba1424b76e5cde52a0bec
+size 3331507

dataset/en/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b922d416e64becf45c1a98d47660970aa8a3e0daae4ac0c6f6e734ad6033bce
+size 3315875

dataset/en/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09be83ba49a56b075f5dd86675bc8d3a3f5c65b3a532da9132af807c8ee3fc9e
+size 27354401

dataset/es/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8a579d647d28497dc472e3c430cd647ad706a925d8155be08eb51e9d2b655cb
+size 2869872

dataset/es/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9d3aafc8c6c9c88c144495e2c05bb8ccae6b98bb6a8bf0b1633592c53fc6092
+size 2932404

dataset/es/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:131461f3b136f5b9cd8cc3d3172e46b26ffa7b90e5bf199e1e81d4f2603fc64f
+size 24288511

dataset/fr/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8132bb1628bc17d2f823c4810b61e2afc5f35193b57c948855f9a5b96ffbf44a
+size 4130359

dataset/fr/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:449f4910fd5aa69e3450431f57b538fdf0c1d946814d62371a5ce41d5c3925d1
+size 3981385

dataset/fr/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c786599c5bd55b97041afb0e7fa4dcd0dbc55aec8a020a262bfd4c4a40417417
+size 33908971

dataset/it/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c08f73cfa3c6538dd73a9852b5d2a9af47a7bdababc557d0ded9dbf91e6c49b2
+size 3877156

dataset/it/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd1599ba69efbef6a6b8653d0c7c7146afed833d1f71d35a39568cd786462618
+size 3976121

dataset/it/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1058a2733c64ec13b83a2c2782c047ea1a8b1c1cc00a7cb66307c7895ac68497
+size 29879386
dataset/label.json ADDED
@@ -0,0 +1 @@
+{"O": 0, "B-PER": 1, "I-PER": 2, "B-LOC": 3, "I-LOC": 4, "B-ORG": 5, "I-ORG": 6, "B-ANIM": 7, "I-ANIM": 8, "B-BIO": 9, "I-BIO": 10, "B-CEL": 11, "I-CEL": 12, "B-DIS": 13, "I-DIS": 14, "B-EVE": 15, "I-EVE": 16, "B-FOOD": 17, "I-FOOD": 18, "B-INST": 19, "I-INST": 20, "B-MEDIA": 21, "I-MEDIA": 22, "B-PLANT": 23, "I-PLANT": 24, "B-MYTH": 25, "I-MYTH": 26, "B-TIME": 27, "I-TIME": 28, "B-VEHI": 29, "I-VEHI": 30, "B-MISC": 31, "I-MISC": 32}
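The single line added to `dataset/label.json` is the label2id mapping used to encode the integer `tags` in the JSONL files. A small sketch of decoding tag ids back to label strings with that file, assuming a local clone of the repository (the example tag sequence is made up):

```python
# Invert the label2id map shipped in dataset/label.json to decode tag ids.
import json

with open("dataset/label.json", encoding="utf-8") as f:
    label2id = json.load(f)

id2label = {idx: label for label, idx in label2id.items()}

tags = [0, 1, 2, 0]  # hypothetical tag sequence
print([id2label[t] for t in tags])  # ['O', 'B-PER', 'I-PER', 'O']
```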
dataset/nl/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e3228346750dfc087b7e39d88e7c16c13721e7fd6b67b5c8b451226ac0b6b8c
+size 2515461

dataset/nl/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8e2183d2dcc576424d2a966e3c94c14ec54c8962ee37131f1e4049afb2868aa
+size 2522449

dataset/nl/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b00a6823853891d7e603854e2ecdd55cdb6a9a1022730522eba0726661c4b832
+size 19593309

dataset/pl/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12beec59d787af8aa6a1c906ec0a19c94eedee12933f49594d6b98394dec1881
+size 3221813

dataset/pl/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34b8b81c405fd4755bf3883706ec0d4e01027c6854a78e34e28e7588cebab9a5
+size 3157834

dataset/pl/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8f955cdd254a4de9b3de18425b51b0b08c6a384c397a7799ca3fdbb9a7e04b2
+size 26917601

dataset/pt/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de9857e6d6027dbf6cc8609bcd2f02119aa98aabde923b27e7299368345edf37
+size 3399686

dataset/pt/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27e6e38ad81e6a4e50b9db28db46d69567cee90cb644e7049b8fedb76d45f1f9
+size 3417097

dataset/pt/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1eb6f8450f9566e13c0ecb35f7d577b1568e02e98475c887ce50aeb872b982a1
+size 25694361

dataset/ru/dev.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c72e68ea9172b6e5bc53f82fced78b95ca7ee0b5d95c3060541ba9f34e7019d
+size 7969439

dataset/ru/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7cc4b47f2811b83d73944e05dd7e0feef66dc676bab69b7561fc787a818cfe0
+size 7898262

dataset/ru/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47c195a40fbe45f5fbac2c1a3379fba5bbcc103be47086803fa39c254d213eb4
+size 67191608
wikineural.py ADDED
@@ -0,0 +1,100 @@
+""" NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """
+import json
+from itertools import chain
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+_DESCRIPTION = """[WikiAnn](https://aclanthology.org/P17-1178/)"""
+_NAME = "wikiann"
+_VERSION = "1.1.0"
+_CITATION = """
+@inproceedings{pan-etal-2017-cross,
+    title = "Cross-lingual Name Tagging and Linking for 282 Languages",
+    author = "Pan, Xiaoman and
+      Zhang, Boliang and
+      May, Jonathan and
+      Nothman, Joel and
+      Knight, Kevin and
+      Ji, Heng",
+    booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
+    month = jul,
+    year = "2017",
+    address = "Vancouver, Canada",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/P17-1178",
+    doi = "10.18653/v1/P17-1178",
+    pages = "1946--1958",
+    abstract = "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating {``}silver-standard{''} annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and on-Wikipedia data.",
+}
+"""
+
+_HOME_PAGE = "https://github.com/asahi417/tner"
+_URL = f'https://huggingface.co/datasets/tner/{_NAME}/resolve/main/dataset'
+_LANGUAGE = ["ace", "bg", "da", "fur", "ilo", "lij", "mzn", "qu", "su", "vi", "af", "bh", "de", "fy", "io", "lmo", "nap",
+             "rm", "sv", "vls", "als", "bn", "diq", "ga", "is", "ln", "nds", "ro", "sw", "vo", "am", "bo", "dv", "gan", "it",
+             "lt", "ne", "ru", "szl", "wa", "an", "br", "el", "gd", "ja", "lv", "nl", "rw", "ta", "war", "ang", "bs", "eml",
+             "gl", "jbo", "map-bms", "nn", "sa", "te", "wuu", "ar", "ca", "en", "gn", "jv", "mg", "no", "sah", "tg", "xmf",
+             "arc", "cbk-zam", "eo", "gu", "ka", "mhr", "nov", "scn", "th", "yi", "arz", "cdo", "es", "hak", "kk", "mi",
+             "oc", "sco", "tk", "yo", "as", "ce", "et", "he", "km", "min", "or", "sd", "tl", "zea", "ast", "ceb", "eu", "hi",
+             "kn", "mk", "os", "sh", "tr", "zh-classical", "ay", "ckb", "ext", "hr", "ko", "ml", "pa", "si", "tt",
+             "zh-min-nan", "az", "co", "fa", "hsb", "ksh", "mn", "pdc", "simple", "ug", "zh-yue", "ba", "crh", "fi", "hu",
+             "ku", "mr", "pl", "sk", "uk", "zh", "bar", "cs", "fiu-vro", "hy", "ky", "ms", "pms", "sl", "ur", "bat-smg",
+             "csb", "fo", "ia", "la", "mt", "pnb", "so", "uz", "be-x-old", "cv", "fr", "id", "lb", "mwl", "ps", "sq", "vec",
+             "be", "cy", "frr", "ig", "li", "my", "pt", "sr", "vep"]
+_URLS = {
+    l: {
+        str(datasets.Split.TEST): [f'{_URL}/{l}/test.jsonl'],
+        str(datasets.Split.TRAIN): [f'{_URL}/{l}/train.jsonl'],
+        str(datasets.Split.VALIDATION): [f'{_URL}/{l}/dev.jsonl']
+    } for l in _LANGUAGE
+}
+
+
+class WikiAnnConfig(datasets.BuilderConfig):
+    """BuilderConfig"""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig.
+
+        Args:
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(WikiAnnConfig, self).__init__(**kwargs)
+
+
+class WikiAnn(datasets.GeneratorBasedBuilder):
+    """Dataset."""
+
+    BUILDER_CONFIGS = [
+        WikiAnnConfig(name=l, version=datasets.Version(_VERSION), description=f"{_DESCRIPTION} (language: {l})") for l in _LANGUAGE
+    ]
+
+    def _split_generators(self, dl_manager):
+        downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
+        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
+                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
+
+    def _generate_examples(self, filepaths):
+        _key = 0
+        for filepath in filepaths:
+            logger.info(f"generating examples from = {filepath}")
+            with open(filepath, encoding="utf-8") as f:
+                _list = [i for i in f.read().split('\n') if len(i) > 0]
+                for i in _list:
+                    data = json.loads(i)
+                    yield _key, data
+                    _key += 1
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "tags": datasets.Sequence(datasets.Value("int32")),
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOME_PAGE,
+            citation=_CITATION,
+        )
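The loading script above builds one `BuilderConfig` per entry in `_LANGUAGE` and resolves each split to a `train`/`dev`/`test` JSONL file under `_URL`. A hedged sketch of exercising it from a local checkout follows; whether a script path can be loaded this way depends on the installed `datasets` version (recent releases require `trust_remote_code=True` or no longer support loading scripts at all):

```python
# Hypothetical local usage of the loading script added in this commit.
# "de" must be one of the configuration names generated from _LANGUAGE.
from datasets import load_dataset

dataset = load_dataset("wikineural.py", "de")  # path to the script in a local clone
print(dataset)                                 # DatasetDict with train/validation/test splits
print(dataset["validation"][0])                # {'tokens': [...], 'tags': [...]}
```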