Add dataset files
- README.md +14 -0
- example.ipynb +175 -0
- reverse_vocab_wikidata_en.json +0 -0
- test_direct_vocab_wikidata_en.pkl +0 -0
- train_direct_vocab_wikidata_en.pkl +0 -0
- wikidata_rubq.py +173 -0
README.md
ADDED
@@ -0,0 +1,14 @@
# wikidata-rubq-hf

Hugging Face Datasets wrapper for the Wikidata-RuBQ 2.0 dataset

### Usage

```bash
git clone [email protected]:s-nlp/wikidata-rubq-hf.git wikidata_rubq
```

```python3
from datasets import load_dataset
load_dataset('wikidata_rubq.py', 'multiple_en', cache_dir='.', ignore_verifications=True)
```
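For a quick sanity check after cloning, here is a minimal sketch of inspecting one loaded row (split and field names follow `wikidata_rubq.py` below; the exact printed values depend on the download):

```python3
from datasets import load_dataset

# Load only the test split; per the builder, each row carries a
# "question" string and an "object" list of Wikidata QIDs.
dataset = load_dataset('wikidata_rubq.py', 'multiple_en',
                       cache_dir='.', ignore_verifications=True,
                       split='test')
print(dataset[0]['question'])
print(dataset[0]['object'])
```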
example.ipynb
ADDED
@@ -0,0 +1,175 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "f3362bd9",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import PreTrainedTokenizer\n",
    "from typing import Dict\n",
    "import datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "8f447e4f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading and preparing dataset wikidata_rubq/multiple_en to /Users/m.shark/Documents/kq/cache/wikidata_rubq/multiple_en/0.0.1/876b4a13a1f967200cf24bbd09889db3ec1eaff98704d1f0cc7e278c5c1eac85...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c4460055d51c4041ac93247920423c49",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Downloading data files:   0%|          | 0/2 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f2c8e5a312434deeaed87df460b3ef69",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Downloading data:   0%|          | 0.00/619k [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6c30eb720a544bd6ad3606ef0a1b0ae9",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Downloading data:   0%|          | 0.00/158k [00:00<?, ?B/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "64d46fddb2194c90b2474d791ffcd11a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Extracting data files:   0%|          | 0/2 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Generating validation split: 0 examples [00:00, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Generating test split: 0 examples [00:00, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dataset wikidata_rubq downloaded and prepared to /Users/m.shark/Documents/kq/cache/wikidata_rubq/multiple_en/0.0.1/876b4a13a1f967200cf24bbd09889db3ec1eaff98704d1f0cc7e278c5c1eac85. Subsequent calls will reuse this data.\n"
     ]
    }
   ],
   "source": [
    "from datasets import load_dataset\n",
    "dataset = load_dataset(\n",
    "    'wikidata_rubq.py', 'multiple_en', \n",
    "    cache_dir='cache', \n",
    "    ignore_verifications=True, split = 'test')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "id": "849bd722",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6b30ff22b4794a4da4ae8e3f4949a3a5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/2 [00:00<?, ?ba/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "dataset = dataset.filter(lambda example: isinstance(example[\"object\"], str))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
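One caveat on the last cell: the builder below declares `object` as a `datasets.Sequence` of strings, so each `example["object"]` comes back as a list rather than a plain string. A sketch of a single-answer filter under that assumption:

```python3
# Keep only questions with exactly one answer entity;
# "object" is a list of QID strings under the declared features.
dataset = dataset.filter(lambda example: len(example["object"]) == 1)
```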
reverse_vocab_wikidata_en.json
ADDED
The diff for this file is too large to render.
test_direct_vocab_wikidata_en.pkl
ADDED
Binary file (59.4 kB).
train_direct_vocab_wikidata_en.pkl
ADDED
Binary file (14.5 kB).
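The two `.pkl` files are the per-split QID-to-label vocabularies that `wikidata_rubq.py` below reads with `pickle.load`. A minimal sketch for peeking inside one of them, assuming the same dict layout the loader expects:

```python3
import pickle

# Print a few QID -> English label pairs from the train vocabulary.
with open('train_direct_vocab_wikidata_en.pkl', 'rb') as handle:
    direct_vocab = pickle.load(handle)
for qid, label in list(direct_vocab.items())[:3]:
    print(qid, '->', label)
```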
wikidata_rubq.py
ADDED
@@ -0,0 +1,173 @@
import datasets
import os
import json
import pickle
from wikidata.client import Client

client = Client()

_DESCRIPTION = """\
HuggingFace wrapper for the https://github.com/vladislavneon/RuBQ dataset
"""

_HOMEPAGE = "https://zenodo.org/record/4345697#.Y01k81JBy3I"


_LICENSE = "Attribution-ShareAlike 4.0 International"

_LANGS = ["ru", "en"]


_URLS = {
    "test": "https://raw.githubusercontent.com/vladislavneon/RuBQ/master/RuBQ_2.0/RuBQ_2.0_test.json",
    "dev": "https://raw.githubusercontent.com/vladislavneon/RuBQ/master/RuBQ_2.0/RuBQ_2.0_dev.json",
}


_DATA_DIRECTORY = "."
VERSION = datasets.Version("0.0.1")


class WikidataRuBQConfig(datasets.BuilderConfig):
    """BuilderConfig for WikidataRuBQ."""

    def __init__(self, **kwargs):
        """BuilderConfig for WikidataRuBQ.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(WikidataRuBQConfig, self).__init__(**kwargs)


class WikidataRuBQ(datasets.GeneratorBasedBuilder):
    """HuggingFace wrapper for the https://github.com/vladislavneon/RuBQ/tree/master/RuBQ_2.0 dataset"""

    BUILDER_CONFIG_CLASS = WikidataRuBQConfig
    # One config per language: multiple_ru and multiple_en.
    BUILDER_CONFIGS = [
        WikidataRuBQConfig(
            name=f"multiple_{ln}",
            version=VERSION,
            description=f"questions with multiple {ln} labels as answers",
        )
        for ln in _LANGS
    ]

    DEFAULT_CONFIG_NAME = "multiple_en"

    def _info(self):
        features = datasets.Features(
            {
                "object": datasets.Sequence(datasets.Value("string")),
                "question": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        if self.config.name == "default":
            version, lang = "multiple", "en"
        else:
            version, lang = self.config.name.split("_")

        if lang not in _LANGS:
            raise ValueError(f"Language {lang} not supported")

        downloaded_files = dl_manager.download_and_extract(_URLS)

        data_dir = os.path.join(self.base_path, '')
        vocab_path = os.path.join(data_dir, "reverse_vocab_wikidata_en.json")

        # RuBQ 2.0 ships only dev and test files; the dev file backs both
        # the train and validation splits here.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                    "lang": lang,
                    "vocab_path": vocab_path,
                    "split": 'train',
                    "data_dir": data_dir
                }),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                    "lang": lang,
                    "vocab_path": vocab_path,
                    "split": 'validation',
                    "data_dir": data_dir
                }),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "lang": lang,
                    "vocab_path": vocab_path,
                    "split": 'test',
                    "data_dir": data_dir
                })
        ]

    def get_name(self, idd):
        '''
        Returns the English label of an entity given its Wikidata id.
        input: (str) Wikidata id, e.g. 'Q2'
        output: (str) English label of the entity, or None if unavailable
        '''
        entity = client.get(idd, load=True)
        name = None
        try:
            name = entity.data["labels"]["en"]["value"]
        except (KeyError, TypeError):
            pass
        return name

    def _generate_examples(self, filepath, lang, vocab_path, split, data_dir):
        # Per-split QID -> English label vocabulary shipped with the repo.
        direct_path = os.path.join(data_dir, f"{split}_direct_vocab_wikidata_en.pkl")

        with open(direct_path, 'rb') as handle:
            direct_vocab = pickle.load(handle)

        with open(filepath, encoding="utf-8") as f:
            item = json.load(f)
            uid_slide = 0
            for i in item:
                question = i['question_text'] if lang == 'ru' else i['question_eng']

                # Collect the unique Wikidata QIDs among the answers.
                objects = list(set(
                    answer['value'].split('entity/')[1]
                    for answer in i['answers'] if '/Q' in answer['value']
                ))

                if len(objects) >= 1:
                    if split == 'train':
                        # One example per answer entity, resolved to its label.
                        for obj in objects:
                            key = i['uid'] + uid_slide
                            resolved_obj = direct_vocab.get(obj, None)
                            if resolved_obj is not None:
                                resolved_obj = resolved_obj[0].upper() + resolved_obj[1:]

                            uid_slide += 1

                            yield (
                                key,
                                {
                                    "object": [resolved_obj],
                                    "question": question,
                                }
                            )
                    else:
                        # One example per question, with all answer QIDs.
                        key = i['uid'] + uid_slide
                        yield (
                            key,
                            {
                                "object": objects,
                                "question": question,
                            }
                        )
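Since `BUILDER_CONFIGS` registers one config per language in `_LANGS`, the Russian questions load the same way; a sketch (note that recent `datasets` releases replace `ignore_verifications` with `verification_mode`):

```python3
from datasets import load_dataset

# Config names follow f"multiple_{ln}" for ln in ["ru", "en"].
dataset = load_dataset('wikidata_rubq.py', 'multiple_ru',
                       cache_dir='cache', ignore_verifications=True)
print(dataset)             # DatasetDict with train/validation/test
print(dataset['test'][0])  # {'object': [...QIDs...], 'question': '...'}
```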