# wikidata_rubq/wikidata_rubq.py
import datasets
import os
import json
import pickle
from wikidata.client import Client
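# Module-level Wikidata API client; get_name() below uses it to fetch
# entity labels from the live Wikidata service.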
client = Client()
_DESCRIPTION = """\
Hugging Face wrapper for the RuBQ 2.0 dataset (https://github.com/vladislavneon/RuBQ).
"""
_HOMEPAGE = "https://zenodo.org/record/4345697#.Y01k81JBy3I"
_LICENSE = "Attribution-ShareAlike 4.0 International"
_LANGS = ["ru", "en"]
_URLS = {
"test": "https://raw.githubusercontent.com/vladislavneon/RuBQ/master/RuBQ_2.0/RuBQ_2.0_test.json",
"dev": "https://raw.githubusercontent.com/vladislavneon/RuBQ/master/RuBQ_2.0/RuBQ_2.0_dev.json",
}
_DATA_DIRECTORY = "."
VERSION = datasets.Version("0.0.1")
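# A minimal usage sketch (illustrative only; it assumes this file is used as a
# local loading script and that the pickled vocabulary files sit next to it;
# the path below is a placeholder):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/wikidata_rubq", "multiple_en")
#   print(ds["validation"][0])  # {'object': [...], 'question': '...'}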
class WikidataRuBQConfig(datasets.BuilderConfig):
"""BuilderConfig for WikidataRuBQ."""
def __init__(self, **kwargs):
"""BuilderConfig for WikidataRuBQ.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(WikidataRuBQConfig, self).__init__(**kwargs)
class WikidataRuBQ(datasets.GeneratorBasedBuilder):
"""HuggingFace wrapper https://github.com/vladislavneon/RuBQ/tree/master/RuBQ_2.0 dataset"""
BUILDER_CONFIG_CLASS = WikidataRuBQConfig
    BUILDER_CONFIGS = [
WikidataRuBQConfig(
name=f"multiple_{ln}",
version=VERSION,
description="questions with russian multiple labels as answers",
)
for ln in _LANGS
]
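    # Config used when load_dataset() is called without an explicit name.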
DEFAULT_CONFIG_NAME = "multiple_en"
def _info(self):
features = datasets.Features(
{
"object": datasets.Sequence(datasets.Value("string")),
"question": datasets.Value("string")
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
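        # Config names have the form "multiple_<lang>"; the bare "default"
        # name falls back to English.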
if self.config.name == "default":
version, lang = "multiple", "en"
else:
version, lang = self.config.name.split("_")
if lang not in _LANGS:
raise ValueError(f"Language {lang} not supported")
downloaded_files = dl_manager.download_and_extract(_URLS)
        # self.base_path points at the dataset repository root, where the
        # vocabulary files live alongside this script. Note: vocab_path is
        # forwarded to _generate_examples but is not currently read there.
        data_dir = os.path.join(self.base_path, '')
        vocab_path = os.path.join(data_dir, "reverse_vocab_wikidata_en.json")
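        # RuBQ 2.0 ships only dev and test files, so both TRAIN and VALIDATION
        # are generated from the dev file; they differ in how answers are
        # yielded (see _generate_examples).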
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": downloaded_files["dev"],
"lang": lang,
"vocab_path": vocab_path,
"split": 'train',
"data_dir": data_dir
}),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": downloaded_files["dev"],
"lang": lang,
"vocab_path": vocab_path,
"split": 'validation',
"data_dir": data_dir
}),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": downloaded_files["test"],
"lang": lang,
"vocab_path": vocab_path,
"split": 'test',
"data_dir": data_dir
})
]
    def get_name(self, idd):
        '''
        Return the English label of a Wikidata entity.
        input: (str) Wikidata id, e.g. 'Q2'
        output: (str) English label of the entity, or None if it has no
        English label
        '''
        entity = client.get(idd, load=True)
        name = None
        try:
            name = entity.data["labels"]["en"]["value"]
        except KeyError:
            # The entity has no English label.
            pass
        return name
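    # For example, get_name('Q2') is expected to return 'Earth'. Note that the
    # call performs a live Wikidata API request; _generate_examples below
    # resolves labels from the pickled vocabularies instead.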
    def _generate_examples(self, filepath, lang, vocab_path, split, data_dir):
        # The pickled "direct" vocabularies map Wikidata QIDs (e.g. 'Q2') to
        # label strings; the validation split reuses the train dictionary.
        if split == 'test':
            direct_path = os.path.join(data_dir, "test_direct_vocab_wikidata_en.pkl")
        else:
            direct_path = os.path.join(data_dir, "train_direct_vocab_wikidata_en.pkl")
        with open(direct_path, 'rb') as handle:
            direct_vocab = pickle.load(handle)
with open(filepath, encoding="utf-8") as f:
item = json.load(f)
uid_slide = 0
for i in item:
question = i['question_text'] if lang == 'ru' else i['question_eng']
            # Keep only Wikidata entity answers, reduced to bare QIDs.
            objects = list({
                answer['value'].split('entity/')[1]
                for answer in i['answers'] if '/Q' in answer['value']
            })
            if objects:
                if split == 'train':
                    # Train yields one example per answer entity, resolving the
                    # QID to a human-readable label via the train vocabulary
                    # (a QID missing from the vocabulary yields [None]).
                    # uid_slide keeps keys unique when a single question
                    # produces several examples.
                    for obj in objects:
                        key = i['uid'] + uid_slide
                        resolved_obj = direct_vocab.get(obj, None)
                        if resolved_obj is not None:
                            # Capitalize the first letter of the label.
                            resolved_obj = resolved_obj[0].upper() + resolved_obj[1:]
                        uid_slide += 1
                        yield (
                            key,
                            {
                                "object": [resolved_obj],
                                "question": question,
                            }
                        )
                else:
                    # Validation and test yield one example per question; QIDs
                    # are resolved with the loaded vocabulary (the train
                    # dictionary for validation), falling back to the raw QID
                    # when no label is known. uids are unique per question, so
                    # they serve directly as keys.
                    resolved = [direct_vocab.get(obj, obj) for obj in objects]
                    resolved = [r[0].upper() + r[1:] for r in resolved]
                    yield (
                        i['uid'],
                        {
                            "object": resolved,
                            "question": question,
                        }
                    )