|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" |
|
ConvQuestions is the first realistic benchmark for conversational question answering over |
|
knowledge graphs. It contains 11,200 conversations which can be evaluated over Wikidata. |
|
They are compiled from the inputs of 70 Master crowdworkers on Amazon Mechanical Turk, |
|
with conversations from five domains: Books, Movies, Soccer, Music, and TV Series. |
|
The questions feature a variety of complex question phenomena like comparisons, aggregations, |
|
compositionality, and temporal reasoning. Answers are grounded in Wikidata entities to enable |
|
fair comparison across diverse methods. The data gathering setup was kept as natural as |
|
possible, with the annotators selecting entities of their choice from each of the five domains, |
|
and formulating the entire conversation in one session. All questions in a conversation are |
|
from the same Turker, who also provided gold answers to the questions. For suitability to knowledge |
|
graphs, questions were constrained to be objective or factoid in nature, but no other restrictive |
|
guidelines were set. A notable property of ConvQuestions is that several questions are not |
|
answerable by Wikidata alone (as of September 2019), but the required facts can, for example, |
|
be found in the open Web or in Wikipedia. For details, please refer to our CIKM 2019 full paper |
|
(https://dl.acm.org/citation.cfm?id=3358016). |
|
""" |
|
|
|
|
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
|
|
# BibTeX entry for the CIKM 2019 paper introducing ConvQuestions; surfaced
# via `DatasetInfo.citation`. NOTE: the interleaved blank lines are part of
# the literal and are kept verbatim.
_CITATION = """\

@InProceedings{christmann2019look,

title={Look before you hop: Conversational question answering over knowledge graphs using judicious context expansion},

author={Christmann, Philipp and Saha Roy, Rishiraj and Abujabal, Abdalghani and Singh, Jyotsna and Weikum, Gerhard},

booktitle={Proceedings of the 28th ACM International Conference on Information and Knowledge Management},

pages={729--738},

year={2019}

}

"""



# Short dataset summary shown in `DatasetInfo.description`.
_DESCRIPTION = """\

ConvQuestions is the first realistic benchmark for conversational question answering over knowledge graphs.

It contains 11,200 conversations which can be evaluated over Wikidata. The questions feature a variety of complex

question phenomena like comparisons, aggregations, compositionality, and temporal reasoning."""



# Project homepage reported in `DatasetInfo.homepage`.
_HOMEPAGE = "https://convex.mpi-inf.mpg.de"



# License string reported in `DatasetInfo.license`.
_LICENSE = "CC BY 4.0"







# Base URL hosting the per-split zip archives.
_URL = "http://qa.mpi-inf.mpg.de/convex/"

# Split name -> download URL; keys match the `data_dir` keys used in
# `ConvQuestions._split_generators`.
_URLs = {

    "train": _URL + "ConvQuestions_train.zip",

    "dev": _URL + "ConvQuestions_dev.zip",

    "test": _URL + "ConvQuestions_test.zip",

}
|
|
|
|
|
class ConvQuestions(datasets.GeneratorBasedBuilder):
    """ConvQuestions is a realistic benchmark for conversational question answering over knowledge graphs."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the `DatasetInfo` describing the ConvQuestions schema.

        Each example is one full conversation: a seed entity plus the
        parallel per-turn sequences of questions, answers (a turn's answer
        is itself a list, since it may hold several values), and answer
        texts.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "domain": datasets.Value("string"),
                    "seed_entity": datasets.Value("string"),
                    "seed_entity_text": datasets.Value("string"),
                    "questions": datasets.features.Sequence(datasets.Value("string")),
                    # One list of answer strings per question turn.
                    "answers": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                    "answer_texts": datasets.features.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download/extract every split archive up front; `extracted` maps the
        # keys of `_URLs` ("train"/"dev"/"test") to local directories.
        extracted = dl_manager.download_and_extract(_URLs)
        # (datasets split, _URLs key, path of the JSON inside the archive)
        split_specs = [
            (datasets.Split.TRAIN, "train", "train_set/train_set_ALL.json"),
            (datasets.Split.VALIDATION, "dev", "dev_set/dev_set_ALL.json"),
            (datasets.Split.TEST, "test", "test_set/test_set_ALL.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": os.path.join(extracted[key], relative_path),
                    "split": key,
                },
            )
            for split_name, key, relative_path in split_specs
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples.

        `filepath` points at a JSON file holding a list of conversations;
        `split` is unused here but supplied via `gen_kwargs`. Semicolons in
        a turn's "answer" field separate multiple answer values.
        """
        with open(filepath, encoding="utf-8") as handle:
            conversations = json.load(handle)
        for index, conversation in enumerate(conversations):
            turns = conversation["questions"]
            yield index, {
                "domain": conversation["domain"],
                "seed_entity": conversation["seed_entity"],
                "seed_entity_text": conversation["seed_entity_text"],
                "questions": [turn["question"] for turn in turns],
                "answers": [turn["answer"].split(";") for turn in turns],
                "answer_texts": [turn["answer_text"] for turn in turns],
            }
|
|