"""Hugging Face dataset loading script for MoreHopQA."""

import json

import datasets


class MoreHopQAConfig(datasets.BuilderConfig):
    """BuilderConfig for MoreHopQA."""

    def __init__(self, data_path, **kwargs):
        """BuilderConfig for MoreHopQA.

        Args:
            data_path: string, path to the data file containing this config's examples.
            **kwargs: keyword arguments forwarded to super.
        """
        super(MoreHopQAConfig, self).__init__(**kwargs)
        self.data_path = data_path


class MoreHopQA(datasets.GeneratorBasedBuilder):
    """MoreHopQA: A dataset for multi-hop question answering."""

    BUILDER_CONFIG_CLASS = MoreHopQAConfig
    BUILDER_CONFIGS = [
        MoreHopQAConfig(
            name="verified",
            version=datasets.Version("1.0.0", ""),
            description="MoreHopQA multi-hop QA examples with human verification.",
            data_path="data/with_human_verification.json",
        ),
        MoreHopQAConfig(
            name="unverified",
            version=datasets.Version("1.0.0", ""),
            description="MoreHopQA multi-hop QA examples without human verification.",
            data_path="data/without_human_verification.json",
        ),
    ]
    DEFAULT_CONFIG_NAME = "verified"
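
    # Config selection happens at load time. A minimal usage sketch — the repo
    # id below is assumed from this page's breadcrumbs and may differ:
    #
    #   from datasets import load_dataset
    #   ds = load_dataset("julianschnitzler/morehopqa", "verified")    # default config
    #   ds = load_dataset("julianschnitzler/morehopqa", "unverified")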

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({
                "question": datasets.Value("string"),
                "context": datasets.Sequence({
                    "title": datasets.Value("string"),
                    "paragraphs": datasets.Sequence(datasets.Value("string")),
                }),
                "answer": datasets.Value("string"),
                "previous_question": datasets.Value("string"),
                "previous_answer": datasets.Value("string"),
                "question_decomposition": datasets.Sequence({
                    "sub_id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "paragraph_support_title": datasets.Value("string"),
                }),
                "question_on_last_hop": datasets.Value("string"),
                "answer_type": datasets.Value("string"),
                "previous_answer_type": datasets.Value("string"),
                "no_of_hops": datasets.Value("int32"),
                "reasoning_type": datasets.Value("string"),
            }),
        )
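
    # For orientation, each yielded example follows the schema above; the
    # values in this sketch are illustrative placeholders, not real data:
    #
    #   {
    #       "question": "...",
    #       "context": [{"title": "...", "paragraphs": ["...", "..."]}],
    #       "answer": "...",
    #       "previous_question": "...",
    #       "previous_answer": "...",
    #       "question_decomposition": [{"sub_id": "...", "question": "...",
    #                                   "answer": "...", "paragraph_support_title": "..."}],
    #       "no_of_hops": 3,  # hypothetical value
    #       ...
    #   }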

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The JSON files ship with the repository, so no download step is
        # needed; the configured relative path is passed straight through.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": self.config.data_path},
            )
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for item in data:
            yield item["_id"], {
                "question": item["question"],
                "context": [{
                    "title": subitem[0],  # title is the first item in the sublist
                    "paragraphs": subitem[1],  # list of paragraphs is the second item
                } for subitem in item.get("context", [])],
                "answer": item["answer"],
                "previous_question": item["previous_question"],
                "previous_answer": item["previous_answer"],
                "question_decomposition": [{
                    "sub_id": subitem["sub_id"],
                    "question": subitem["question"],
                    "answer": subitem["answer"],
                    "paragraph_support_title": subitem["paragraph_support_title"],
                } for subitem in item["question_decomposition"]],
                # The raw JSON abbreviates this key as "ques_on_last_hop".
                "question_on_last_hop": item["ques_on_last_hop"],
                "answer_type": item["answer_type"],
                "previous_answer_type": item["previous_answer_type"],
                "no_of_hops": item["no_of_hops"],
                "reasoning_type": item["reasoning_type"],
            }
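

# A minimal local smoke test (a sketch, not part of the loader). It assumes
# this script sits next to the data/ directory referenced by the configs, and
# that your installed `datasets` version supports `trust_remote_code`.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "verified", split="test", trust_remote_code=True)
    print(ds)
    print(ds[0]["question"], "->", ds[0]["answer"])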