"""Quoref: A Reading Comprehension Dataset with Questions Requiring Coreferential Reasoning."""


import json
import os

import datasets


_CITATION = """\
@article{allenai:quoref,
    author = {Pradeep Dasigi and Nelson F. Liu and Ana Marasovic and Noah A. Smith and Matt Gardner},
    title = {Quoref: A Reading Comprehension Dataset with Questions Requiring Coreferential Reasoning},
    journal = {arXiv:1908.05803v2},
    year = {2019},
}
"""

_DESCRIPTION = """\
Quoref is a QA dataset which tests the coreferential reasoning capability of reading comprehension systems. In this
span-selection benchmark containing 24K questions over 4.7K paragraphs from Wikipedia, a system must resolve hard
coreferences before selecting the appropriate span(s) in the paragraphs for answering questions.
"""

_URL = "https://quoref-dataset.s3-us-west-2.amazonaws.com/train_and_dev/quoref-train-dev-v0.1.zip"


class Quoref(datasets.GeneratorBasedBuilder):
    """Quoref: span-selection QA over Wikipedia paragraphs that requires resolving hard coreferences."""

    VERSION = datasets.Version("0.1.0")

    def _info(self):
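        # The feature schema mirrors SQuAD: "answers" holds parallel lists of
        # character start offsets and answer texts, since a single question
        # can have multiple gold answer spans.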
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "answer_start": datasets.Value("int32"),
                            "text": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://leaderboard.allenai.org/quoref/submissions/get-started",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(dl_dir, "quoref-train-dev-v0.1")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, "quoref-train-v0.1.json")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(data_dir, "quoref-dev-v0.1.json")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for article in data["data"]:
                title = article.get("title", "").strip()
                url = article.get("url", "").strip()
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"].strip()
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        id_ = qa["id"]
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]
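                        # Emit the answer starts and texts as parallel lists,
                        # matching the Sequence feature declared in _info.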
                        yield id_, {
                            "title": title,
                            "context": context,
                            "question": question,
                            "id": id_,
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                            "url": url,
                        }
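

# Illustrative usage, assuming this script is available to `load_dataset`
# (e.g. by local path, or published as "quoref" on the Hugging Face Hub):
#
#     import datasets
#
#     quoref = datasets.load_dataset("quoref")
#     print(quoref["train"][0]["question"])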