|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""MRQA 2019 Shared task dataset.""" |
|
|
|
import json |
|
|
|
import datasets |
|
|
|
_CITATION = """\ |
|
@inproceedings{fisch2019mrqa, |
|
title={{MRQA} 2019 Shared Task: Evaluating Generalization in Reading Comprehension}, |
|
author={Adam Fisch and Alon Talmor and Robin Jia and Minjoon Seo and Eunsol Choi and Danqi Chen}, |
|
booktitle={Proceedings of 2nd Machine Reading for Reading Comprehension (MRQA) Workshop at EMNLP}, |
|
year={2019}, |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
The MRQA 2019 Shared Task focuses on generalization in question answering. |
|
An effective question answering system should do more than merely |
|
interpolate from the training set to answer test examples drawn |
|
from the same distribution: it should also be able to extrapolate |
|
to out-of-distribution examples — a significantly harder challenge. |
|
The dataset is a collection of 18 existing QA dataset (carefully selected |
|
subset of them) and converted to the same format (SQuAD format). Among |
|
these 18 datasets, six datasets were made available for training, |
|
six datasets were made available for development, and the final six |
|
for testing. The dataset is released as part of the MRQA 2019 Shared Task. |
|
""" |
|
|
|
_HOMEPAGE = "https://mrqa.github.io/2019/shared.html" |
|
|
|
_LICENSE = "Unknwon" |
|
|
|
_URLs = { |
|
|
|
"train+SQuAD": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SQuAD.jsonl.gz", |
|
"train+NewsQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NewsQA.jsonl.gz", |
|
"train+TriviaQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/TriviaQA-web.jsonl.gz", |
|
"train+SearchQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/SearchQA.jsonl.gz", |
|
"train+HotpotQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/HotpotQA.jsonl.gz", |
|
"train+NaturalQuestions": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/train/NaturalQuestionsShort.jsonl.gz", |
|
|
|
"validation+SQuAD": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SQuAD.jsonl.gz", |
|
"validation+NewsQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NewsQA.jsonl.gz", |
|
"validation+TriviaQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/TriviaQA-web.jsonl.gz", |
|
"validation+SearchQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/SearchQA.jsonl.gz", |
|
"validation+HotpotQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/HotpotQA.jsonl.gz", |
|
"validation+NaturalQuestions": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/NaturalQuestionsShort.jsonl.gz", |
|
|
|
"test+BioASQ": "http://participants-area.bioasq.org/MRQA2019/", |
|
"test+DROP": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/DROP.jsonl.gz", |
|
"test+DuoRC": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/DuoRC.ParaphraseRC.jsonl.gz", |
|
"test+RACE": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/RACE.jsonl.gz", |
|
"test+RelationExtraction": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/RelationExtraction.jsonl.gz", |
|
"test+TextbookQA": "https://s3.us-east-2.amazonaws.com/mrqa/release/v2/dev/TextbookQA.jsonl.gz", |
|
} |
|
|
|
|
|
class MRQAConfig(datasets.BuilderConfig):
    """BuilderConfig for one MRQA subset (one source dataset)."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for one MRQA subset.

        Args:
            data_url: `dict[str, str]`, mapping each split name
                ("train" / "validation" / "test") to the URL of that split's
                gzipped jsonl file.
            **kwargs: keyword arguments forwarded to super
                (e.g. `name`, `description`).

        Note: the previous docstring documented parameters
        (`additional_features`, `citation`, `url`, `label_classes`) that this
        constructor never accepted; they are removed here.
        """
        # The config version is pinned independently of the builder VERSION.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
|
|
|
|
|
class MRQA(datasets.GeneratorBasedBuilder):
    """MRQA 2019 Shared task dataset."""

    VERSION = datasets.Version("1.1.0")

    # Only the six in-domain subsets are exposed as configs. None of them has
    # a published test file under these keys, so each config reuses the
    # official dev split as its "test" split.
    BUILDER_CONFIGS = [
        MRQAConfig(
            name="searchqa",
            data_url={
                "validation": _URLs["validation+SearchQA"],
                "train": _URLs["train+SearchQA"],
                "test": _URLs["validation+SearchQA"],
            },
        ),
        MRQAConfig(
            name="squad",
            data_url={
                "validation": _URLs["validation+SQuAD"],
                "train": _URLs["train+SQuAD"],
                "test": _URLs["validation+SQuAD"],
            },
        ),
        MRQAConfig(
            name="newsqa",
            data_url={
                "validation": _URLs["validation+NewsQA"],
                "train": _URLs["train+NewsQA"],
                "test": _URLs["validation+NewsQA"],
            },
        ),
        MRQAConfig(
            name="natural_questions",
            data_url={
                "validation": _URLs["validation+NaturalQuestions"],
                "train": _URLs["train+NaturalQuestions"],
                "test": _URLs["validation+NaturalQuestions"],
            },
        ),
        MRQAConfig(
            name="hotpotqa",
            data_url={
                "validation": _URLs["validation+HotpotQA"],
                "train": _URLs["train+HotpotQA"],
                "test": _URLs["validation+HotpotQA"],
            },
        ),
        MRQAConfig(
            name="triviaqa",
            data_url={
                "validation": _URLs["validation+TriviaQA"],
                "train": _URLs["train+TriviaQA"],
                "test": _URLs["validation+TriviaQA"],
            },
        ),
    ]

    def _info(self):
        """Declare the feature schema shared by every MRQA subset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "subset": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "qid": datasets.Value("string"),
                    "idx": datasets.Value("int32"),
                    "question": datasets.Value("string"),
                    "answers": datasets.Sequence(datasets.Value("string")),
                    # Convenience copy of the first reference answer.
                    "answer": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download all split files and return one generator per split."""
        # data_dir maps split name -> local path of the downloaded file.
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepaths_dict": data_dir, "split": split_key},
            )
            for split_name, split_key in (
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepaths_dict, split):
        """Yields (key, example) pairs for the requested split.

        The first line of each MRQA jsonl file is a header identifying the
        source dataset; every subsequent line holds one paragraph with its
        list of QA pairs.
        """
        for source, filepath in filepaths_dict.items():
            # Fix: the original used substring matching (`source not in
            # split`), which only worked by accident for the current key
            # names; compare split keys for equality instead.
            if source != split:
                continue
            with open(filepath, encoding="utf-8") as f:
                header = next(f)
                subset = json.loads(header)["header"]["dataset"]

                idx = 0
                for row in f:
                    paragraph = json.loads(row)
                    context = clean_context(paragraph["context"])
                    for qa in paragraph["qas"]:
                        qid = qa["qid"]
                        question = qa["question"].strip()
                        # Normalize questions to end with "?". endswith() is
                        # also safe for an empty question, where the original
                        # `question[-1]` raised IndexError.
                        if not question.endswith("?"):
                            question += "?"
                        answers = [clean_up_spaces(a) for a in qa["answers"]]
                        yield f"{source}_{qid}", {
                            "subset": subset,
                            "context": clean_up_spaces(context),
                            "qid": qid,
                            "idx": idx,
                            "question": clean_up_spaces(question),
                            "answers": answers,
                            # MRQA examples carry at least one answer;
                            # expose the first one as a scalar convenience.
                            "answer": answers[0],
                        }
                        idx += 1
|
|
|
|
|
def clean_context(context):
    """Normalize a raw MRQA context string.

    Converts the MRQA layout markers ([PAR], [TLE], [SEP]) into readable
    text, strips leftover HTML-ish tags and the [DOC] marker, and trims
    surrounding whitespace.
    """
    # Layout markers become readable separators first, then the intermediate
    # result is trimmed (mirroring the original call order exactly).
    text = (
        context.replace("[PAR] ", "\n\n")
        .replace("[TLE]", "Title:")
        .replace("[SEP]", "\nPassage:")
        .strip()
    )

    # Residual markup tags are simply deleted, in the same order as before.
    junk_tags = (
        "<Li>", "</Li>",
        "<OI>", "</OI>",
        "<Ol>", "</Ol>",
        "<Dd>", "</Dd>",
        "<UI>", "</UI>",
        "<Ul>", "</Ul>",
        "<P>", "</P>",
        "[DOC]",
    )
    for tag in junk_tags:
        text = text.replace(tag, "")

    return text.strip()
|
|
|
|
|
def clean_up_spaces(s):
    """Undo tokenizer-style spacing in *s*.

    Reattaches punctuation and English contractions to the preceding word
    and converts `` / '' quote tokens to plain double quotes. Replacement
    order matters and matches the original chain exactly.
    """
    fixes = (
        (" .", "."),
        (" ?", "?"),
        (" !", "!"),
        (" ,", ","),
        (" ' ", "'"),
        (" n't", "n't"),
        (" 'm", "'m"),
        (" 's", "'s"),
        (" 've", "'ve"),
        (" 're", "'re"),
        ("( ", "("),
        (" )", ")"),
        (" %", "%"),
        ("`` ", "\""),
        (" ''", "\""),
        (" :", ":"),
    )
    cleaned = s
    for before, after in fixes:
        cleaned = cleaned.replace(before, after)
    return cleaned
|
|
|
|
|
if __name__ == '__main__':
    # Manual smoke test: build the "squad" config from this script.
    # Removed the leftover debugger anchor (`x = 5`).
    from datasets import load_dataset

    ssfd_debug = load_dataset("mrqa.py", name="squad")
|
|