"""TriviaQA: A Reading Comprehension Dataset.""" |

import glob
import json
import os

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """
@article{2017arXivtriviaqa,
       author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},
                 Daniel and {Zettlemoyer}, Luke},
        title = "{TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}",
      journal = {arXiv e-prints},
         year = 2017,
          eid = {arXiv:1705.03551},
        pages = {arXiv:1705.03551},
archivePrefix = {arXiv},
       eprint = {1705.03551},
}
"""

_DOWNLOAD_URL_TMPL = "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-{}.tar.gz"
_WEB_EVIDENCE_DIR = "evidence/web"
_WIKI_EVIDENCE_DIR = "evidence/wikipedia"

_DESCRIPTION = """\
TriviaQA is a reading comprehension dataset containing over 650K
question-answer-evidence triples. TriviaQA includes 95K question-answer
pairs authored by trivia enthusiasts and independently gathered evidence
documents, six per question on average, that provide high quality distant
supervision for answering the questions.
"""

_RC_DESCRIPTION = """\
Question-answer pairs where all documents for a given question contain the
answer string(s).
"""

_UNFILTERED_DESCRIPTION = """\
110k question-answer pairs for open domain QA where not all documents for a
given question contain the answer string(s). This makes the unfiltered dataset
more appropriate for IR-style QA.
"""

_CONTEXT_ADDENDUM = "Includes context from Wikipedia and search results."


def _web_evidence_dir(tmp_dir):
    return sorted(glob.glob(os.path.join(tmp_dir, _WEB_EVIDENCE_DIR)))


def _wiki_evidence_dir(tmp_dir):
    return sorted(glob.glob(os.path.join(tmp_dir, _WIKI_EVIDENCE_DIR)))
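

# For "rc" configurations _qa_files resolves to files such as qa/web-train.json
# and qa/wikipedia-dev.json inside the extracted rc archive; for "unfiltered"
# configurations it resolves to triviaqa-unfiltered/unfiltered-web-train.json
# (paths shown schematically, relative to the extracted archive roots).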
def _qa_files(file_paths, sources, split, unfiltered):
    qa_dir = (
        os.path.join(file_paths["unfiltered"], "triviaqa-unfiltered")
        if unfiltered
        else os.path.join(file_paths["rc"], "qa")
    )

    suffix_mapping = {
        datasets.Split.TRAIN: "train.json",
        datasets.Split.VALIDATION: "dev.json",
        datasets.Split.TEST: "test-without-answers.json",
    }
    suffix = suffix_mapping[split]

    filenames = [f"unfiltered-web-{suffix}"] if unfiltered else [f"{source}-{suffix}" for source in sources]

    filenames = [os.path.join(qa_dir, filename) for filename in filenames]

    return sorted(filenames)


class TriviaQaConfig(datasets.BuilderConfig):
    """BuilderConfig for TriviaQA."""

    def __init__(self, source="all", unfiltered=False, exclude_context=False, **kwargs):
        """BuilderConfig for TriviaQA.

        Args:
          source: str, which evidence source(s) to use: "all", "web" or "wikipedia".
          unfiltered: bool, whether to use the unfiltered version of the dataset,
            intended for open-domain QA.
          exclude_context: bool, whether to exclude Wikipedia and search context for
            reduced size.
          **kwargs: keyword arguments forwarded to super.
        """
        name = "unfiltered" if unfiltered else "rc"

        assert source in ["all", "web", "wikipedia"]
        assert not unfiltered or source == "all"

        if source != "all":
            name += f".{source}"

        if exclude_context:
            name += ".nocontext"
        description = _UNFILTERED_DESCRIPTION if unfiltered else _RC_DESCRIPTION
        if not exclude_context:
            description += _CONTEXT_ADDENDUM
        super(TriviaQaConfig, self).__init__(
            name=name, description=description, version=datasets.Version("1.2.0"), **kwargs
        )

        self.sources = ["web", "wikipedia"] if source == "all" else [source]
        self.unfiltered = unfiltered
        self.exclude_context = exclude_context


class TriviaQa(datasets.GeneratorBasedBuilder):
    """TriviaQA is a reading comprehension dataset.

    It contains over 650K question-answer-evidence triples.
    """
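
    # The combinations below produce eight named configs: "rc", "rc.nocontext",
    # "rc.web", "rc.web.nocontext", "rc.wikipedia", "rc.wikipedia.nocontext",
    # "unfiltered" and "unfiltered.nocontext"; see TriviaQaConfig.__init__ for
    # how the names are assembled.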
    BUILDER_CONFIGS = [
        TriviaQaConfig(source="all", unfiltered=False, exclude_context=False),
        TriviaQaConfig(source="all", unfiltered=False, exclude_context=True),
        TriviaQaConfig(source="all", unfiltered=True, exclude_context=False),
        TriviaQaConfig(source="all", unfiltered=True, exclude_context=True),
        TriviaQaConfig(source="web", unfiltered=False, exclude_context=False),
        TriviaQaConfig(source="web", unfiltered=False, exclude_context=True),
        TriviaQaConfig(source="wikipedia", unfiltered=False, exclude_context=False),
        TriviaQaConfig(source="wikipedia", unfiltered=False, exclude_context=True),
    ]
    DEFAULT_WRITER_BATCH_SIZE = 1000

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "question": datasets.Value("string"),
                    "question_id": datasets.Value("string"),
                    "question_source": datasets.Value("string"),
                    "entity_pages": datasets.features.Sequence(
                        {
                            "doc_source": datasets.Value("string"),
                            "filename": datasets.Value("string"),
                            "title": datasets.Value("string"),
                            "wiki_context": datasets.Value("string"),
                        }
                    ),
                    "search_results": datasets.features.Sequence(
                        {
                            "description": datasets.Value("string"),
                            "filename": datasets.Value("string"),
                            "rank": datasets.Value("int32"),
                            "title": datasets.Value("string"),
                            "url": datasets.Value("string"),
                            "search_context": datasets.Value("string"),
                        }
                    ),
                    "answer": dict(
                        {
                            "aliases": datasets.features.Sequence(datasets.Value("string")),
                            "normalized_aliases": datasets.features.Sequence(datasets.Value("string")),
                            "matched_wiki_entity_name": datasets.Value("string"),
                            "normalized_matched_wiki_entity_name": datasets.Value("string"),
                            "normalized_value": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "value": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="http://nlp.cs.washington.edu/triviaqa/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        cfg = self.config
        download_urls = dict()
        if not (cfg.unfiltered and cfg.exclude_context):
            download_urls["rc"] = _DOWNLOAD_URL_TMPL.format("rc")
        if cfg.unfiltered:
            download_urls["unfiltered"] = _DOWNLOAD_URL_TMPL.format("unfiltered")
        file_paths = dl_manager.download_and_extract(download_urls)

        if cfg.exclude_context:
            web_evidence_dir = None
            wiki_evidence_dir = None
        else:
            web_evidence_dir = os.path.join(file_paths["rc"], _WEB_EVIDENCE_DIR)
            wiki_evidence_dir = os.path.join(file_paths["rc"], _WIKI_EVIDENCE_DIR)

        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "files": _qa_files(file_paths, cfg.sources, name, cfg.unfiltered),
                    "web_dir": web_evidence_dir,
                    "wiki_dir": wiki_evidence_dir,
                },
            )
            for name in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
        ]

    def _generate_examples(self, files, web_dir, wiki_dir):
        """Yields examples parsed from the QA files, attaching evidence context when available."""
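
        # Each record in a QA file's "Data" list looks roughly like this (only the
        # fields this parser relies on are shown):
        #   {"Question": ..., "QuestionId": ..., "QuestionSource": ...,
        #    "Answer": {"Aliases": [...], "NormalizedAliases": [...], "Value": ..., ...},
        #    "EntityPages": [{"Filename": ..., "Title": ..., ...}],
        #    "SearchResults": [{"Filename": ..., "Rank": ..., "Url": ..., ...}]}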
        def parse_example(article):
            """Return a single example from an article JSON record."""

            def _strip(collection):
                return [item.strip() for item in collection]

            if "Answer" in article:
                answer = article["Answer"]
                answer_dict = {
                    "aliases": _strip(answer["Aliases"]),
                    "normalized_aliases": _strip(answer["NormalizedAliases"]),
                    "matched_wiki_entity_name": answer.get("MatchedWikiEntryName", "").strip(),
                    "normalized_matched_wiki_entity_name": answer.get("NormalizedMatchedWikiEntryName", "").strip(),
                    "normalized_value": answer["NormalizedValue"].strip(),
                    "type": answer["Type"].strip(),
                    "value": answer["Value"].strip(),
                }
            else:
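                # Test-split records ("test-without-answers.json") ship without an
                # "Answer" block, so fill the answer fields with placeholders.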
                answer_dict = {
                    "aliases": [],
                    "normalized_aliases": [],
                    "matched_wiki_entity_name": "<unk>",
                    "normalized_matched_wiki_entity_name": "<unk>",
                    "normalized_value": "<unk>",
                    "type": "",
                    "value": "<unk>",
                }

            if self.config.exclude_context:
                article["SearchResults"] = []
                article["EntityPages"] = []

            def _add_context(collection, context_field, file_dir):
                """Adds context from file, or skips if file does not exist."""
                new_items = []
                for item in collection:
                    if "Filename" not in item:
                        logger.info("Missing context 'Filename', skipping.")
                        continue

                    new_item = item.copy()
                    fname = item["Filename"]
                    try:
                        with open(os.path.join(file_dir, fname), encoding="utf-8") as f:
                            new_item[context_field] = f.read()
                    except (IOError, FileNotFoundError):
                        logger.info("File does not exist, skipping: %s", fname)
                        continue
                    new_items.append(new_item)
                return new_items

            def _strip_if_str(v):
                return v.strip() if isinstance(v, str) else v

            def _transpose_and_strip_dicts(dicts, field_names):
                return {
                    datasets.naming.camelcase_to_snakecase(k): [_strip_if_str(d[k]) for d in dicts]
                    for k in field_names
                }

            search_results = _transpose_and_strip_dicts(
                _add_context(article.get("SearchResults", []), "SearchContext", web_dir),
                ["Description", "Filename", "Rank", "Title", "Url", "SearchContext"],
            )

            entity_pages = _transpose_and_strip_dicts(
                _add_context(article.get("EntityPages", []), "WikiContext", wiki_dir),
                ["DocSource", "Filename", "Title", "WikiContext"],
            )

            question = article["Question"].strip()
            question_id = article["QuestionId"]
            question_source = article["QuestionSource"].strip()

            return {
                "entity_pages": entity_pages,
                "search_results": search_results,
                "question": question,
                "question_id": question_id,
                "question_source": question_source,
                "answer": answer_dict,
            }

        for filepath in files:
            logger.info("generating examples from = %s", filepath)
            fname = os.path.basename(filepath)
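
            # The qa/*.json files can be very large, so rather than json.load()-ing a
            # whole file we reassemble one "Data" record at a time. In the released
            # files each record's opening and closing braces sit on their own lines,
            # indented by eight spaces, which the sentinel strings below rely on.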
            with open(filepath, encoding="utf-8") as f:
                current_record = ""
                for line in f:
                    if line == "        {\n":
                        current_record = line
                    elif line.startswith("        }"):
                        article = json.loads(current_record + "}")
                        current_record = ""
                        example = parse_example(article)
                        yield "%s_%s" % (fname, example["question_id"]), example
                    else:
                        current_record += line