|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""OK-VQA loading script.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
from pathlib import Path |
|
import datasets |
|
|
|
|
|
# BibTeX entry for the OK-VQA paper (Marino et al., CoRR abs/1906.00067, 2019).
# NOTE: the blank lines inside the string are intentional — they are part of
# the stored citation text as-is.
_CITATION = """\

@article{DBLP:journals/corr/abs-1906-00067,

  author    = {Kenneth Marino and

               Mohammad Rastegari and

               Ali Farhadi and

               Roozbeh Mottaghi},

  title     = {{OK-VQA:} {A} Visual Question Answering Benchmark Requiring External

               Knowledge},

  journal   = {CoRR},

  volume    = {abs/1906.00067},

  year      = {2019},

  url       = {http://arxiv.org/abs/1906.00067},

  eprinttype = {arXiv},

  eprint    = {1906.00067},

  timestamp = {Thu, 13 Jun 2019 13:36:00 +0200},

  biburl    = {https://dblp.org/rec/journals/corr/abs-1906-00067.bib},

  bibsource = {dblp computer science bibliography, https://dblp.org}

}

"""
|
|
|
|
|
_DESCRIPTION = """\ |
|
OK-VQA is a new dataset for visual question answering that requires methods which can draw upon outside knowledge to answer questions. |
|
- 14,055 open-ended questions |
|
- 5 ground truth answers per question |
|
- Manually filtered to ensure all questions require outside knowledge (e.g. from Wikipeida) |
|
- Reduced questions with most common answers to reduce dataset bias |
|
""" |
|
|
|
|
|
# Project homepage for the dataset card.
_HOMEPAGE = "https://okvqa.allenai.org/"


# License string surfaced in the dataset card (as stated on the OK-VQA site).
_LICENSE = "CC BY 4.0"


# Download URLs, keyed first by resource kind then by split.
# - annotations/questions: OK-VQA JSON files, zipped, one per split.
# - images: the underlying MS-COCO 2014 image archives (train2014 / val2014).
# _split_generators relies on this exact two-level {kind: {split: url}} shape.
_URLS = {

    "annotations": {

        "train": "https://okvqa.allenai.org/static/data/mscoco_train2014_annotations.json.zip",

        "val": "https://okvqa.allenai.org/static/data/mscoco_val2014_annotations.json.zip",

    },

    "questions": {

        "train": "https://okvqa.allenai.org/static/data/OpenEnded_mscoco_train2014_questions.json.zip",

        "val": "https://okvqa.allenai.org/static/data/OpenEnded_mscoco_val2014_questions.json.zip",

    },

    "images": {

        "train": "http://images.cocodataset.org/zips/train2014.zip",

        "val": "http://images.cocodataset.org/zips/val2014.zip",

    },

}
|
|
|
|
|
class OKVQADataset(datasets.GeneratorBasedBuilder):
    """Builder for OK-VQA, a VQA benchmark requiring outside knowledge.

    Joins OK-VQA question/annotation JSON files with the MS-COCO 2014
    images they refer to, for the ``train`` and ``validation`` splits.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata, including the example feature schema."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "question_type": datasets.Value("string"),
                "confidence": datasets.Value("int32"),
                # One entry per crowd answer to the question.
                "answers": [
                    {
                        "answer": datasets.Value("string"),
                        "raw_answer": datasets.Value("string"),
                        "answer_confidence": datasets.Value("string"),
                        "answer_id": datasets.Value("int64"),
                    }
                ],
                "image_id": datasets.Value("int64"),
                "answer_type": datasets.Value("string"),
                "question_id": datasets.Value("int64"),
                "question": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract all archives, then build per-split kwargs.

        For each resource kind in ``_URLS`` (annotations, questions, images)
        the extracted archive directory contains an entry named after the zip
        file minus its ``.zip`` suffix; that inner path is what the generator
        needs. Kinds without a URL for a split map to ``None``.
        """
        data_dir = dl_manager.download_and_extract(_URLS)
        gen_kwargs = {}
        for split_name in ["train", "val"]:
            split_kwargs = {}
            for dir_name in _URLS:
                if split_name in data_dir[dir_name]:
                    # e.g. "train2014.zip" -> extracted dir contains "train2014".
                    inner_name = Path(_URLS[dir_name][split_name]).name[: -len(".zip")]
                    split_kwargs[f"{dir_name}_path"] = (
                        Path(data_dir[dir_name][split_name]) / inner_name
                    )
                else:
                    split_kwargs[f"{dir_name}_path"] = None
            gen_kwargs[split_name] = split_kwargs

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=gen_kwargs["train"],
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=gen_kwargs["val"],
            ),
        ]

    def _generate_examples(self, questions_path, annotations_path, images_path):
        """Yield ``(question_id, example)`` pairs for one split.

        Joins each question with its annotation record on ``question_id`` and
        attaches the path of the corresponding COCO 2014 image.

        Raises:
            ValueError: if a question or annotation record does not carry
                exactly the expected set of keys.
        """
        # Context managers so the JSON file handles are closed deterministically
        # (the original `json.load(open(...))` leaked them).
        with open(annotations_path, "r", encoding="utf-8") as f:
            annotations_data = json.load(f)
        with open(questions_path, "r", encoding="utf-8") as f:
            questions_data = json.load(f)

        # question_id -> annotation record. (The original first built a dict of
        # empty lists only to overwrite every value; map directly instead.)
        qa = {ann["question_id"]: ann for ann in annotations_data["annotations"]}

        expected_question_keys = {"image_id", "question", "question_id"}
        expected_annotation_keys = {
            "question_type",
            "confidence",
            "answers",
            "image_id",
            "answer_type",
            "question_id",
        }

        for question in questions_data["questions"]:
            annotation = qa[question["question_id"]]

            # Explicit validation instead of `assert` (asserts vanish under -O).
            if set(question) != expected_question_keys:
                raise ValueError(f"unexpected question keys: {sorted(question)}")
            if set(annotation) != expected_annotation_keys:
                raise ValueError(f"unexpected annotation keys: {sorted(annotation)}")

            # Copy so the parsed `questions_data` records are not mutated in place.
            record = dict(question)
            record.update(annotation)
            # COCO 2014 image files are named COCO_<split>2014_<12-digit id>.jpg;
            # images_path.name is e.g. "train2014" (from the extracted zip name).
            record["image"] = str(
                images_path / f"COCO_{images_path.name}_{record['image_id']:0>12}.jpg"
            )
            yield record["question_id"], record
|
|