Tasks: Question Answering
Sub-tasks: extractive-qa
Modalities: Text
Languages: Japanese
Size: 10K - 100K
License: CC BY-SA 3.0
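In practice the dataset is usually consumed through the datasets library rather than by running the loading script directly. The snippet below is a minimal usage sketch; it assumes the Hub repository id SkelterLabsInc/JaQuAD, taken from the data URL used in the loading script that follows.

from datasets import load_dataset

jaquad = load_dataset('SkelterLabsInc/JaQuAD')
print(jaquad)               # DatasetDict with train and validation splits
print(jaquad['train'][0])   # one question-answer record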
'''Dataset loading script for JaQuAD.

We refer to https://huggingface.co/datasets/squad_v2/blob/main/squad_v2.py.
'''
import json
import os

import datasets
_CITATION = '''
@article{SkelterLabsInc:JaQuAD,
    title  = {{JaQuAD}: Japanese Question Answering Dataset for Machine
              Reading Comprehension},
    author = {Byunghoon So and
              Kyuhong Byun and
              Kyungwon Kang and
              Seongjin Cho},
    year   = {2022},
}
'''
_DESCRIPTION = '''Japanese Question Answering Dataset (JaQuAD), released in
2022, is a human-annotated dataset created for Japanese Machine Reading
Comprehension. JaQuAD is developed to provide a SQuAD-like QA dataset in
Japanese. JaQuAD contains 39,696 question-answer pairs. Questions and answers
are manually curated by human annotators. Contexts are collected from Japanese
Wikipedia articles.
'''
_LICENSE = 'CC BY-SA 3.0'
_HOMEPAGE = 'https://skelterlabs.com/en/'
_URL = 'https://huggingface.co/datasets/SkelterLabsInc/JaQuAD/raw/main/data/'

class JaQuAD(datasets.GeneratorBasedBuilder):
    '''Builder for JaQuAD, a SQuAD-style extractive QA dataset in Japanese.'''

    VERSION = datasets.Version('0.1.0')

    def _info(self):
        # Each example pairs one question with a Wikipedia paragraph; answers
        # are stored as a sequence of (text, answer_start, answer_type).
        features = datasets.Features({
            'id': datasets.Value('string'),
            'title': datasets.Value('string'),
            'context': datasets.Value('string'),
            'question': datasets.Value('string'),
            'question_type': datasets.Value('string'),
            'answers': datasets.features.Sequence({
                'text': datasets.Value('string'),
                'answer_start': datasets.Value('int32'),
                'answer_type': datasets.Value('string'),
            }),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The corpus is sharded into 30 training files and 4 dev files; the
        # dev shards are exposed as the validation split.
        urls_to_download = {
            'train': [
                os.path.join(_URL, f'train/jaquad_train_{i:04d}.json')
                for i in range(30)
            ],
            'dev': [
                os.path.join(_URL, f'dev/jaquad_dev_{i:04d}.json')
                for i in range(4)
            ],
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepaths': downloaded_files['train']},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={'filepaths': downloaded_files['dev']},
            ),
        ]

    def _generate_examples(self, filepaths):
        # Each shard follows the SQuAD JSON layout:
        # data -> articles -> paragraphs -> question-answer pairs.
        for filename in filepaths:
            with open(filename, encoding='utf-8') as ifile:
                jaquad = json.load(ifile)
                for article in jaquad['data']:
                    title = article.get('title', '').strip()
                    for paragraph in article['paragraphs']:
                        context = paragraph['context'].strip()
                        for qa in paragraph['qas']:
                            qa_id = qa['id']
                            question = qa['question'].strip()
                            question_type = qa['question_type']
                            answer_starts = [
                                answer['answer_start']
                                for answer in qa['answers']
                            ]
                            answer_texts = [
                                answer['text'].strip()
                                for answer in qa['answers']
                            ]
                            answer_types = [
                                answer['answer_type']
                                for answer in qa['answers']
                            ]
                            yield qa_id, {
                                'title': title,
                                'context': context,
                                'question': question,
                                'question_type': question_type,
                                'id': qa_id,
                                'answers': {
                                    'text': answer_texts,
                                    'answer_start': answer_starts,
                                    'answer_type': answer_types,
                                },
                            }
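
# Minimal smoke-test sketch (not part of the original script): build the
# dataset locally with this builder and print one record. It assumes network
# access to the Hugging Face Hub so the raw JSON shards can be downloaded.
if __name__ == '__main__':
    builder = JaQuAD()
    builder.download_and_prepare()
    dataset = builder.as_dataset()
    print(dataset['train'][0])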