import json

import datasets


_CITATION = """\
@inproceedings{Elgohary:Peskov:Boyd-Graber-2019,
  Title = {Can You Unpack That? Learning to Rewrite Questions-in-Context},
  Author = {Ahmed Elgohary and Denis Peskov and Jordan Boyd-Graber},
  Booktitle = {Empirical Methods in Natural Language Processing},
  Year = {2019}
}
"""

_DESCRIPTION = """\
CANARD has been preprocessed by Voskarides et al. to train and evaluate their Query Resolution Term Classification
model (QuReTeC).

CANARD is a dataset for question-in-context rewriting that consists of questions, each given in a dialog context,
together with a context-independent rewriting of the question. The context of each question is the dialog utterances
that precede the question. CANARD can be used to evaluate question rewriting models that handle important linguistic
phenomena such as coreference and ellipsis resolution.
"""

_HOMEPAGE = "https://sites.google.com/view/qanta/projects/canard"

_LICENSE = "CC BY-SA 4.0"

_URL = "https://drive.google.com/drive/folders/1e3s-V6VQqOKHrmn_kBStNsV0gGHPeJVf/"
_URLs = {
    "gold_supervision": {
        "train": _URL + "train_gold_supervision.json",
        "dev": _URL + "dev_gold_supervision.json",
        "test": _URL + "test_gold_supervision.json",
    },
    "original_all": {
        "train": _URL + "train_original_all.json",
        "dev": _URL + "dev_original_all.json",
        "test": _URL + "test_original_all.json",
    },
    "distant_supervision": {
        "train": _URL + "train_distant_supervision.json",
        "dev": _URL + "dev_distant_supervision.json",
        "test": _URL + "test_distant_supervision.json",
    },
}
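
# Note: dl_manager.download_and_extract() accepts this nested dict of URLs and
# returns a dict of the same shape whose leaves are local file paths, which
# _split_generators() below then indexes by split name.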
|
|
|
|
|
class CanardQuretec(datasets.GeneratorBasedBuilder):
    """Voskarides et al. have preprocessed CANARD in different ways depending on their experiment."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="gold_supervision",
            version=VERSION,
            description="Used for training QuReTeC with gold supervision",
        ),
        datasets.BuilderConfig(
            name="original_all",
            version=VERSION,
            description="Used for computing dataset statistics",
        ),
        datasets.BuilderConfig(
            name="distant_supervision",
            version=VERSION,
            description="Used for training QuReTeC with distant supervision",
        ),
    ]

    DEFAULT_CONFIG_NAME = "gold_supervision"

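    # A minimal usage sketch (assumes this script is saved locally as
    # ``canard_quretec.py``; the file name and the example field access are
    # illustrative, not part of the dataset itself):
    #
    #     from datasets import load_dataset
    #     dataset = load_dataset("./canard_quretec.py", "gold_supervision")
    #     print(dataset["train"][0]["cur_question"])
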
    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "prev_questions": datasets.Value("string"),
                "cur_question": datasets.Value("string"),
                "gold_terms": datasets.features.Sequence(feature=datasets.Value("string")),
                "semantic_terms": datasets.features.Sequence(feature=datasets.Value("string")),
                "overlapping_terms": datasets.features.Sequence(feature=datasets.Value("string")),
                "answer_text_with_window": datasets.Value("string"),
                "answer_text": datasets.Value("string"),
                # "bert_ner_overlap" holds nested lists of strings. Array2D cannot
                # be used for this (it requires a two-element shape), so a nested
                # Sequence feature is declared instead.
                "bert_ner_overlap": datasets.features.Sequence(
                    feature=datasets.features.Sequence(feature=datasets.Value("string"))
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No default supervised (input, label) column pair is defined.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLs[self.config.name]
        downloaded_files = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        # Each file is expected to contain a JSON array of example dicts whose
        # keys match the features declared in _info().
        with open(filepath, encoding="utf-8") as f:
            data_array = json.load(f)
        for id_, item_dict in enumerate(data_array):
            yield id_, item_dict