# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""SQUAD: The Stanford Question Answering Dataset."""
import csv

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """
@article{2016arXiv160605250R,
author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
Konstantin and {Liang}, Percy},
title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
journal = {arXiv e-prints},
year = 2016,
eid = {arXiv:1606.05250},
pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
eprint = {1606.05250},
}
"""
_DESCRIPTION = """
Stanford Question Answering Dataset (SQuAD) is a reading comprehension
dataset, consisting of questions posed by crowdworkers on a set of Wikipedia
articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
train_url = "https://raw.githubusercontent.com/Sampson2016/test/master/train.csv?token=GHSAT0AAAAAABR4XKTH73T5VNFVZ3KS33FYYVQLQAA"
_URLS = {
"train": train_url,
"test": train_url,
}
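
# A sketch of the CSV layout this loader expects (an assumption based on the
# features defined below: a header row with "text" and "label" columns, where
# "label" holds the string "0" or "1"), for example:
#
#     text,label
#     "a short example sentence",1
#     "another example sentence",0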


class Demo2Config(datasets.BuilderConfig):
    """BuilderConfig for Demo2."""

    def __init__(self, **kwargs):
        super(Demo2Config, self).__init__(**kwargs)


class Demo2(datasets.GeneratorBasedBuilder):
    """Demo2: a binary text-classification dataset read from a remote CSV file."""

    BUILDER_CONFIGS = [
        Demo2Config(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["0", "1"]),
                }
            ),
            # No default supervised_keys: the raw "text"/"label" columns are
            # exposed directly as features.
            supervised_keys=None,
            homepage="https://rajpurkar.github.io/SQuAD-explorer/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the (shared) CSV file and hand the local path to _generate_examples.
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) pairs read from the downloaded CSV file."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            demo2 = csv.DictReader(f)
            for key, row in enumerate(demo2):
                yield key, {
                    "text": row["text"],
                    "label": row["label"],
                }
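

# A minimal usage sketch (assumptions: this script is saved locally as
# ``demo2.py``, the installed ``datasets`` version still supports
# loading-script based datasets, and the access token embedded in
# ``train_url`` is still valid so the download succeeds).
if __name__ == "__main__":
    demo = datasets.load_dataset("./demo2.py", "plain_text")
    print(demo)  # DatasetDict with identical "train" and "test" splits
    print(demo["train"][0])  # e.g. {"text": "...", "label": 0}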