Sub-tasks: extractive-qa
Languages: Spanish
# Loading script for the SQAC dataset.
import json
import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """
@article{DBLP:journals/corr/abs-2107-07253,
  author    = {Asier Guti{\'{e}}rrez{-}Fandi{\~{n}}o and
               Jordi Armengol{-}Estap{\'{e}} and
               Marc P{\`{a}}mies and
               Joan Llop{-}Palao and
               Joaqu{\'{\i}}n Silveira{-}Ocampo and
               Casimiro Pio Carrino and
               Aitor Gonzalez{-}Agirre and
               Carme Armentano{-}Oller and
               Carlos Rodr{\'{\i}}guez Penagos and
               Marta Villegas},
  title     = {Spanish Language Models},
  journal   = {CoRR},
  volume    = {abs/2107.07253},
  year      = {2021},
  url       = {https://arxiv.org/abs/2107.07253},
  archivePrefix = {arXiv},
  eprint    = {2107.07253},
  timestamp = {Wed, 21 Jul 2021 15:55:35 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/abs-2107-07253.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
            """

_DESCRIPTION = """
This dataset contains 6,247 contexts and 18,817 questions with their answers, 1 to 5 per fragment.

The sources of the contexts are:

* Encyclopedic articles from [Wikipedia in Spanish](https://es.wikipedia.org/), used under the [CC-by-sa licence](https://creativecommons.org/licenses/by-sa/3.0/legalcode).

* News from [Wikinews in Spanish](https://es.wikinews.org/), used under the [CC-by licence](https://creativecommons.org/licenses/by/2.5/).

* Text from the Spanish corpus [AnCora](http://clic.ub.edu/corpus/en), which is a mix of different newswire and literature sources, used under the [CC-by licence](https://creativecommons.org/licenses/by/4.0/legalcode).

This dataset can be used to build extractive-QA systems.
"""

_HOMEPAGE = "https://huggingface.co/datasets/BSC-TeMU/SQAC"

_URL = "https://huggingface.co/datasets/BSC-TeMU/SQAC/resolve/main/"
_TRAINING_FILE = "train.json"
_DEV_FILE = "dev.json"
_TEST_FILE = "test.json"
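
# The three files above follow a SQuAD-style JSON layout, which
# _generate_examples below walks field by field. A minimal sketch of the
# expected structure (field names are taken from the parsing code; the
# values are illustrative):
#
# {
#   "data": [
#     {
#       "title": "...",
#       "paragraphs": [
#         {
#           "context": "...",
#           "qas": [
#             {
#               "id": "...",
#               "question": "...",
#               "answers": [{"text": "...", "answer_start": 0}]
#             }
#           ]
#         }
#       ]
#     }
#   ]
# }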


class SQACConfig(datasets.BuilderConfig):
    """ Builder config for the SQAC dataset """

    def __init__(self, **kwargs):
        """BuilderConfig for SQAC.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class SQAC(datasets.GeneratorBasedBuilder):
    """SQAC Dataset."""

    BUILDER_CONFIGS = [
        SQACConfig(
            name="SQAC",
            #version=datasets.Version("1.0.1"),
            description="SQAC dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            sqac = json.load(f)
            for article in sqac["data"]:
                title = article.get("title", "").strip()
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"].strip()
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        id_ = qa["id"]

                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]

                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for ease of future expansion.
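                        # Note: because `answers` is declared in _info as a
                        # Sequence of {text, answer_start}, each example
                        # carries a dict of parallel lists rather than a
                        # list of dicts, which is what is yielded below.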
                        yield id_, {
                            "title": title,
                            "context": context,
                            "question": question,
                            "id": id_,
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }
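

# A minimal smoke test, assuming network access to the download URLs above;
# `load_dataset` can also be pointed at this script file directly:
#
# if __name__ == "__main__":
#     from datasets import load_dataset
#     print(load_dataset(__file__))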