TheTung committed on
Commit ea536cf
1 Parent(s): aef5261

Upload 4 files

Files changed (5):
  1. .gitattributes +1 -0
  2. SQAC.py +143 -0
  3. dev.json +0 -0
  4. test.json +0 -0
  5. train.json +3 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ train.json filter=lfs diff=lfs merge=lfs -text
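The added attribute routes train.json through Git LFS: the repository keeps only a small text pointer, and the real ~11 MB JSON lives in LFS storage. A minimal sketch (not part of this commit) for checking a local clone, assuming it is run from the repo root:

```python
from pathlib import Path

# If the LFS object has not been fetched (e.g. git-lfs is not installed),
# train.json on disk is only a short pointer file, not the real dataset.
head = Path("train.json").read_text(encoding="utf-8", errors="ignore")[:64]
if head.startswith("version https://git-lfs.github.com/spec/v1"):
    print("train.json is still an LFS pointer; run `git lfs pull` to fetch it")
else:
    print("train.json looks like the real JSON payload")
```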
SQAC.py ADDED
@@ -0,0 +1,143 @@
+ # Loading script for the SQAC dataset.
+ import json
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """
+ bibtex
+ @article{DBLP:journals/corr/abs-2107-07253,
+   author    = {Asier Guti{\'{e}}rrez{-}Fandi{\~{n}}o and
+                Jordi Armengol{-}Estap{\'{e}} and
+                Marc P{\`{a}}mies and
+                Joan Llop{-}Palao and
+                Joaqu{\'{\i}}n Silveira{-}Ocampo and
+                Casimiro Pio Carrino and
+                Aitor Gonzalez{-}Agirre and
+                Carme Armentano{-}Oller and
+                Carlos Rodr{\'{\i}}guez Penagos and
+                Marta Villegas},
+   title     = {Spanish Language Models},
+   journal   = {CoRR},
+   volume    = {abs/2107.07253},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2107.07253},
+   archivePrefix = {arXiv},
+   eprint    = {2107.07253},
+   timestamp = {Wed, 21 Jul 2021 15:55:35 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2107-07253.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DESCRIPTION = """
+ This dataset contains 6,247 contexts and 18,817 questions with their answers, 1 to 5 for each fragment.
+
+ The sources of the contexts are:
+
+ * Encyclopedic articles from [Wikipedia in Spanish](https://es.wikipedia.org/), used under [CC-by-sa licence](https://creativecommons.org/licenses/by-sa/3.0/legalcode).
+
+ * News from [Wikinews in Spanish](https://es.wikinews.org/), used under [CC-by licence](https://creativecommons.org/licenses/by/2.5/).
+
+ * Text from the Spanish corpus [AnCora](http://clic.ub.edu/corpus/en), which is a mix of different newswire and literature sources, used under [CC-by licence](https://creativecommons.org/licenses/by/4.0/legalcode).
+
+ This dataset can be used to build extractive-QA systems.
+ """
+
+ _HOMEPAGE = """"""
+
+ _URL = "https://huggingface.co/datasets/PlanTL-GOB-ES/SQAC/resolve/main/"
+ _TRAINING_FILE = "train.json"
+ _DEV_FILE = "dev.json"
+ _TEST_FILE = "test.json"
+
+
+ class SQACConfig(datasets.BuilderConfig):
+     """ Builder config for the SQAC dataset """
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for SQAC.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(SQACConfig, self).__init__(**kwargs)
+
+
+ class SQAC(datasets.GeneratorBasedBuilder):
+     """SQAC Dataset."""
+
+     BUILDER_CONFIGS = [
+         SQACConfig(
+             name="SQAC",
+             #version=datasets.Version("1.0.1"),
+             description="SQAC dataset",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(
+                         {
+                             "text": datasets.Value("string"),
+                             "answer_start": datasets.Value("int32"),
+                         }
+                     ),
+                 }
+             ),
+             # No default supervised_keys (as we have to pass both question
+             # and context as input).
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         logger.info("generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             sqac_data = json.load(f)
+             for article in sqac_data["data"]:
+                 title = article.get("title", "").strip()
+                 for paragraph in article["paragraphs"]:
+                     context = paragraph["context"].strip()
+                     for qa in paragraph["qas"]:
+                         question = qa["question"].strip()
+                         id_ = qa["id"]
+
+                         answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                         answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                         # Features currently used are "context", "question", and "answers".
+                         # Others are extracted here for the ease of future expansions.
+                         yield id_, {
+                             "title": title,
+                             "context": context,
+                             "question": question,
+                             "id": id_,
+                             "answers": {
+                                 "answer_start": answer_starts,
+                                 "text": answers,
+                             },
+                         }
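Taken together, the script downloads train.json, dev.json and test.json and yields SQuAD-style records with id, title, context, question and answers. A minimal usage sketch (not part of this commit; depending on the `datasets` version, `trust_remote_code=True` may be required for community loading scripts):

```python
from datasets import load_dataset

# "PlanTL-GOB-ES/SQAC" is the repo that _URL above points at and is used here
# only as an illustration; a local path to SQAC.py also works.
sqac = load_dataset("PlanTL-GOB-ES/SQAC")

print(sqac)               # DatasetDict with train / validation / test splits
sample = sqac["train"][0]
print(sample["question"])
print(sample["answers"])  # {"text": [...], "answer_start": [...]}
```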
dev.json ADDED
The diff for this file is too large to render. See raw diff
 
test.json ADDED
The diff for this file is too large to render. See raw diff
 
train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d5c76176646e2ae7bdcd8b5ec6f18349102a9363aa25ad7d0e48262d7480d43
+ size 11042089
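Because train.json is tracked by LFS, only this pointer is stored in the Git history; the 11 MB payload is fetched on demand. A hedged sketch for pulling the real file programmatically (the repo_id is an assumption; substitute the actual dataset repository):

```python
import json
from huggingface_hub import hf_hub_download

# Download the resolved LFS object rather than the pointer file.
path = hf_hub_download(repo_id="PlanTL-GOB-ES/SQAC", filename="train.json", repo_type="dataset")

with open(path, encoding="utf-8") as f:
    data = json.load(f)
print(len(data["data"]))  # number of articles/contexts in the train split
```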