PiC committed on
Commit
7098bc3
1 Parent(s): 3cb23aa

Create phrase_retrieval.py

Files changed (1)
  1. phrase_retrieval.py +144 -0
phrase_retrieval.py ADDED
@@ -0,0 +1,144 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""PiC: A Phrase-in-Context Dataset for Phrase Understanding and Semantic Search."""
+
+
+import json
+import os.path
+
+import datasets
+from datasets.tasks import QuestionAnsweringExtractive
+
+
+logger = datasets.logging.get_logger(__name__)
+
+
+_CITATION = """\
+
+"""
+
+_DESCRIPTION = """\
+
+"""
+
+_HOMEPAGE = ""
+
+_LICENSE = "CC-BY-4.0"
+
+_URL = "https://auburn.edu/~tmp0038/PiC/"
+_SPLITS = {
+    "train": "train-v1.0.json",
+    "dev": "dev-v1.0.json",
+    "test": "test-v1.0.json",
+}
+
+_PR_PASS = "PR-pass"
+_PR_PAGE = "PR-page"
+
+
+class PiCConfig(datasets.BuilderConfig):
+    """BuilderConfig for Phrase Retrieval in PiC."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for Phrase Retrieval in PiC.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(PiCConfig, self).__init__(**kwargs)
+
+
+class PhraseRetrieval(datasets.GeneratorBasedBuilder):
+    """Phrase Retrieval in PiC dataset. Version 1.0."""
+
+    BUILDER_CONFIGS = [PiCConfig(
+        name=_PR_PASS,
+        version=datasets.Version("1.0.0"),
+        description="The PiC Dataset for Phrase Retrieval at short passage level (~11 sentences)"
+        ),
+        PiCConfig(
+            name=_PR_PAGE,
+            version=datasets.Version("1.0.0"),
+            description="The PiC Dataset for Phrase Retrieval at Wiki page level"
+        ),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "title": datasets.Value("string"),
+                    "context": datasets.Value("string"),
+                    "question": datasets.Value("string"),
+                    "answers": datasets.features.Sequence(
+                        {
+                            "text": datasets.Value("string"),
+                            "answer_start": datasets.Value("int32"),
+                        }
+                    ),
+                }
+            ),
+            # No default supervised_keys (as we have to pass both question and context as input).
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+            task_templates=[
+                QuestionAnsweringExtractive(
+                    question_column="question", context_column="context", answers_column="answers"
+                )
+            ],
+        )
+
+    def _split_generators(self, dl_manager):
+
+        urls_to_download = {
+            "train": os.path.join(_URL, self.config.name, _SPLITS["train"]),
+            "dev": os.path.join(_URL, self.config.name, _SPLITS["dev"]),
+            "test": os.path.join(_URL, self.config.name, _SPLITS["test"])
+        }
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
+        logger.info("generating examples from = %s", filepath)
+        key = 0
+        with open(filepath, encoding="utf-8") as f:
+            pic_pr = json.load(f)
+            for example in pic_pr["data"]:
+                title = example.get("title", "")
+
+                # Features currently used are "context", "question", and "answers".
+                # Others are extracted here for the ease of future expansions.
+                yield key, {
+                    "title": title,
+                    "context": example["context"],
+                    "question": example["question"],
+                    "id": example["id"],
+                    "answers": {
+                        "answer_start": example["answers"]["answer_start"],
+                        "text": example["answers"]["text"],
+                    },
+                }
+                key += 1
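
For reference, a minimal usage sketch of this loading script with the two configs defined above ("PR-pass" and "PR-page"). The Hub repo id "PiC/phrase_retrieval" below is an assumption based on this repo's owner and filename; passing a local path to phrase_retrieval.py works the same way:

import datasets

# "PR-pass": short-passage contexts (~11 sentences); "PR-page": Wiki-page-level contexts.
# NOTE: the repo id is an assumption; a local "phrase_retrieval.py" path also works.
pr_pass = datasets.load_dataset("PiC/phrase_retrieval", name="PR-pass")

# Splits follow the script: train / validation / test.
example = pr_pass["train"][0]
print(example["question"], "->", example["answers"]["text"])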