Crystina committed
Commit 6327fcf
1 Parent(s): e1d34f0
.gitattributes CHANGED
@@ -49,3 +49,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ dev filter=lfs diff=lfs merge=lfs -text
+ test/ filter=lfs diff=lfs merge=lfs -text
+ train/ filter=lfs diff=lfs merge=lfs -text
+ train/xor-t2e-100w.jsonl filter=lfs diff=lfs merge=lfs -text
dev/xor_dev_full_v1_1.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
dev/xor_dev_retrieve_eng_span_v1_1.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
test/xor_test_full_q_only_v1_1.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
test/xor_test_retrieve_eng_span_q_only_v1_1.jsonl ADDED
The diff for this file is too large to render. See raw diff
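These dev/test JSONL files are committed whole rather than rendered as diffs. Each line is a single JSON object; the loader script added below reads the id, question, and answers fields from them. A minimal sketch for inspecting one of the files locally (the choice of file and path is illustrative, not part of the commit):

import json

# Hypothetical local path to one of the committed dev files.
with open("dev/xor_dev_full_v1_1.jsonl", encoding="utf-8") as f:
    for i, line in enumerate(f):
        record = json.loads(line)
        print(record["id"], record["question"], record.get("answers", []))
        if i >= 2:  # first few records only
            break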
 
train/.gitattributes ADDED
@@ -0,0 +1 @@
+ xor-t2e-100w.jsonl.gz filter=lfs diff=lfs merge=lfs -text
train/xor-t2e-100w.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aef9dc8c6bbf6fe94d5c751af2601cf48b4561a5712381d12178c0a1eba9e6e0
+ size 312241526
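What is committed here is only the Git LFS pointer (spec version, object hash, size); the 312 MB xor-t2e-100w.jsonl.gz itself is materialized by LFS at checkout. A minimal sketch, assuming the LFS object has been pulled locally and that each line is one training example with the fields the loader script below expects:

import gzip
import json

# Hypothetical local path, valid only after the LFS object has been fetched.
path = "train/xor-t2e-100w.jsonl.gz"

with gzip.open(path, "rt", encoding="utf-8") as f:
    for i, line in enumerate(f):
        example = json.loads(line)           # one training example per line
        print(example.get("query_id"), example.get("query"))
        if i >= 2:                           # peek at the first few rows only
            break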
xor-tydi.py ADDED
@@ -0,0 +1,119 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """XOR-TyDi dataset."""
+
+ import json
+
+ import datasets
+
+ _CITATION = """
+ @inproceedings{xorqa,
+     title = {{XOR} {QA}: Cross-lingual Open-Retrieval Question Answering},
+     author = {Akari Asai and Jungo Kasai and Jonathan H. Clark and Kenton Lee and Eunsol Choi and Hannaneh Hajishirzi},
+     booktitle = {NAACL-HLT},
+     year = {2021}
+ }
+ """
+
+ _DESCRIPTION = "dataset load script for XOR-TyDi"
+
+ base = "/home/czhang/src/task-sparse/tevatron/hgf_datasets/xor-tydi"
+ _DATASET_URLS = {
+     'train': f"{base}/xor-t2e-100w.jsonl",
+     # dev/test default to the dataset directory; pass data_files to select a specific split file.
+     'dev': f"{base}",
+     'test': f"{base}",
+ }
+
+
+ class XORTyDi(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("0.0.1")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(version=VERSION,
+                                description="XOR-TyDI train/dev/test datasets"),
+     ]
+
+     def _info(self):
+         features = datasets.Features({
+             'query_id': datasets.Value('string'),
+             'query': datasets.Value('string'),
+             'answers': [datasets.Value('string')],
+             'positive_passages': [
+                 {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
+                  'title': datasets.Value('string')}
+             ],
+             'negative_passages': [
+                 {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
+                  'title': datasets.Value('string')}
+             ],
+         })
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license="",
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Prefer user-supplied data_files; otherwise fall back to the default locations above.
+         if self.config.data_files:
+             downloaded_files = self.config.data_files
+         else:
+             downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
+         splits = [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={
+                     "files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else downloaded_files[split],
+                 },
+             ) for split in downloaded_files
+         ]
+         return splits
+
+     def _generate_examples(self, files):
+         """Yields examples."""
+         def process_train_entry(data):
+             # Normalize optional fields so every example matches the declared features.
+             if data.get('negative_passages') is None:
+                 data['negative_passages'] = []
+             if data.get('positive_passages') is None:
+                 data['positive_passages'] = []
+             if data.get('answers') is None:
+                 data['answers'] = []
+             return data['query_id'], data
+
+         def process_dev_test_entry(data):
+             # Dev/test files carry only id/question(/answers); fill the passage fields
+             # with empty lists and return a (key, example) pair like the train branch does.
+             return data["id"], {
+                 "query_id": data["id"],
+                 "query": data["question"],
+                 "answers": data.get("answers", []),
+                 "positive_passages": [],
+                 "negative_passages": [],
+             }
+
+         for filepath in files:
+             with open(filepath, encoding="utf-8") as f:
+                 for line in f:
+                     data = json.loads(line)
+
+                     if "train" in str(filepath):
+                         yield process_train_entry(data)
+                     else:
+                         yield process_dev_test_entry(data)
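
A minimal usage sketch for the loader above (not part of the commit): the data_files mapping and the choice of dev/test files are assumptions for illustration, and the train entry assumes the decompressed JSONL rather than the committed .jsonl.gz, since the script opens files with plain open().

from datasets import load_dataset

# Hypothetical invocation: point data_files at the split files in this repo.
ds = load_dataset(
    "xor-tydi.py",
    data_files={
        "train": "train/xor-t2e-100w.jsonl",
        "dev": "dev/xor_dev_full_v1_1.jsonl",
        "test": "test/xor_test_full_q_only_v1_1.jsonl",
    },
)

print(ds["dev"][0]["query_id"], ds["dev"][0]["query"])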