Datasets:

Languages:
English
ArXiv:
License:
julianschnitzler committed on
Commit
75bce8d
1 Parent(s): 02c0e76

fix dataset loading

Browse files
README.md CHANGED
@@ -11,8 +11,8 @@ size_categories:
11
  - 1K<n<10K
12
  configs:
13
  - config_name: verified
14
- data_files: "with_human_verification.json"
15
  default: true
16
  - config_name: unverified
17
- data_files: "without_human_verification.json"
18
  ---
 
11
  - 1K<n<10K
12
  configs:
13
  - config_name: verified
14
+ data_files: "data/with_human_verification.json"
15
  default: true
16
  - config_name: unverified
17
+ data_files: "data/without_human_verification.json"
18
  ---
with_human_verification.json → data/with_human_verification.json RENAMED
File without changes
without_human_verification.json → data/without_human_verification.json RENAMED
File without changes
morehopqa.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ from datasets import load_dataset
3
+ import json
4
+ import os
5
+
6
class MoreHopQA(datasets.GeneratorBasedBuilder):
    """Loading script for the MoreHopQA multi-hop QA dataset.

    Exposes two named splits backed by JSON files under ``data/``:
    ``verified`` (human-verified examples) and ``unverified``.
    """

    def _info(self):
        """Declare the feature schema every yielded example must match."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                "question": datasets.Value("string"),
                "context": datasets.Sequence({
                    "title": datasets.Value("string"),
                    "paragraphs": datasets.Sequence(datasets.Value("string")),
                }),
                "answer": datasets.Value("string"),
                "previous_question": datasets.Value("string"),
                "previous_answer": datasets.Value("string"),
                "question_decomposition": datasets.Sequence({
                    "sub_id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "paragraph_support_title": datasets.Value("string"),
                }),
                "question_on_last_hop": datasets.Value("string"),
                "answer_type": datasets.Value("string"),
                "previous_answer_type": datasets.Value("string"),
                "no_of_hops": datasets.Value("int32"),
                "reasoning_type": datasets.Value("string"),
            }),
        )

    def _split_generators(self, dl_manager):
        """Return one SplitGenerator per JSON file.

        FIX: the original included ``"default": True`` inside ``gen_kwargs``.
        Those kwargs are forwarded verbatim to ``_generate_examples``, which
        has no ``default`` parameter, so building the ``verified`` split
        raised ``TypeError``. Only ``filepath`` is passed now; split defaults
        belong in the README/config metadata, not in ``gen_kwargs``.
        """
        return [
            datasets.SplitGenerator(
                name="verified",
                gen_kwargs={
                    "filepath": os.path.join("data", "with_human_verification.json"),
                },
            ),
            datasets.SplitGenerator(
                name="unverified",
                gen_kwargs={
                    "filepath": os.path.join("data", "without_human_verification.json"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from the JSON file at *filepath*.

        FIXES vs. the original:
        * ``for item in enumerate(data)`` bound ``item`` to an
          ``(index, dict)`` tuple, so every ``item[...]`` lookup raised
          ``TypeError``; we iterate the parsed list directly.
        * Each context dict used the key ``"content"`` while ``_info``
          declares ``"paragraphs"``; the key now matches the schema so
          encoding succeeds.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for item in data:
            yield item["_id"], {
                "question": item["question"],
                "context": [{
                    "title": subitem[0],       # title is the first element of the pair
                    "paragraphs": subitem[1],  # list of paragraph strings is the second
                } for subitem in item.get("context", [])],
                "answer": item["answer"],
                "previous_question": item["previous_question"],
                "previous_answer": item["previous_answer"],
                "question_decomposition": [{
                    "sub_id": subitem["sub_id"],
                    "question": subitem["question"],
                    "answer": subitem["answer"],
                    "paragraph_support_title": subitem["paragraph_support_title"],
                } for subitem in item["question_decomposition"]],
                # NOTE: source JSON uses the abbreviated key "ques_on_last_hop".
                "question_on_last_hop": item["ques_on_last_hop"],
                "answer_type": item["answer_type"],
                "previous_answer_type": item["previous_answer_type"],
                "no_of_hops": item["no_of_hops"],
                "reasoning_type": item["reasoning_type"],
            }