gabeorlanski
committed on
Commit
•
0749511
1
Parent(s):
be5d9cf
Update bc-mbpp.py
Browse files- bc-mbpp.py +123 -101
bc-mbpp.py
CHANGED
@@ -2,7 +2,6 @@ import json
|
|
2 |
|
3 |
import datasets
|
4 |
|
5 |
-
|
6 |
_DESCRIPTION = """The MBPP dataset in BabelCode format."""
|
7 |
|
8 |
_URL = "https://raw.githubusercontent.com/google-research/babelcode/main/data/hf_datasets/mbpp.jsonl"
|
@@ -46,108 +45,131 @@ _LICENSE = "CC-BY-4.0"
|
|
46 |
|
47 |
_VERSION = "1.0.0"
|
48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
49 |
|
50 |
class BCMBPP(datasets.GeneratorBasedBuilder):
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
),
|
61 |
-
] + [
|
62 |
-
datasets.BuilderConfig(
|
63 |
-
name=lang,
|
64 |
-
version=datasets.Version(_VERSION),
|
65 |
-
description=_DESCRIPTION + f" Examples are only in {lang}.",
|
66 |
-
)
|
67 |
-
for lang in _LANGUAGES
|
68 |
]
|
69 |
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
)
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
name=datasets.Split.TRAIN,
|
108 |
-
gen_kwargs={"filepath": data_dir, "split": "train"},
|
109 |
-
),
|
110 |
-
datasets.SplitGenerator(
|
111 |
-
name=datasets.Split.TEST,
|
112 |
-
gen_kwargs={"filepath": data_dir, "split": "test"},
|
113 |
-
),
|
114 |
-
datasets.SplitGenerator(
|
115 |
-
name=datasets.Split.VALIDATION,
|
116 |
-
gen_kwargs={"filepath": data_dir, "split": "validation"},
|
117 |
-
),
|
118 |
-
datasets.SplitGenerator(
|
119 |
-
name=datasets.Split("prompt"),
|
120 |
-
gen_kwargs={"filepath": data_dir, "split": "prompt"},
|
121 |
-
),
|
122 |
-
]
|
123 |
-
|
124 |
-
def _generate_examples(self, filepath, split):
|
125 |
-
""" Yields the examples from the dataset"""
|
126 |
-
with open(filepath, encoding='utf-8') as file:
|
127 |
-
id_ = 0
|
128 |
-
idx_range = None
|
129 |
-
if split == 'test':
|
130 |
-
idx_range=(11,510)
|
131 |
-
elif split == "train":
|
132 |
-
idx_range=(601,974)
|
133 |
-
elif split == "validation":
|
134 |
-
idx_range=(511, 600)
|
135 |
-
else:
|
136 |
-
idx_range=(1,10)
|
137 |
-
|
138 |
-
for l in file:
|
139 |
-
if not l.strip():
|
140 |
-
continue
|
141 |
-
d = json.loads(l)
|
142 |
-
|
143 |
-
if self.config.name != 'all' and d['language'] != self.config.name:
|
144 |
-
continue
|
145 |
-
|
146 |
-
idx = int(d['title'].split('/')[-1])
|
147 |
-
if not (idx_range[0] <= idx <= idx_range[1]):
|
148 |
-
continue
|
149 |
-
d['test_list'] = json.dumps(d['test_list'])
|
150 |
-
d['solution'] = d.pop('solution_python')
|
151 |
-
yield id_, d
|
152 |
-
id_+=1
|
153 |
-
|
|
|
2 |
|
3 |
import datasets
|
4 |
|
|
|
5 |
_DESCRIPTION = """The MBPP dataset in BabelCode format."""
|
6 |
|
7 |
_URL = "https://raw.githubusercontent.com/google-research/babelcode/main/data/hf_datasets/mbpp.jsonl"
|
|
|
45 |
|
46 |
_VERSION = "1.0.0"
|
47 |
|
48 |
+
# Keys moved out of each raw example into the nested "question_info"
# feature by _generate_examples (see BCMBPP below).
_QUESTION_INFO_KEYS = {
    "entry_fn_name",
    "entry_cls_name",
    "test_code",
    "test_list",
    "test_case_ids",
}
|
55 |
|
56 |
class BCMBPP(datasets.GeneratorBasedBuilder):
    """BC-MBPP"""

    VERSION = datasets.Version(_VERSION)

    # One "all" config plus one per-language config; the per-language
    # configs filter examples to that language in _generate_examples.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=datasets.Version(_VERSION),
            description=_DESCRIPTION,
        ),
    ] + [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version(_VERSION),
            description=_DESCRIPTION + f" Examples are only in {lang}.",
        ) for lang in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Returns the DatasetInfo describing the example schema."""
        features = datasets.Features({
            "qid": datasets.Value("string"),
            "title": datasets.Value("string"),
            "language": datasets.Value("string"),
            "text": datasets.Value("string"),
            "signature_with_docstring": datasets.Value("string"),
            "signature": datasets.Value("string"),
            "arguments": datasets.Sequence(datasets.Value("string")),
            "solution": datasets.Value("string"),
            # Nested per-question metadata; every value is stored as a
            # string (test_list is JSON-serialized in _generate_examples).
            "question_info":
                datasets.Features({
                    k: datasets.Value(dtype="string")
                    for k in _QUESTION_INFO_KEYS
                })
        })
        description = _DESCRIPTION
        if self.config.name != 'all':
            description = _DESCRIPTION + f" Examples are only in {self.config.name}."
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # All splits share the single downloaded JSONL file; the split
        # argument selects the MBPP task-id range in _generate_examples.
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "test"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "validation"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("prompt"),
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "prompt"
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """ Yields the examples from the dataset"""
        with open(filepath, encoding='utf-8') as file:
            id_ = 0
            # Inclusive MBPP task-id range for each split; any other
            # split name (i.e. "prompt") falls back to tasks 1-10.
            # (Replaces an if/elif chain that first dead-assigned
            # idx_range = None before overwriting it on every path.)
            split_ranges = {
                "test": (11, 510),
                "train": (601, 974),
                "validation": (511, 600),
            }
            low, high = split_ranges.get(split, (1, 10))

            for line in file:
                if not line.strip():
                    continue
                d = json.loads(line)

                # Per-language configs only keep their own language.
                if self.config.name != 'all' and d['language'] != self.config.name:
                    continue

                # Titles look like "<something>/<task id>"; the id picks the split.
                idx = int(d['title'].split('/')[-1])
                if not (low <= idx <= high):
                    continue

                # Move the question-level metadata into a nested dict.
                question_info = {k: d.pop(k) for k in _QUESTION_INFO_KEYS}

                # test_list is a list in the raw data; the feature schema
                # declares it a string, so serialize it to JSON.
                question_info['test_list'] = json.dumps(question_info['test_list'])

                d['question_info'] = question_info

                d['solution'] = d.pop('solution_python')
                yield id_, d
                id_ += 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|