Size: 1M<n<10M
ArXiv:
Tags: programming-language, code, program-synthesis, automatic-code-repair, code-retrieval, code-translation
License:
update data loader

xCodeEval.py  +29 -14  CHANGED
@@ -6,6 +6,7 @@ import zipfile
 from collections import defaultdict
 import datasets
 import textwrap
+from multiprocessing import Pool, cpu_count
 logger = datasets.logging.get_logger(__name__)
 
 _CITATION = """\
@@ -185,8 +186,8 @@ _CUSTOM_FEATURES = {
     "insert_cnt": datasets.Value("int32"),
     "fix_ops_cnt": datasets.Value("int32"),
     "difficulty": datasets.Value("int32"),
-    "prob_desc_sample_inputs": datasets.Sequence(datasets.Value("string")),
-    "prob_desc_sample_outputs": datasets.Sequence(datasets.Value("string")),
+    # "prob_desc_sample_inputs": datasets.Sequence(datasets.Value("string")),
+    # "prob_desc_sample_outputs": datasets.Sequence(datasets.Value("string")),
 }
 
 _PROB_DESC_TEXT_FEATURES = {
@@ -1958,12 +1959,13 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
             for feature in self.config.text_features
         }
         if self.config.name in _UNIT_TEST_REQ_TASK:
-            features["hidden_unit_test"] = datasets.features.Sequence(
-                {
-                    "input": datasets.Value("string"),
-                    "output": datasets.Sequence(datasets.Value("string")),
-                }
-            ),
+            features["hidden_unit_test"] = datasets.Value("string")
+            # features["hidden_unit_test"] = datasets.features.Sequence(
+            #     {
+            #         "input": datasets.Value("string"),
+            #         "output": datasets.Sequence(datasets.Value("string")),
+            #     }
+            # ),
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=self.config.description + "\n" + _DESCRIPTION,
@@ -2038,7 +2040,7 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
                 }
             ),
         ]
-
+
     def _generate_examples(self, filepaths, problem_description_file = None, unit_test_db_file = None):
         """This function returns the examples"""
         task_name = self.config.name
@@ -2060,17 +2062,30 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
         with open(filepath) as rp:
             for idx, line in enumerate(rp):
                 sample = json.loads(line)
-
+                for pre_feature in list(sample.keys()):
+                    if pre_feature not in _TEXT_FEATURES[task_name]:
+                        sample.pop(pre_feature)
+                for feature in _TEXT_FEATURES[task_name]:
+                    if feature not in sample:
+                        sample[feature] = ""
+                if task_name in _UNIT_TEST_REQ_TASK:
+                    sample["hidden_unit_test"] = ""
+                if task_name not in _PROBLEM_DESC_REQ_TASK or problem_descriptions is None:
                     yield idx, sample # if problem_description_file is None then unit_test_db_file should be None
+                    continue
 
                 # if problem_description_file is not None, the sample has `src_uid`
                 src_uid = sample["src_uid"]
                 prob_desc = problem_descriptions[src_uid]
                 for key, ckey in _PROB_DESC_TEXT_FEATURES.items():
-                    sample[ckey] = prob_desc[key]
+                    if ckey == "prob_desc_sample_inputs" or ckey == "prob_desc_sample_outputs":
+                        sample[ckey] = json.dumps(prob_desc[key])
+                    else:
+                        sample[ckey] = prob_desc[key]
 
-                if task_name not in _UNIT_TEST_REQ_TASK:
+                if task_name not in _UNIT_TEST_REQ_TASK or unit_test_db is None:
                     yield idx, sample
-
-
+                    continue
+
+                sample["hidden_unit_test"] = json.dumps(unit_test_db[src_uid])
                 yield idx, sample
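
With this update, fields that were previously declared as nested sequences (prob_desc_sample_inputs, prob_desc_sample_outputs, and hidden_unit_test) are emitted as JSON-encoded strings, so a consumer has to decode them after loading. A minimal sketch of that, assuming the loader is used through datasets.load_dataset; the repository path and config name below are placeholders, not values taken from this repo:

import json
import datasets

# Placeholder repo path and config name; substitute the real ones for this dataset.
ds = datasets.load_dataset(
    "path/to/xCodeEval",
    "program_synthesis",
    split="train",
    trust_remote_code=True,  # may be needed for script-based loaders, depending on the datasets version
)

sample = ds[0]
# These fields are JSON strings after the update; decode them back into Python objects.
sample_inputs = json.loads(sample["prob_desc_sample_inputs"])    # list of sample input strings
sample_outputs = json.loads(sample["prob_desc_sample_outputs"])  # list of sample output strings
if sample.get("hidden_unit_test"):  # empty string for rows without attached unit tests
    unit_tests = json.loads(sample["hidden_unit_test"])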
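The per-sample normalization added to _generate_examples exists because the builder encodes every yielded example against the declared features, and rows whose keys do not line up with that schema can fail during Arrow encoding. The same pattern in isolation, with a made-up feature list (EXPECTED_FEATURES is illustrative; the real loader takes the names from _TEXT_FEATURES[task_name]):

import json

# Illustrative schema; the loader itself uses _TEXT_FEATURES[task_name].
EXPECTED_FEATURES = ["source_code", "lang", "src_uid"]

def normalize(raw_line: str) -> dict:
    sample = json.loads(raw_line)
    # Drop keys that are not part of the declared schema.
    for key in list(sample.keys()):
        if key not in EXPECTED_FEATURES:
            sample.pop(key)
    # Back-fill declared keys the JSON line did not contain, so every row has the same shape.
    for feature in EXPECTED_FEATURES:
        sample.setdefault(feature, "")
    return sample

print(normalize('{"source_code": "print(1)", "lang": "Python 3", "extra": 42}'))
# -> {'source_code': 'print(1)', 'lang': 'Python 3', 'src_uid': ''}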