Datasets:
Size:
1M<n<10M
ArXiv:
Tags:
programming-language
code
program-synthesis
automatic-code-repair
code-retrieval
code-translation
License:
update builder_script
Browse files — xCodeEval.py (+29, -25)
xCodeEval.py
CHANGED
@@ -8,6 +8,7 @@ import datasets
|
|
8 |
import urllib.parse
|
9 |
import textwrap
|
10 |
from multiprocessing import Pool, cpu_count
|
|
|
11 |
logger = datasets.logging.get_logger(__name__)
|
12 |
|
13 |
_CITATION = """\
|
@@ -187,6 +188,7 @@ _CUSTOM_FEATURES = {
|
|
187 |
"insert_cnt": datasets.Value("int32"),
|
188 |
"fix_ops_cnt": datasets.Value("int32"),
|
189 |
"difficulty": datasets.Value("int32"),
|
|
|
190 |
# "prob_desc_sample_inputs": datasets.Sequence(datasets.Value("string")),
|
191 |
# "prob_desc_sample_outputs": datasets.Sequence(datasets.Value("string")),
|
192 |
}
|
@@ -264,7 +266,6 @@ _TEXT_FEATURES = {
|
|
264 |
"code_compilation": {
|
265 |
"lang",
|
266 |
"source_code",
|
267 |
-
"tags",
|
268 |
"compilation_error",
|
269 |
"code_uid",
|
270 |
"src_uid",
|
@@ -282,9 +283,9 @@ _TEXT_FEATURES = {
|
|
282 |
}
|
283 |
|
284 |
for task in _PROBLEM_DESC_REQ_TASK:
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
_SHARDS = {
|
289 |
"apr":[
|
290 |
{"type":"directory","name":"apr/test","contents":[
|
@@ -1956,30 +1957,17 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
|
|
1956 |
|
1957 |
def _info(self):
|
1958 |
features = {
|
1959 |
-
|
1960 |
-
|
1961 |
-
|
1962 |
if self.config.name in _UNIT_TEST_REQ_TASK:
|
1963 |
features["hidden_unit_test"] = datasets.Value("string")
|
1964 |
-
# features["hidden_unit_test"] = datasets.features.Sequence(
|
1965 |
-
# {
|
1966 |
-
# "input": datasets.Value("string"),
|
1967 |
-
# "output": datasets.Sequence(datasets.Value("string")),
|
1968 |
-
# }
|
1969 |
-
# ),
|
1970 |
return datasets.DatasetInfo(
|
1971 |
-
# This is the description that will appear on the datasets page.
|
1972 |
description=self.config.description + "\n" + _DESCRIPTION,
|
1973 |
-
# datasets.features.FeatureConnectors
|
1974 |
features=datasets.Features(
|
1975 |
features
|
1976 |
-
# These are the features of your dataset like images, labels ...
|
1977 |
),
|
1978 |
-
# If there's a common (input, target) tuple from the features,
|
1979 |
-
# specify them here. They'll be used if as_supervised=True in
|
1980 |
-
# builder.as_dataset.
|
1981 |
supervised_keys=None,
|
1982 |
-
# Homepage of the dataset for documentation
|
1983 |
homepage="https://github.com/ntunlp/xCodeEval",
|
1984 |
citation=self.config.citation,
|
1985 |
)
|
@@ -1989,8 +1977,10 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
|
|
1989 |
task_name = self.config.name
|
1990 |
if task_name == "retrieval_corpus":
|
1991 |
TEST_FILE_NAMES = get_file_name(task_name, "")
|
1992 |
-
test_urls = [ BASE_URL.format(task_name=task_name, split="", file_name=urllib.parse.quote(file_name)) for file_name in TEST_FILE_NAMES]
|
|
|
1993 |
test_downloaded_files = dl_manager.download(test_urls)
|
|
|
1994 |
return [
|
1995 |
datasets.SplitGenerator(
|
1996 |
name=datasets.Split.TEST,
|
@@ -2003,13 +1993,22 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
|
|
2003 |
TRAIN_FILE_NAMES = get_file_name(task_name, "train")
|
2004 |
VALIDATION_FILE_NAMES = get_file_name(task_name, "validation")
|
2005 |
TEST_FILE_NAMES = get_file_name(task_name, "test")
|
|
|
2006 |
train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=urllib.parse.quote(file_name)) for file_name in TRAIN_FILE_NAMES]
|
2007 |
validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=urllib.parse.quote(file_name)) for file_name in VALIDATION_FILE_NAMES]
|
2008 |
test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=urllib.parse.quote(file_name)) for file_name in TEST_FILE_NAMES]
|
2009 |
|
|
|
|
|
|
|
|
|
|
|
2010 |
train_downloaded_files = dl_manager.download(train_urls)
|
2011 |
validation_downloaded_files = dl_manager.download(validation_urls)
|
2012 |
test_downloaded_files = dl_manager.download(test_urls)
|
|
|
|
|
|
|
2013 |
|
2014 |
prob_desc_file, unit_test_db_file = None, None
|
2015 |
if task_name in _PROBLEM_DESC_REQ_TASK:
|
@@ -2059,9 +2058,10 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
|
|
2059 |
with open(unit_test_db_file) as rp:
|
2060 |
unit_test_db = json.load(rp)
|
2061 |
|
|
|
2062 |
for filepath in filepaths:
|
2063 |
with open(filepath) as rp:
|
2064 |
-
for idx, line in enumerate(rp):
|
2065 |
sample = json.loads(line)
|
2066 |
for pre_feature in list(sample.keys()):
|
2067 |
if pre_feature not in _TEXT_FEATURES[task_name]:
|
@@ -2073,20 +2073,24 @@ class xCodeEval(datasets.GeneratorBasedBuilder):
|
|
2073 |
sample["hidden_unit_test"] = ""
|
2074 |
if task_name not in _PROBLEM_DESC_REQ_TASK or problem_descriptions is None:
|
2075 |
yield idx, sample # if problem_description_file is None then unit_test_db_file should be None
|
|
|
2076 |
continue
|
2077 |
|
2078 |
-
|
2079 |
src_uid = sample["src_uid"]
|
|
|
2080 |
prob_desc = problem_descriptions[src_uid]
|
2081 |
for key, ckey in _PROB_DESC_TEXT_FEATURES.items():
|
2082 |
if ckey == "prob_desc_sample_inputs" or ckey == "prob_desc_sample_outputs":
|
2083 |
sample[ckey] = json.dumps(prob_desc[key])
|
2084 |
else:
|
2085 |
sample[ckey] = prob_desc[key]
|
2086 |
-
|
2087 |
if task_name not in _UNIT_TEST_REQ_TASK or unit_test_db is None:
|
2088 |
yield idx, sample
|
|
|
2089 |
continue
|
2090 |
|
2091 |
sample["hidden_unit_test"] = json.dumps(unit_test_db[src_uid])
|
2092 |
-
yield idx, sample
|
|
|
|
8 |
import urllib.parse
|
9 |
import textwrap
|
10 |
from multiprocessing import Pool, cpu_count
|
11 |
+
import os
|
12 |
logger = datasets.logging.get_logger(__name__)
|
13 |
|
14 |
_CITATION = """\
|
|
|
188 |
"insert_cnt": datasets.Value("int32"),
|
189 |
"fix_ops_cnt": datasets.Value("int32"),
|
190 |
"difficulty": datasets.Value("int32"),
|
191 |
+
"compilation_error": datasets.Value("bool"),
|
192 |
# "prob_desc_sample_inputs": datasets.Sequence(datasets.Value("string")),
|
193 |
# "prob_desc_sample_outputs": datasets.Sequence(datasets.Value("string")),
|
194 |
}
|
|
|
266 |
"code_compilation": {
|
267 |
"lang",
|
268 |
"source_code",
|
|
|
269 |
"compilation_error",
|
270 |
"code_uid",
|
271 |
"src_uid",
|
|
|
283 |
}
|
284 |
|
285 |
for task in _PROBLEM_DESC_REQ_TASK:
|
286 |
+
_TEXT_FEATURES[task].update(set(_PROB_DESC_TEXT_FEATURES.values()))
|
287 |
+
|
288 |
+
|
289 |
_SHARDS = {
|
290 |
"apr":[
|
291 |
{"type":"directory","name":"apr/test","contents":[
|
|
|
1957 |
|
1958 |
def _info(self):
|
1959 |
features = {
|
1960 |
+
feature: _CUSTOM_FEATURES[feature] if feature in _CUSTOM_FEATURES else datasets.Value("string")
|
1961 |
+
for feature in self.config.text_features
|
1962 |
+
}
|
1963 |
if self.config.name in _UNIT_TEST_REQ_TASK:
|
1964 |
features["hidden_unit_test"] = datasets.Value("string")
|
|
|
|
|
|
|
|
|
|
|
|
|
1965 |
return datasets.DatasetInfo(
|
|
|
1966 |
description=self.config.description + "\n" + _DESCRIPTION,
|
|
|
1967 |
features=datasets.Features(
|
1968 |
features
|
|
|
1969 |
),
|
|
|
|
|
|
|
1970 |
supervised_keys=None,
|
|
|
1971 |
homepage="https://github.com/ntunlp/xCodeEval",
|
1972 |
citation=self.config.citation,
|
1973 |
)
|
|
|
1977 |
task_name = self.config.name
|
1978 |
if task_name == "retrieval_corpus":
|
1979 |
TEST_FILE_NAMES = get_file_name(task_name, "")
|
1980 |
+
test_urls = [ BASE_URL.format(task_name=task_name, split="", file_name=urllib.parse.quote(file_name)).replace("s//", "s/") for file_name in TEST_FILE_NAMES]
|
1981 |
+
# test_urls = [ BASE_URL.format(task_name=task_name, split="", file_name=file_name) for file_name in TEST_FILE_NAMES]
|
1982 |
test_downloaded_files = dl_manager.download(test_urls)
|
1983 |
+
# test_downloaded_files = test_urls
|
1984 |
return [
|
1985 |
datasets.SplitGenerator(
|
1986 |
name=datasets.Split.TEST,
|
|
|
1993 |
TRAIN_FILE_NAMES = get_file_name(task_name, "train")
|
1994 |
VALIDATION_FILE_NAMES = get_file_name(task_name, "validation")
|
1995 |
TEST_FILE_NAMES = get_file_name(task_name, "test")
|
1996 |
+
|
1997 |
train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=urllib.parse.quote(file_name)) for file_name in TRAIN_FILE_NAMES]
|
1998 |
validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=urllib.parse.quote(file_name)) for file_name in VALIDATION_FILE_NAMES]
|
1999 |
test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=urllib.parse.quote(file_name)) for file_name in TEST_FILE_NAMES]
|
2000 |
|
2001 |
+
# train_urls = [ BASE_URL.format(task_name=task_name, split="train", file_name=file_name) for file_name in TRAIN_FILE_NAMES]
|
2002 |
+
# validation_urls = [ BASE_URL.format(task_name=task_name, split="validation", file_name=file_name) for file_name in VALIDATION_FILE_NAMES]
|
2003 |
+
# test_urls = [ BASE_URL.format(task_name=task_name, split="test", file_name=file_name) for file_name in TEST_FILE_NAMES]
|
2004 |
+
|
2005 |
+
|
2006 |
train_downloaded_files = dl_manager.download(train_urls)
|
2007 |
validation_downloaded_files = dl_manager.download(validation_urls)
|
2008 |
test_downloaded_files = dl_manager.download(test_urls)
|
2009 |
+
# train_downloaded_files = train_urls
|
2010 |
+
# validation_downloaded_files = validation_urls
|
2011 |
+
# test_downloaded_files = test_urls
|
2012 |
|
2013 |
prob_desc_file, unit_test_db_file = None, None
|
2014 |
if task_name in _PROBLEM_DESC_REQ_TASK:
|
|
|
2058 |
with open(unit_test_db_file) as rp:
|
2059 |
unit_test_db = json.load(rp)
|
2060 |
|
2061 |
+
idx = 0
|
2062 |
for filepath in filepaths:
|
2063 |
with open(filepath) as rp:
|
2064 |
+
for line in rp:
|
2065 |
sample = json.loads(line)
|
2066 |
for pre_feature in list(sample.keys()):
|
2067 |
if pre_feature not in _TEXT_FEATURES[task_name]:
|
|
|
2073 |
sample["hidden_unit_test"] = ""
|
2074 |
if task_name not in _PROBLEM_DESC_REQ_TASK or problem_descriptions is None:
|
2075 |
yield idx, sample # if problem_description_file is None then unit_test_db_file should be None
|
2076 |
+
idx += 1
|
2077 |
continue
|
2078 |
|
2079 |
+
|
2080 |
src_uid = sample["src_uid"]
|
2081 |
+
# if problem_description_file is not None, the sample has `src_uid`
|
2082 |
prob_desc = problem_descriptions[src_uid]
|
2083 |
for key, ckey in _PROB_DESC_TEXT_FEATURES.items():
|
2084 |
if ckey == "prob_desc_sample_inputs" or ckey == "prob_desc_sample_outputs":
|
2085 |
sample[ckey] = json.dumps(prob_desc[key])
|
2086 |
else:
|
2087 |
sample[ckey] = prob_desc[key]
|
2088 |
+
|
2089 |
if task_name not in _UNIT_TEST_REQ_TASK or unit_test_db is None:
|
2090 |
yield idx, sample
|
2091 |
+
idx += 1
|
2092 |
continue
|
2093 |
|
2094 |
sample["hidden_unit_test"] = json.dumps(unit_test_db[src_uid])
|
2095 |
+
yield idx, sample
|
2096 |
+
idx += 1
|