Dataset: BAAI/TACO · Languages: code · Tags: code
bowen92 committed in commit 2866ccc (1 parent: bfb7f52)

dataset script file

Files changed (1): taco.py (added, +145 lines)
# coding=utf-8
# Copyright 2023 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TACO dataset."""

import ast

import datasets


_REPO_NAME = "BAAI/TACO"

_CITATION = """
"""

_DESCRIPTION = """
TACO is a benchmark for Python code generation. It contains 25,443 problems in the train split and 1,000 problems in the test split.
"""

_HOMEPAGE = "https://github.com/FlagOpen/TACO"
_DIFFICULTY = ["EASY", "MEDIUM", "MEDIUM_HARD", "HARD", "VERY_HARD"]
_DIFFICULTY_CONFIGS = ["ALL"] + _DIFFICULTY
_SKILL = ['Data structures', 'Sorting', 'Range queries', 'Complete search', 'Amortized analysis', 'Dynamic programming', 'Bit manipulation', 'Greedy algorithms']
_SKILL_CONFIGS = ["ALL"] + _SKILL
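# The splits ship with this repository as pre-built Arrow shards; the paths
# below are relative to the dataset repository root.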
_URLS = {
    "train": [f"train/data-{i:05d}-of-00009.arrow" for i in range(9)],
    "test": ["test/data-00000-of-00001.arrow"],
}


class TACOConfig(datasets.BuilderConfig):
    """BuilderConfig for the TACO dataset."""

    def __init__(self, *args, difficulties=["ALL"], skills=["ALL"], **kwargs):
        """BuilderConfig for the TACO dataset.

        Args:
            difficulties (:obj:`List[str]`): List of problem difficulty levels to load.
            skills (:obj:`List[str]`): List of algorithm skills of problems to load.
            **kwargs: keyword arguments forwarded to super.
        """
        if "ALL" in difficulties:
            assert len(difficulties) == 1
            self.filter_difficulties = False
        else:
            self.filter_difficulties = True
        if "ALL" in skills:
            assert len(skills) == 1
            self.filter_skills = False
        else:
            self.filter_skills = True

        if self.filter_difficulties:
            subset_name = '+'.join(sorted(difficulties))
            assert not self.filter_skills, "Filtering difficulties and skills together is not supported."
        elif self.filter_skills:
            subset_name = '+'.join(sorted(skills))
        else:
            subset_name = 'ALL'

        super().__init__(
            *args,
            name=subset_name,
            **kwargs,
        )

        self.subsets = {"difficulties": difficulties, "skills": skills}
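        # e.g. TACOConfig(difficulties=["EASY", "HARD"]) gets the config name "EASY+HARD".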


class TACO(datasets.GeneratorBasedBuilder):
    """TACO dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = TACOConfig
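    # One config per difficulty level and one per skill type; "ALL" comes only
    # from the difficulty list so that config names stay unique.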
    BUILDER_CONFIGS = [
        TACOConfig(difficulties=[level]) for level in _DIFFICULTY_CONFIGS
    ] + [
        TACOConfig(skills=[skill]) for skill in _SKILL_CONFIGS if skill != 'ALL'
    ]
    DEFAULT_CONFIG_NAME = "ALL"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'question': datasets.Value(dtype='string', id=None),
                'solutions': datasets.Value(dtype='string', id=None),
                'starter_code': datasets.Value(dtype='string', id=None),
                'input_output': datasets.Value(dtype='string', id=None),
                'difficulty': datasets.Value(dtype='string', id=None),
                'raw_tags': datasets.Value(dtype='string', id=None),
                'name': datasets.Value(dtype='string', id=None),
                'source': datasets.Value(dtype='string', id=None),
                'tags': datasets.Value(dtype='string', id=None),
                'skill_types': datasets.Value(dtype='string', id=None),
                'url': datasets.Value(dtype='string', id=None),
                'Expected Auxiliary Space': datasets.Value(dtype='string', id=None),
                'time_limit': datasets.Value(dtype='string', id=None),
                'date': datasets.Value(dtype='string', id=None),
                'picture_num': datasets.Value(dtype='string', id=None),
                'memory_limit': datasets.Value(dtype='string', id=None),
                'Expected Time Complexity': datasets.Value(dtype='string', id=None),
            }),
            supervised_keys=None,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license="MIT License",
        )

    def _split_generators(self, dl_manager):
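        # The Arrow shards ship with the repository, so there is nothing to
        # download; the relative paths from _URLS are passed straight through.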
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": _URLS["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": _URLS["test"]}),
        ]

    def _generate_examples(self, filepath):
        key = 0
        dataset = datasets.concatenate_datasets([datasets.Dataset.from_file(file) for file in filepath])
        for data in dataset:
            difficulty = data['difficulty']
            # 'skill_types' is stored as the string form of a Python list;
            # ast.literal_eval parses it safely, unlike eval().
            skills = ast.literal_eval(data['skill_types'])
            if self.config.filter_difficulties and difficulty not in self.config.subsets['difficulties']:
                continue
            if self.config.filter_skills:
                valid_skills = self.config.subsets['skills']
                if not set(valid_skills) & set(skills):
                    continue

            yield key, {k: v for k, v in data.items() if k != 'eval_topic'}
            key += 1
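
For reference, a minimal usage sketch (an assumption, not part of this commit): load_dataset forwards extra keyword arguments such as difficulties/skills to TACOConfig, and recent versions of the datasets library may additionally require trust_remote_code=True to run this script.

    from datasets import load_dataset

    # Only EASY problems from the train split.
    easy_train = load_dataset("BAAI/TACO", difficulties=["EASY"], split="train")

    # Problems covering selected skills (cannot be combined with a
    # difficulty filter in this script).
    sorting = load_dataset("BAAI/TACO", skills=["Sorting", "Range queries"], split="train")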