Jordi Armengol-Estape committed on
Commit
24dbd99
1 Parent(s): dbde4ad

first commit

.gitattributes CHANGED
@@ -39,3 +39,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp3 filter=lfs diff=lfs merge=lfs -text
 *.ogg filter=lfs diff=lfs merge=lfs -text
 *.wav filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
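This new rule routes every *.tar.gz in the repository (the split archives added below) through Git LFS, so plain Git only stores small pointer stubs. A rough sketch of the pattern's effect, using Python's fnmatch as an approximation of Git's wildmatch semantics (the file names are the ones added in this commit):

import fnmatch

# Approximate illustration of the new .gitattributes rule: files whose
# names match *.tar.gz go through Git LFS, everything else stays in
# plain Git. fnmatch only approximates Git's actual pattern matcher.
for name in ("test_real.tar.gz", "train_synth_rich_io.tar.gz", "exebench.py"):
    routed = "Git LFS" if fnmatch.fnmatch(name, "*.tar.gz") else "plain Git"
    print(f"{name:32} -> {routed}")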
exebench.py ADDED
@@ -0,0 +1,201 @@
+# coding=utf-8
+# Copyright 2022 ExeBench authors
+# The code required to produce and load this dataset is licensed under the MIT License.
+# The code samples included in this dataset keep their own licenses, which can be retrieved via their metadata.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Please note that the dataset release is still work in progress.
+
+"""The ExeBench dataset."""
+
+import json
+
+import datasets
+
+from pathlib import Path
+
+
+_CITATION = """\
+@misc{TODO
+}
+"""
+
+_DESCRIPTION = """\
+An ML-scale dataset of executable C functions
+"""  # TODO: expand
+
+_HOMEPAGE = "https://github.com/jordiae/exebench"
+
+_LICENSE = "Multiple: see each function's license (fields 'ref' and 'path')"
+
+_URL = ""  # "https://huggingface.co/datasets/jordiae/exebench-test/resolve/main/"
+
+_REMOVED_FEATURES = ["doc", "angha_error", "real_error", "angha_io_error", "real_io_error",
+                     "angha_io_pairs_are_trivial", "real_io_pairs_are_trivial"]
+
+_RENAMED_FEATURES = {"angha_deps": "synth_deps", "angha_io_pairs": "synth_io_pairs",
+                     "angha_exe_wrapper": "synth_exe_wrapper", "angha_iospec": "synth_iospec"}
+
+_FEATURES = datasets.Features(
+    {
+        "path": datasets.Value("string"),
+        "func_def": datasets.Value("string"),
+        "func_head": datasets.Value("string"),
+        "fname": datasets.Value("string"),
+        "signature": datasets.Sequence(datasets.Value("string")),
+        # "doc": datasets.Value("string"),
+        # "angha_error": datasets.Value("string"),
+        # "real_error": datasets.Value("string"),
+        # Flattened from Optional[Dict[str, Optional[FuncAsm]]] in _fixes below.
+        "asm": datasets.Sequence({'target': datasets.Value("string"), 'code': datasets.Value("string")}),
+        "synth_deps": datasets.Value("string"),
+        "real_deps": datasets.Value("string"),
+        "synth_io_pairs": datasets.Sequence({
+            "input": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
+            "output": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
+            "dummy_funcs": datasets.Value("string"),
+            "dummy_funcs_seed": datasets.Value("int64")
+        }),
+        "real_io_pairs": datasets.Sequence({
+            "input": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
+            "output": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
+            "dummy_funcs": datasets.Value("string"),
+            "dummy_funcs_seed": datasets.Value("int64")
+        }),
+        # "angha_io_error": datasets.Value("string"),
+        # "real_io_error": datasets.Value("string"),
+        "synth_exe_wrapper": datasets.Value("string"),
+        "real_exe_wrapper": datasets.Value("string"),
+        # "angha_io_pairs_are_trivial": datasets.Value("bool"),
+        # "real_io_pairs_are_trivial": datasets.Value("bool"),
+        "ref": datasets.Value("string"),
+        "synth_iospec": datasets.Value("string"),  # JSON-serialized, TODO: improve
+        "real_iospec": datasets.Value("string")  # JSON-serialized, TODO: improve
+    }
+)
+
+
+class ExeBenchConfig(datasets.BuilderConfig):
+    """BuilderConfig for ExeBench."""
+
+    def __init__(self, *args, **kwargs):
+        """BuilderConfig for ExeBench.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super().__init__(
+            *args,
+            **kwargs,
+        )
+
+
+class ExeBench(datasets.GeneratorBasedBuilder):
+    """ExeBench: an ML-scale dataset of executable C functions."""
+
+    BUILDER_CONFIGS = [
+        ExeBenchConfig(
+            name="ExeBench",
+            version=datasets.Version("1.0.1"),
+            description="Executable C dataset"
+        ),
+    ]
+
+    def _info(self):
+        """Give information and typings for the dataset."""
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types.
+            features=_FEATURES,
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        urls_to_download = {
+            # "train_not_compilable": f"{_URL}train_not_compilable.tar.gz",
+            # "train_synth_compilable": f"{_URL}train_synth_compilable.tar.gz",
+            # "train_real_compilable": f"{_URL}train_real_compilable.tar.gz",
+            # "train_synth_simple_io": f"{_URL}train_synth_simple_io.tar.gz",
+            # "train_real_simple_io": f"{_URL}train_real_simple_io.tar.gz",
+            # "train_synth_rich_io": f"{_URL}train_synth_rich_io.tar.gz",
+            # "valid_synth": f"{_URL}valid_synth.tar.gz",
+            # "valid_real": f"{_URL}valid_real.tar.gz",
+            "test_synth": f"{_URL}test_synth.tar.gz",
+            "test_real": f"{_URL}test_real.tar.gz",
+        }
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+        return [
+            # datasets.SplitGenerator(name='train_not_compilable',
+            #                         gen_kwargs={"files": downloaded_files["train_not_compilable"]}),
+            # datasets.SplitGenerator(name='train_synth_compilable',
+            #                         gen_kwargs={"files": downloaded_files["train_synth_compilable"]}),
+            # datasets.SplitGenerator(name='train_real_compilable',
+            #                         gen_kwargs={"files": downloaded_files["train_real_compilable"]}),
+            # datasets.SplitGenerator(name='train_synth_simple_io',
+            #                         gen_kwargs={"files": downloaded_files["train_synth_simple_io"]}),
+            # datasets.SplitGenerator(name='train_real_simple_io',
+            #                         gen_kwargs={"files": downloaded_files["train_real_simple_io"]}),
+            # datasets.SplitGenerator(name='train_synth_rich_io',
+            #                         gen_kwargs={"files": downloaded_files["train_synth_rich_io"]}),
+            # datasets.SplitGenerator(name='valid_synth',
+            #                         gen_kwargs={"files": downloaded_files["valid_synth"]}),
+            # datasets.SplitGenerator(name='valid_real',
+            #                         gen_kwargs={"files": downloaded_files["valid_real"]}),
+            datasets.SplitGenerator(name='test_synth',
+                                    gen_kwargs={"files": downloaded_files["test_synth"]}),
+            datasets.SplitGenerator(name='test_real',
+                                    gen_kwargs={"files": downloaded_files["test_real"]}),
+        ]
+
+    def _generate_examples(self, files):
+        """Yield examples as (key, example) tuples."""
+        key = 0
+        import zstandard as zstd  # lazy import: only needed when generating examples
+
+        for path in Path(files).rglob('*.jsonl.zst'):
+            with zstd.open(open(path, "rb"), "rt", encoding="utf-8") as f:
+                for row in f:
+                    data = json.loads(row)
+                    data = data['text']  # each jsonl row wraps the record under 'text'
+                    data = self._fixes(data)
+                    for io_pairs_kind in ('synth_io_pairs', 'real_io_pairs'):
+                        if data[io_pairs_kind]:
+                            new_io_pairs = []
+                            for e in data[io_pairs_kind]:
+                                new_e = {}
+                                new_e['input'] = [{'var': var, 'value': json.dumps(value)} for (var, value) in e['input'].items()] if e['input'] else []
+                                new_e['output'] = [{'var': var, 'value': json.dumps(value)} for (var, value) in e['output'].items()] if e['output'] else []
+                                new_e['dummy_funcs'] = e['dummy_funcs']
+                                new_e['dummy_funcs_seed'] = e['dummy_funcs_seed']
+                                new_io_pairs.append(new_e)
+                            data[io_pairs_kind] = new_io_pairs
+                    data['synth_iospec'] = json.dumps(data['synth_iospec'])
+                    data['real_iospec'] = json.dumps(data['real_iospec'])
+                    yield key, data
+                    key += 1
+
+    def _fixes(self, row):
+        # Flatten the per-target asm dict into a sequence of {'target', 'code'} records.
+        row['asm'] = [{'target': target, 'code': code['func_asm'] if code else None} for (target, code) in
+                      row['asm'].items()]  # TODO: pre_asm etc.
+        for removed_key in _REMOVED_FEATURES:
+            if removed_key in row:
+                del row[removed_key]
+        for original_key, new_key in _RENAMED_FEATURES.items():
+            row[new_key] = row[original_key]
+            del row[original_key]
+        return row
+
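With the loading script above and the test archives below in place, the dataset should be loadable through the standard datasets API. A hedged usage sketch: the repo id jordiae/exebench is an assumption (note that _URL is still empty in this commit), and only the test_synth and test_real splits are enabled here.

import json
from datasets import load_dataset  # also requires: pip install zstandard

# Only test_synth and test_real are wired up in this commit; the
# train/valid URLs in _split_generators are still commented out.
ds = load_dataset("jordiae/exebench", split="test_synth")

ex = ds[0]
print(ex["fname"])
print(ex["func_head"])

# IO pair values and the iospec fields are stored JSON-serialized by
# _generate_examples, so decode them before use.
iospec = json.loads(ex["synth_iospec"]) if ex["synth_iospec"] else None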
split_merged_asm_ok_fhead.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:667770ec358f80246ce4df02f171656ae58fa5e6ac084c563981233c1d9a4884
+size 3892434961
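Each archive in this commit is stored as a Git LFS pointer like the one above: a three-line stub recording the spec version, the SHA-256 of the real content, and its size in bytes (this stub stands in for a ~3.9 GB tarball). A minimal sketch of reading such a stub; read_lfs_pointer is a hypothetical helper, not part of this commit:

from pathlib import Path

def read_lfs_pointer(path):
    # A Git LFS pointer is a short "key value" file; split each line once.
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines())
    return fields["version"], fields["oid"], int(fields["size"])

version, oid, size = read_lfs_pointer("split_merged_asm_ok_fhead.tar.gz")
print(oid, f"{size / 1e9:.1f} GB")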
test_real.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b49087c945e356ca459ff4f3b5faa2f5a5dcae13711cc5a41778a74895c5f148
+size 7523671
test_synth.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29cace6fef3272d3d555a21d623413dea014e82e30e05b6f7231b1c2f125af8a
+size 12458662
train_no_compilable.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6182a5555e0bb0c9942b1e666b9c48605c29360cd237518c159315e97f093eb2
+size 153970953
train_real_compilable.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0879794e27c29e977871f6817c17d84d18377b305867a6bdc577775fece1ae74
+size 420840904
train_real_simple_io.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5af07059a5760eccce8344f499e0014263391b0c0ca25b0275881b1773891de
+size 22654808
train_synth_compilable.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8aaea6b7c14ff5f19a11967e40cd399ecb2f467ad8fb2eb9afd9ed8cf75f3886
+size 2154640326
train_synth_rich_io.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8eefdffc19bf64125a3777667989c9d4f011596d950703dc1ec9fef1305d485d
+size 269227686
train_synth_simple_io.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da367413202d358bc0ea9e2ac849532dd9f38b6ad25db6e0e49f9deeb9aefbe7
+size 826386193
valid_real.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8eaedcbfa380dbba870bb2c82bee17c779a86f84b9482862d790b897754aaf3
+size 11594985
valid_synth.tar.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:334f157a2378e110eefcc8b2bc13ad93b6a7f3b8d4ba84626f406ba0ee61d01c
+size 13127244