Upload 8 files
- EleutherAI-pythia-12b-0shot-shelloutput.txt +24 -0
- EleutherAI-pythia-12b-0shot/results.json +404 -0
- EleutherAI-pythia-12b-5shot-shelloutput.txt +24 -0
- EleutherAI-pythia-12b-5shot/results.json +404 -0
- sft-pythia-12b-0shot-shelloutput.txt +24 -0
- sft-pythia-12b-0shot/results.json +388 -0
- sft-pythia-12b-5shot-shelloutput.txt +24 -0
- sft-pythia-12b-5shot/results.json +388 -0
EleutherAI-pythia-12b-0shot-shelloutput.txt
ADDED
@@ -0,0 +1,24 @@
bootstrapping for stddev: perplexity
hf (pretrained=EleutherAI/pythia-12b,parallelize=True), limit: None, num_fewshot: 0, batch_size: 4
| Task |Version|Filter| Metric | Value | |Stderr|
|--------------|-------|------|---------------|------:|---|-----:|
|arc_challenge |Yaml |none |acc | 0.3157|± |0.0136|
| | |none |acc_norm | 0.3515|± |0.0140|
|arc_easy |Yaml |none |acc | 0.7033|± |0.0094|
| | |none |acc_norm | 0.6372|± |0.0099|
|boolq |Yaml |none |acc | 0.6722|± |0.0082|
|hellaswag |Yaml |none |acc | 0.5038|± |0.0050|
| | |none |acc_norm | 0.6728|± |0.0047|
|lambada_openai|Yaml |none |perplexity | 3.9283|± |0.0838|
| | |none |acc | 0.7056|± |0.0063|
|openbookqa |Yaml |none |acc | 0.2640|± |0.0197|
| | |none |acc_norm | 0.3800|± |0.0217|
|piqa |Yaml |none |acc | 0.7612|± |0.0099|
| | |none |acc_norm | 0.7699|± |0.0098|
|sciq |Yaml |none |acc | 0.9040|± |0.0093|
| | |none |acc_norm | 0.8500|± |0.0113|
|wikitext |Yaml |none |word_perplexity|16.1038| | |
| | |none |byte_perplexity| 1.5811| | |
| | |none |bits_per_byte | 0.6610| | |
|winogrande |Yaml |none |acc | 0.6354|± |0.0135|
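The `hf (pretrained=...)` header line above records the full run configuration: the Hugging Face backend, the model arguments, the evaluation limit, the few-shot count, and the batch size. Below is a minimal sketch of reproducing this kind of run through the harness's Python API; it assumes `lm_eval.simple_evaluate` accepts these keyword arguments (true of recent lm-evaluation-harness releases, though the exact entry point at the commit pinned in these results, d1a44c8, may differ), and the output path is only illustrative.

```python
# Hedged sketch: re-run the 0-shot evaluation reported above.
# Assumes lm_eval.simple_evaluate exists with these keyword arguments;
# the harness version used for these results (git d1a44c8) may expose
# a slightly different interface.
import json

import lm_eval  # pip install lm-eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-12b,parallelize=True",
    tasks=["arc_challenge", "arc_easy", "boolq", "hellaswag", "lambada_openai",
           "openbookqa", "piqa", "sciq", "wikitext", "winogrande"],
    num_fewshot=0,
    batch_size=4,
)

# The returned dict mirrors the results.json files in this upload;
# default=str stringifies the callable entries under "configs".
with open("results.json", "w") as f:
    json.dump(results, f, indent=2, default=str)
```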
EleutherAI-pythia-12b-0shot/results.json
ADDED
@@ -0,0 +1,404 @@
{
  "results": {
    "arc_challenge": {"acc,none": 0.31569965870307165, "acc_stderr,none": 0.013582571095815295, "acc_norm,none": 0.3515358361774744, "acc_norm_stderr,none": 0.013952413699600931},
    "arc_easy": {"acc,none": 0.7032828282828283, "acc_stderr,none": 0.009373559492986851, "acc_norm,none": 0.6372053872053872, "acc_norm_stderr,none": 0.009865936757013938},
    "boolq": {"acc,none": 0.67217125382263, "acc_stderr,none": 0.008210243237673385},
    "hellaswag": {"acc,none": 0.5037841067516431, "acc_stderr,none": 0.004989638507409938, "acc_norm,none": 0.6727743477394941, "acc_norm_stderr,none": 0.004682414968323615},
    "lambada_openai": {"perplexity,none": 3.928303151284302, "perplexity_stderr,none": 0.08380809965264784, "acc,none": 0.7056083834659421, "acc_stderr,none": 0.006349750457397497},
    "openbookqa": {"acc,none": 0.264, "acc_stderr,none": 0.019732885585922098, "acc_norm,none": 0.38, "acc_norm_stderr,none": 0.02172888143870172},
    "piqa": {"acc,none": 0.7611534276387377, "acc_stderr,none": 0.009948120385337496, "acc_norm,none": 0.7698585418933623, "acc_norm_stderr,none": 0.009820832826839813},
    "sciq": {"acc,none": 0.904, "acc_stderr,none": 0.009320454434783203, "acc_norm,none": 0.85, "acc_norm_stderr,none": 0.01129723982340931},
    "wikitext": {"word_perplexity,none": 16.10381134238307, "byte_perplexity,none": 1.5811322783219102, "bits_per_byte,none": 0.6609580693383097},
    "winogrande": {"acc,none": 0.6353591160220995, "acc_stderr,none": 0.013527746622429842}
  },
  "configs": {
    "arc_challenge": {"task": "arc_challenge", "group": ["ai2_arc", "multiple_choice"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Challenge", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "arc_easy": {"task": "arc_easy", "group": ["ai2_arc", "multiple_choice"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Easy", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "boolq": {"task": "boolq", "group": ["super-glue-lm-eval-v1"], "dataset_path": "super_glue", "dataset_name": "boolq", "training_split": "train", "validation_split": "validation", "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:", "doc_to_target": "label", "doc_to_choice": ["no", "yes"], "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc"}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "passage"},
    "hellaswag": {"task": "hellaswag", "group": ["multiple_choice"], "dataset_path": "hellaswag", "training_split": "train", "validation_split": "validation", "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace(' ', ' ')}}", "doc_to_target": "{{label}}", "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', ' ', ' ')|list}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false},
    "lambada_openai": {"task": "lambada_openai", "group": ["lambada", "loglikelihood", "perplexity"], "dataset_path": "EleutherAI/lambada_openai", "dataset_name": "default", "test_split": "test", "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}", "doc_to_target": "{{' '+text.split(' ')[-1]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "perplexity", "aggregation": "perplexity", "higher_is_better": false}, {"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "loglikelihood", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{text}}"},
    "openbookqa": {"task": "openbookqa", "group": ["multiple_choice"], "dataset_path": "openbookqa", "dataset_name": "main", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "question_stem", "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "question_stem"},
    "piqa": {"task": "piqa", "group": ["multiple_choice"], "dataset_path": "piqa", "training_split": "train", "validation_split": "validation", "doc_to_text": "Question: {{goal}}\nAnswer:", "doc_to_target": "label", "doc_to_choice": "{{[sol1, sol2]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "goal"},
    "sciq": {"task": "sciq", "group": ["multiple_choice"], "dataset_path": "sciq", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:", "doc_to_target": 3, "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{support}} {{question}}"},
    "wikitext": {"task": "wikitext", "group": ["perplexity", "loglikelihood_rolling"], "dataset_path": "EleutherAI/wikitext_document_level", "dataset_name": "wikitext-2-raw-v1", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "", "doc_to_target": "<function wikitext_detokenizer at 0x7f1b6d8b9120>", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "word_perplexity"}, {"metric": "byte_perplexity"}, {"metric": "bits_per_byte"}], "output_type": "loglikelihood_rolling", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{page}}"},
    "winogrande": {"task": "winogrande", "dataset_path": "winogrande", "dataset_name": "winogrande_xl", "training_split": "train", "validation_split": "validation", "doc_to_text": "<function doc_to_text at 0x7f1b6d8b9360>", "doc_to_target": "<function doc_to_target at 0x7f1b6d8b96c0>", "doc_to_choice": "<function doc_to_choice at 0x7f1b6d8b9a20>", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "sentence"}
  },
  "versions": {"arc_challenge": "Yaml", "arc_easy": "Yaml", "boolq": "Yaml", "hellaswag": "Yaml", "lambada_openai": "Yaml", "openbookqa": "Yaml", "piqa": "Yaml", "sciq": "Yaml", "wikitext": "Yaml", "winogrande": "Yaml"},
  "config": {"model": "hf", "model_args": "pretrained=EleutherAI/pythia-12b,parallelize=True", "batch_size": "4", "batch_sizes": [], "device": null, "use_cache": null, "limit": null, "bootstrap_iters": 100000},
  "git_hash": "d1a44c8"
}
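The metric keys in the "results" block follow the harness's "<metric>,<filter>" naming: "acc,none" is accuracy under the default (no) filter and pairs with "acc_stderr,none". A standard-library-only sketch for recovering the per-task numbers shown in the shell-output table, using the path from this upload:

```python
# Minimal sketch: print task / metric / value (± stderr) from one of the
# uploaded results.json files using only the standard library.
import json

with open("EleutherAI-pythia-12b-0shot/results.json") as f:
    data = json.load(f)

for task, metrics in data["results"].items():
    for key, value in metrics.items():
        metric, _, filt = key.partition(",")
        if metric.endswith("_stderr"):
            continue  # reported next to its parent metric below
        stderr = metrics.get(f"{metric}_stderr,{filt}")
        suffix = f" ± {stderr:.4f}" if isinstance(stderr, float) else ""
        print(f"{task:<15} {metric:<16} {value:.4f}{suffix}")
```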
EleutherAI-pythia-12b-5shot-shelloutput.txt
ADDED
@@ -0,0 +1,24 @@
bootstrapping for stddev: perplexity
hf (pretrained=EleutherAI/pythia-12b,parallelize=True), limit: None, num_fewshot: 5, batch_size: 4
| Task |Version|Filter| Metric | Value | |Stderr|
|--------------|-------|------|---------------|------:|---|-----:|
|arc_challenge |Yaml |none |acc | 0.3686|± |0.0141|
| | |none |acc_norm | 0.4010|± |0.0143|
|arc_easy |Yaml |none |acc | 0.7088|± |0.0093|
| | |none |acc_norm | 0.7146|± |0.0093|
|boolq |Yaml |none |acc | 0.6795|± |0.0082|
|hellaswag |Yaml |none |acc | 0.5043|± |0.0050|
| | |none |acc_norm | 0.6830|± |0.0046|
|lambada_openai|Yaml |none |perplexity | 4.7835|± |0.1073|
| | |none |acc | 0.6625|± |0.0066|
|openbookqa |Yaml |none |acc | 0.2940|± |0.0204|
| | |none |acc_norm | 0.3980|± |0.0219|
|piqa |Yaml |none |acc | 0.7699|± |0.0098|
| | |none |acc_norm | 0.7731|± |0.0098|
|sciq |Yaml |none |acc | 0.9480|± |0.0070|
| | |none |acc_norm | 0.9510|± |0.0068|
|wikitext |Yaml |none |word_perplexity|16.1038| | |
| | |none |byte_perplexity| 1.5811| | |
| | |none |bits_per_byte | 0.6610| | |
|winogrande |Yaml |none |acc | 0.6354|± |0.0135|
EleutherAI-pythia-12b-5shot/results.json
ADDED
@@ -0,0 +1,404 @@
{
  "results": {
    "arc_challenge": {"acc,none": 0.36860068259385664, "acc_stderr,none": 0.014097810678042194, "acc_norm,none": 0.40102389078498296, "acc_norm_stderr,none": 0.014322255790719867},
    "arc_easy": {"acc,none": 0.7087542087542088, "acc_stderr,none": 0.009322788837938856, "acc_norm,none": 0.7146464646464646, "acc_norm_stderr,none": 0.00926628058499775},
    "boolq": {"acc,none": 0.6795107033639144, "acc_stderr,none": 0.008162016261049398},
    "hellaswag": {"acc,none": 0.504282015534754, "acc_stderr,none": 0.0049895984262495335, "acc_norm,none": 0.6830312686715794, "acc_norm_stderr,none": 0.004643441945489853},
    "lambada_openai": {"perplexity,none": 4.783522630921447, "perplexity_stderr,none": 0.10727737476272488, "acc,none": 0.6625266834853484, "acc_stderr,none": 0.006587694938528712},
    "openbookqa": {"acc,none": 0.294, "acc_stderr,none": 0.020395095484936614, "acc_norm,none": 0.398, "acc_norm_stderr,none": 0.02191237788577998},
    "piqa": {"acc,none": 0.7698585418933623, "acc_stderr,none": 0.009820832826839817, "acc_norm,none": 0.7731229597388466, "acc_norm_stderr,none": 0.00977158425921514},
    "sciq": {"acc,none": 0.948, "acc_stderr,none": 0.0070246242138171325, "acc_norm,none": 0.951, "acc_norm_stderr,none": 0.006829761756140924},
    "wikitext": {"word_perplexity,none": 16.10381134238307, "byte_perplexity,none": 1.5811322783219102, "bits_per_byte,none": 0.6609580693383097},
    "winogrande": {"acc,none": 0.6353591160220995, "acc_stderr,none": 0.013527746622429842}
  },
  "configs": {
    "arc_challenge": {"task": "arc_challenge", "group": ["ai2_arc", "multiple_choice"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Challenge", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "arc_easy": {"task": "arc_easy", "group": ["ai2_arc", "multiple_choice"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Easy", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "boolq": {"task": "boolq", "group": ["super-glue-lm-eval-v1"], "dataset_path": "super_glue", "dataset_name": "boolq", "training_split": "train", "validation_split": "validation", "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:", "doc_to_target": "label", "doc_to_choice": ["no", "yes"], "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc"}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "passage"},
    "hellaswag": {"task": "hellaswag", "group": ["multiple_choice"], "dataset_path": "hellaswag", "training_split": "train", "validation_split": "validation", "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace(' ', ' ')}}", "doc_to_target": "{{label}}", "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', ' ', ' ')|list}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false},
    "lambada_openai": {"task": "lambada_openai", "group": ["lambada", "loglikelihood", "perplexity"], "dataset_path": "EleutherAI/lambada_openai", "dataset_name": "default", "test_split": "test", "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}", "doc_to_target": "{{' '+text.split(' ')[-1]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "perplexity", "aggregation": "perplexity", "higher_is_better": false}, {"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "loglikelihood", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{text}}"},
    "openbookqa": {"task": "openbookqa", "group": ["multiple_choice"], "dataset_path": "openbookqa", "dataset_name": "main", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "question_stem", "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "question_stem"},
    "piqa": {"task": "piqa", "group": ["multiple_choice"], "dataset_path": "piqa", "training_split": "train", "validation_split": "validation", "doc_to_text": "Question: {{goal}}\nAnswer:", "doc_to_target": "label", "doc_to_choice": "{{[sol1, sol2]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "goal"},
    "sciq": {"task": "sciq", "group": ["multiple_choice"], "dataset_path": "sciq", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:", "doc_to_target": 3, "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{support}} {{question}}"},
    "wikitext": {"task": "wikitext", "group": ["perplexity", "loglikelihood_rolling"], "dataset_path": "EleutherAI/wikitext_document_level", "dataset_name": "wikitext-2-raw-v1", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "", "doc_to_target": "<function wikitext_detokenizer at 0x7f9e93ad1120>", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "word_perplexity"}, {"metric": "byte_perplexity"}, {"metric": "bits_per_byte"}], "output_type": "loglikelihood_rolling", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{page}}"},
    "winogrande": {"task": "winogrande", "dataset_path": "winogrande", "dataset_name": "winogrande_xl", "training_split": "train", "validation_split": "validation", "doc_to_text": "<function doc_to_text at 0x7f9e93ad1360>", "doc_to_target": "<function doc_to_target at 0x7f9e93ad16c0>", "doc_to_choice": "<function doc_to_choice at 0x7f9e93ad1a20>", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "sentence"}
  },
  "versions": {"arc_challenge": "Yaml", "arc_easy": "Yaml", "boolq": "Yaml", "hellaswag": "Yaml", "lambada_openai": "Yaml", "openbookqa": "Yaml", "piqa": "Yaml", "sciq": "Yaml", "wikitext": "Yaml", "winogrande": "Yaml"},
  "config": {"model": "hf", "model_args": "pretrained=EleutherAI/pythia-12b,parallelize=True", "batch_size": "4", "batch_sizes": [], "device": null, "use_cache": null, "limit": null, "bootstrap_iters": 100000},
  "git_hash": "d1a44c8"
}
sft-pythia-12b-0shot-shelloutput.txt
ADDED
@@ -0,0 +1,24 @@
bootstrapping for stddev: perplexity
hf (pretrained=lomahony/eleuther-pythia12b-hh-sft,parallelize=True), limit: None, num_fewshot: 0, batch_size: 16
| Task |Version|Filter| Metric | Value | |Stderr|
|--------------|-------|------|---------------|------:|---|-----:|
|arc_challenge |Yaml |none |acc | 0.3106|± |0.0135|
| | |none |acc_norm | 0.3464|± |0.0139|
|arc_easy |Yaml |none |acc | 0.7012|± |0.0094|
| | |none |acc_norm | 0.6187|± |0.0100|
|boolq |Yaml |none |acc | 0.6954|± |0.0080|
|hellaswag |Yaml |none |acc | 0.5056|± |0.0050|
| | |none |acc_norm | 0.6668|± |0.0047|
|lambada_openai|Yaml |none |perplexity | 3.5325|± |0.0776|
| | |none |acc | 0.7105|± |0.0063|
|openbookqa |Yaml |none |acc | 0.2760|± |0.0200|
| | |none |acc_norm | 0.3820|± |0.0218|
|piqa |Yaml |none |acc | 0.7633|± |0.0099|
| | |none |acc_norm | 0.7644|± |0.0099|
|sciq |Yaml |none |acc | 0.9060|± |0.0092|
| | |none |acc_norm | 0.8440|± |0.0115|
|wikitext |Yaml |none |word_perplexity|16.5611| | |
| | |none |byte_perplexity| 1.5884| | |
| | |none |bits_per_byte | 0.6676| | |
|winogrande |Yaml |none |acc | 0.6346|± |0.0135|
sft-pythia-12b-0shot/results.json
ADDED
@@ -0,0 +1,388 @@
{
  "results": {
    "arc_challenge": {"acc,none": 0.310580204778157, "acc_stderr,none": 0.013522292098053069, "acc_norm,none": 0.3464163822525597, "acc_norm_stderr,none": 0.013905011180063242},
    "arc_easy": {"acc,none": 0.7011784511784511, "acc_stderr,none": 0.009392656275408732, "acc_norm,none": 0.6186868686868687, "acc_norm_stderr,none": 0.00996654249717102},
    "boolq": {"acc,none": 0.6954128440366972, "acc_stderr,none": 0.00804951448892039},
    "hellaswag": {"acc,none": 0.5055765783708425, "acc_stderr,none": 0.004989471055090951, "acc_norm,none": 0.6667994423421629, "acc_norm_stderr,none": 0.004703942346762236},
    "lambada_openai": {"perplexity,none": 3.5324751127269423, "perplexity_stderr,none": 0.07759679763422017, "acc,none": 0.7104599262565496, "acc_stderr,none": 0.006318823234213228},
    "openbookqa": {"acc,none": 0.276, "acc_stderr,none": 0.02001121929807353, "acc_norm,none": 0.382, "acc_norm_stderr,none": 0.021750820591250827},
    "piqa": {"acc,none": 0.7633297062023939, "acc_stderr,none": 0.009916841655042807, "acc_norm,none": 0.764417845484222, "acc_norm_stderr,none": 0.009901067586473916},
    "sciq": {"acc,none": 0.906, "acc_stderr,none": 0.009233052000787728, "acc_norm,none": 0.844, "acc_norm_stderr,none": 0.011480235006122356},
    "wikitext": {"word_perplexity,none": 16.56112447245529, "byte_perplexity,none": 1.588448103407034, "bits_per_byte,none": 0.6676179561638627},
    "winogrande": {"acc,none": 0.6345698500394633, "acc_stderr,none": 0.013533965097638798}
  },
  "configs": {
    "arc_challenge": {"task": "arc_challenge", "group": ["ai2_arc"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Challenge", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "arc_easy": {"task": "arc_easy", "group": ["ai2_arc"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Easy", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "boolq": {"task": "boolq", "group": ["super-glue-lm-eval-v1"], "dataset_path": "super_glue", "dataset_name": "boolq", "training_split": "train", "validation_split": "validation", "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:", "doc_to_target": "label", "doc_to_choice": ["no", "yes"], "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc"}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "passage"},
    "hellaswag": {"task": "hellaswag", "group": ["multiple_choice"], "dataset_path": "hellaswag", "training_split": "train", "validation_split": "validation", "process_docs": "<function process_docs at 0x7fa0005d55e0>", "doc_to_text": "{{query}}", "doc_to_target": "{{label}}", "doc_to_choice": "{{choices}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false},
    "lambada_openai": {"task": "lambada_openai", "group": ["lambada"], "dataset_path": "EleutherAI/lambada_openai", "dataset_name": "default", "test_split": "test", "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}", "doc_to_target": "{{' '+text.split(' ')[-1]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "perplexity", "aggregation": "perplexity", "higher_is_better": false}, {"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "loglikelihood", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{text}}"},
    "openbookqa": {"task": "openbookqa", "dataset_path": "openbookqa", "dataset_name": "main", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "question_stem", "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "question_stem"},
    "piqa": {"task": "piqa", "dataset_path": "piqa", "training_split": "train", "validation_split": "validation", "doc_to_text": "Question: {{goal}}\nAnswer:", "doc_to_target": "label", "doc_to_choice": "{{[sol1, sol2]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "goal"},
    "sciq": {"task": "sciq", "dataset_path": "sciq", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:", "doc_to_target": 3, "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{support}} {{question}}"},
    "wikitext": {"task": "wikitext", "dataset_path": "EleutherAI/wikitext_document_level", "dataset_name": "wikitext-2-raw-v1", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "", "doc_to_target": "<function wikitext_detokenizer at 0x7f9fff5441f0>", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "word_perplexity"}, {"metric": "byte_perplexity"}, {"metric": "bits_per_byte"}], "output_type": "loglikelihood_rolling", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{page}}"},
    "winogrande": {"task": "winogrande", "dataset_path": "winogrande", "dataset_name": "winogrande_xl", "training_split": "train", "validation_split": "validation", "doc_to_text": "<function doc_to_text at 0x7f9fff544430>", "doc_to_target": "<function doc_to_target at 0x7f9fff544790>", "doc_to_choice": "<function doc_to_choice at 0x7f9fff544af0>", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "sentence"}
  },
  "versions": {"arc_challenge": "Yaml", "arc_easy": "Yaml", "boolq": "Yaml", "hellaswag": "Yaml", "lambada_openai": "Yaml", "openbookqa": "Yaml", "piqa": "Yaml", "sciq": "Yaml", "wikitext": "Yaml", "winogrande": "Yaml"},
  "config": {"model": "hf", "model_args": "pretrained=lomahony/eleuther-pythia12b-hh-sft,parallelize=True", "batch_size": "16", "batch_sizes": [], "device": null, "use_cache": null, "limit": null, "bootstrap_iters": 100000},
  "git_hash": "4cda3a1c"
}
sft-pythia-12b-5shot-shelloutput.txt
ADDED
@@ -0,0 +1,24 @@
bootstrapping for stddev: perplexity
hf (pretrained=lomahony/eleuther-pythia12b-hh-sft,parallelize=True), limit: None, num_fewshot: 5, batch_size: 8
| Task |Version|Filter| Metric | Value | |Stderr|
|--------------|-------|------|---------------|------:|---|-----:|
|arc_challenge |Yaml |none |acc | 0.3712|± |0.0141|
| | |none |acc_norm | 0.4036|± |0.0143|
|arc_easy |Yaml |none |acc | 0.7201|± |0.0092|
| | |none |acc_norm | 0.7273|± |0.0091|
|boolq |Yaml |none |acc | 0.7220|± |0.0078|
|hellaswag |Yaml |none |acc | 0.5075|± |0.0050|
| | |none |acc_norm | 0.6749|± |0.0047|
|lambada_openai|Yaml |none |perplexity | 4.3599|± |0.1008|
| | |none |acc | 0.6697|± |0.0066|
|openbookqa |Yaml |none |acc | 0.3080|± |0.0207|
| | |none |acc_norm | 0.4040|± |0.0220|
|piqa |Yaml |none |acc | 0.7677|± |0.0099|
| | |none |acc_norm | 0.7704|± |0.0098|
|sciq |Yaml |none |acc | 0.9450|± |0.0072|
| | |none |acc_norm | 0.9480|± |0.0070|
|wikitext |Yaml |none |word_perplexity|16.5611| | |
| | |none |byte_perplexity| 1.5884| | |
| | |none |bits_per_byte | 0.6676| | |
|winogrande |Yaml |none |acc | 0.6346|± |0.0135|
sft-pythia-12b-5shot/results.json
ADDED
@@ -0,0 +1,388 @@
{
  "results": {
    "arc_challenge": {"acc,none": 0.371160409556314, "acc_stderr,none": 0.014117971901142818, "acc_norm,none": 0.4035836177474403, "acc_norm_stderr,none": 0.014337158914268436},
    "arc_easy": {"acc,none": 0.7201178451178452, "acc_stderr,none": 0.009212077524656533, "acc_norm,none": 0.7272727272727273, "acc_norm_stderr,none": 0.00913863072636423},
    "boolq": {"acc,none": 0.7220183486238532, "acc_stderr,none": 0.00783564446741566},
    "hellaswag": {"acc,none": 0.507468631746664, "acc_stderr,none": 0.004989224715784542, "acc_norm,none": 0.6748655646285601, "acc_norm_stderr,none": 0.004674677287148605},
    "lambada_openai": {"perplexity,none": 4.359872504756906, "perplexity_stderr,none": 0.10082046734843834, "acc,none": 0.6697069668154473, "acc_stderr,none": 0.006552457124918184},
    "openbookqa": {"acc,none": 0.308, "acc_stderr,none": 0.0206670329874661, "acc_norm,none": 0.404, "acc_norm_stderr,none": 0.02196663529383293},
    "piqa": {"acc,none": 0.7676822633297062, "acc_stderr,none": 0.009853201384168241, "acc_norm,none": 0.7704026115342764, "acc_norm_stderr,none": 0.009812682950815206},
    "sciq": {"acc,none": 0.945, "acc_stderr,none": 0.007212976294639239, "acc_norm,none": 0.948, "acc_norm_stderr,none": 0.007024624213817149},
    "wikitext": {"word_perplexity,none": 16.56112447245529, "byte_perplexity,none": 1.588448103407034, "bits_per_byte,none": 0.6676179561638627},
    "winogrande": {"acc,none": 0.6345698500394633, "acc_stderr,none": 0.013533965097638798}
  },
  "configs": {
    "arc_challenge": {"task": "arc_challenge", "group": ["ai2_arc"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Challenge", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "arc_easy": {"task": "arc_easy", "group": ["ai2_arc"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Easy", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "boolq": {"task": "boolq", "group": ["super-glue-lm-eval-v1"], "dataset_path": "super_glue", "dataset_name": "boolq", "training_split": "train", "validation_split": "validation", "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:", "doc_to_target": "label", "doc_to_choice": ["no", "yes"], "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc"}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "passage"},
    "hellaswag": {"task": "hellaswag", "group": ["multiple_choice"], "dataset_path": "hellaswag", "training_split": "train", "validation_split": "validation", "process_docs": "<function process_docs at 0x7fb4b75e5700>", "doc_to_text": "{{query}}", "doc_to_target": "{{label}}", "doc_to_choice": "{{choices}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false},
    "lambada_openai": {"task": "lambada_openai", "group": ["lambada"], "dataset_path": "EleutherAI/lambada_openai", "dataset_name": "default", "test_split": "test", "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}", "doc_to_target": "{{' '+text.split(' ')[-1]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "perplexity", "aggregation": "perplexity", "higher_is_better": false}, {"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "loglikelihood", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{text}}"},
    "openbookqa": {"task": "openbookqa", "dataset_path": "openbookqa", "dataset_name": "main", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "question_stem", "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "question_stem"},
    "piqa": {"task": "piqa", "dataset_path": "piqa", "training_split": "train", "validation_split": "validation", "doc_to_text": "Question: {{goal}}\nAnswer:", "doc_to_target": "label", "doc_to_choice": "{{[sol1, sol2]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "goal"},
    "sciq": {"task": "sciq", "dataset_path": "sciq", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:", "doc_to_target": 3, "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{support}} {{question}}"},
    "wikitext": {"task": "wikitext", "dataset_path": "EleutherAI/wikitext_document_level", "dataset_name": "wikitext-2-raw-v1", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "", "doc_to_target": "<function wikitext_detokenizer at 0x7fb4b654f310>", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "word_perplexity"}, {"metric": "byte_perplexity"}, {"metric": "bits_per_byte"}], "output_type": "loglikelihood_rolling", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{page}}"},
    "winogrande": {"task": "winogrande", "dataset_path": "winogrande", "dataset_name": "winogrande_xl", "training_split": "train", "validation_split": "validation", "doc_to_text": "<function doc_to_text at 0x7fb4b654f550>", "doc_to_target": "<function doc_to_target at 0x7fb4b654f8b0>", "doc_to_choice": "<function doc_to_choice at 0x7fb4b654fc10>", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "sentence"}
  },
  "versions": {"arc_challenge": "Yaml", "arc_easy": "Yaml", "boolq": "Yaml", "hellaswag": "Yaml", "lambada_openai": "Yaml", "openbookqa": "Yaml", "piqa": "Yaml", "sciq": "Yaml", "wikitext": "Yaml", "winogrande": "Yaml"},
  "config": {"model": "hf", "model_args": "pretrained=lomahony/eleuther-pythia12b-hh-sft,parallelize=True", "batch_size": "8", "batch_sizes": [], "device": null, "use_cache": null, "limit": null, "bootstrap_iters": 100000},
  "git_hash": "4cda3a1c"
}
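Because the four results.json files above share the same task set, the base model (EleutherAI/pythia-12b) and the SFT checkpoint (lomahony/eleuther-pythia12b-hh-sft) can be compared directly by joining on task name. A sketch using the directory layout of this upload:

```python
# Sketch: SFT-minus-base accuracy deltas at 0-shot and 5-shot, computed
# from the four results.json files in this upload.
import json

RUNS = {
    ("base", 0): "EleutherAI-pythia-12b-0shot/results.json",
    ("base", 5): "EleutherAI-pythia-12b-5shot/results.json",
    ("sft", 0): "sft-pythia-12b-0shot/results.json",
    ("sft", 5): "sft-pythia-12b-5shot/results.json",
}

acc = {}  # (model, shots) -> {task: accuracy}
for key, path in RUNS.items():
    with open(path) as f:
        results = json.load(f)["results"]
    acc[key] = {t: m["acc,none"] for t, m in results.items() if "acc,none" in m}

for shots in (0, 5):
    print(f"\n{shots}-shot accuracy, SFT minus base:")
    base, sft = acc[("base", shots)], acc[("sft", shots)]
    for task in sorted(base.keys() & sft.keys()):
        print(f"  {task:<15} {sft[task] - base[task]:+.4f}")
```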