Josephgflowers committed on
Commit
e2e36fe
1 Parent(s): 3d16397

Training in progress, step 12822

README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: mit
+ base_model: Josephgflowers/TinyLlama-Cinder-Math-Train
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: TinyLlama-Cinder-Tiny-Agent
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # TinyLlama-Cinder-Tiny-Agent
+
+ This model is a fine-tuned version of [Josephgflowers/TinyLlama-Cinder-Math-Train](https://huggingface.co/Josephgflowers/TinyLlama-Cinder-Math-Train) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 12
+ - eval_batch_size: 32
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.41.0.dev0
+ - Pytorch 2.2.2+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
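
For reference, a minimal inference sketch for this checkpoint; it assumes the standard `transformers` AutoModel API and the chat template shipped in `tokenizer_config.json` below (the prompt and system message are illustrative only):

```python
# Minimal usage sketch -- assumes transformers >= 4.41 and enough RAM for the
# float32 weights (~4.4 GB); model id taken from this repository.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Josephgflowers/TinyLlama-Cinder-Tiny-Agent"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# The tokenizer ships a Zephyr-style chat template (<|system|>/<|user|>/<|assistant|>),
# so apply_chat_template builds the prompt in the format the model was trained on.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},  # illustrative
    {"role": "user", "content": "What is 12 * 9?"},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```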
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 1.0,
+   "total_flos": 8.2628573134848e+17,
+   "train_loss": 0.7277259675255335,
+   "train_runtime": 75113.2183,
+   "train_samples": 65000,
+   "train_samples_per_second": 0.865,
+   "train_steps_per_second": 0.072
+ }
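
A quick sanity check: the reported throughput figures are mutually consistent with the step count in `trainer_state.json`. A sketch reproducing them from the fields above:

```python
# Consistency check on the reported stats (values copied from all_results.json
# and the README hyperparameters).
train_samples = 65_000
train_runtime = 75_113.2183   # seconds
train_batch_size = 12

steps = -(-train_samples // train_batch_size)    # ceil(65000 / 12) = 5417
print(steps)                                     # 5417, matches global_step
print(round(train_samples / train_runtime, 3))   # 0.865 samples/sec
print(round(steps / train_runtime, 3))           # 0.072 steps/sec
```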
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "Josephgflowers/TinyLlama-Cinder-Tiny-Agent",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.0.dev0",
+   "use_cache": false,
+   "vocab_size": 32000
+ }
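
The config describes a standard TinyLlama-class decoder: 22 layers, hidden size 2048, grouped-query attention with 32 query heads and 4 key/value heads, and untied embeddings. A rough parameter count derived from those fields, as a sketch:

```python
# Approximate parameter count from config.json (no biases; untied embeddings).
hidden, inter, layers, vocab = 2048, 5632, 22, 32000
heads, kv_heads = 32, 4
head_dim = hidden // heads                       # 64

attn = hidden * hidden                           # q_proj
attn += 2 * hidden * (kv_heads * head_dim)       # k_proj and v_proj (GQA)
attn += hidden * hidden                          # o_proj
mlp = 3 * hidden * inter                         # gate_proj, up_proj, down_proj
per_layer = attn + mlp + 2 * hidden              # plus two RMSNorm weight vectors

total = layers * per_layer + 2 * vocab * hidden + hidden  # embeddings, lm_head, final norm
print(f"{total:,} parameters (~{total / 1e9:.2f}B)")      # ~1.10B
```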
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "max_length": 2048,
+   "pad_token_id": 0,
+   "transformers_version": "4.41.0.dev0"
+ }
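
These values are the defaults `generate()` falls back to when no overrides are passed; a small sketch of inspecting and overriding them, assuming the standard `GenerationConfig` API:

```python
# generation_config.json supplies decoding defaults; any field can be overridden per call.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("Josephgflowers/TinyLlama-Cinder-Tiny-Agent")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.max_length)  # 1 2 2048

# e.g. sampling instead of greedy decoding:
# output = model.generate(input_ids, generation_config=gen_cfg, do_sample=True, top_p=0.9)
```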
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c86cd1858d9891e7cd52ccb63a868d208a7a0731b84df044c8506a2f0a7e1fb
+ size 4400216536
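
The pointer size is consistent with the float32 dtype declared in `config.json`: roughly 1.1B parameters at 4 bytes each, plus a small safetensors header. A quick check:

```python
# size 4,400,216,536 bytes vs. ~1.10B float32 parameters at 4 bytes each.
params = 1_100_048_384        # count derived from config.json (see sketch above)
print(params * 4)             # 4,400,193,536 -- the ~23 KB remainder is the header/metadata
```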
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "model_max_length": 2048,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
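
The `chat_template` above is the Zephyr/TinyLlama-chat format. A sketch of what it renders to (output shown approximately; exact whitespace depends on the template's literal newlines):

```python
# Render the shipped chat template without tokenizing, to inspect the prompt format.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Josephgflowers/TinyLlama-Cinder-Tiny-Agent")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},  # illustrative
    {"role": "user", "content": "Hello!"},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Hello!</s>
# <|assistant|>
```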
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 1.0,
+   "total_flos": 8.2628573134848e+17,
+   "train_loss": 0.7277259675255335,
+   "train_runtime": 75113.2183,
+   "train_samples": 65000,
+   "train_samples_per_second": 0.865,
+   "train_steps_per_second": 0.072
+ }
trainer_state.json ADDED
@@ -0,0 +1,420 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 5417,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.018460402436773122,
+       "grad_norm": 72293.6015625,
+       "learning_rate": 4.907697987816135e-05,
+       "loss": 0.7984,
+       "step": 100
+     },
+     {
+       "epoch": 0.036920804873546244,
+       "grad_norm": 86618.71875,
+       "learning_rate": 4.815395975632269e-05,
+       "loss": 0.7874,
+       "step": 200
+     },
+     {
+       "epoch": 0.05538120731031936,
+       "grad_norm": 82291.1953125,
+       "learning_rate": 4.723093963448403e-05,
+       "loss": 0.7683,
+       "step": 300
+     },
+     {
+       "epoch": 0.07384160974709249,
+       "grad_norm": 72406.7265625,
+       "learning_rate": 4.6307919512645376e-05,
+       "loss": 0.7721,
+       "step": 400
+     },
+     {
+       "epoch": 0.0923020121838656,
+       "grad_norm": 82859.0,
+       "learning_rate": 4.538489939080672e-05,
+       "loss": 0.7672,
+       "step": 500
+     },
+     {
+       "epoch": 0.11076241462063872,
+       "grad_norm": 72674.453125,
+       "learning_rate": 4.4461879268968066e-05,
+       "loss": 0.7741,
+       "step": 600
+     },
+     {
+       "epoch": 0.12922281705741184,
+       "grad_norm": 74598.8828125,
+       "learning_rate": 4.3538859147129405e-05,
+       "loss": 0.7684,
+       "step": 700
+     },
+     {
+       "epoch": 0.14768321949418498,
+       "grad_norm": 75325.09375,
+       "learning_rate": 4.261583902529076e-05,
+       "loss": 0.7706,
+       "step": 800
+     },
+     {
+       "epoch": 0.1661436219309581,
+       "grad_norm": 80050.4921875,
+       "learning_rate": 4.1692818903452095e-05,
+       "loss": 0.771,
+       "step": 900
+     },
+     {
+       "epoch": 0.1846040243677312,
+       "grad_norm": 71205.3359375,
+       "learning_rate": 4.076979878161344e-05,
+       "loss": 0.7458,
+       "step": 1000
+     },
+     {
+       "epoch": 0.20306442680450434,
+       "grad_norm": 71098.4765625,
+       "learning_rate": 3.9846778659774785e-05,
+       "loss": 0.7632,
+       "step": 1100
+     },
+     {
+       "epoch": 0.22152482924127745,
+       "grad_norm": 79350.5859375,
+       "learning_rate": 3.892375853793613e-05,
+       "loss": 0.7715,
+       "step": 1200
+     },
+     {
+       "epoch": 0.23998523167805058,
+       "grad_norm": 78053.2578125,
+       "learning_rate": 3.800073841609747e-05,
+       "loss": 0.7582,
+       "step": 1300
+     },
+     {
+       "epoch": 0.2584456341148237,
+       "grad_norm": 72609.0078125,
+       "learning_rate": 3.707771829425882e-05,
+       "loss": 0.7547,
+       "step": 1400
+     },
+     {
+       "epoch": 0.27690603655159685,
+       "grad_norm": 75624.9453125,
+       "learning_rate": 3.615469817242016e-05,
+       "loss": 0.7432,
+       "step": 1500
+     },
+     {
+       "epoch": 0.29536643898836995,
+       "grad_norm": 67118.828125,
+       "learning_rate": 3.5231678050581504e-05,
+       "loss": 0.7522,
+       "step": 1600
+     },
+     {
+       "epoch": 0.31382684142514305,
+       "grad_norm": 77685.6171875,
+       "learning_rate": 3.430865792874285e-05,
+       "loss": 0.7529,
+       "step": 1700
+     },
+     {
+       "epoch": 0.3322872438619162,
+       "grad_norm": 80151.4609375,
+       "learning_rate": 3.338563780690419e-05,
+       "loss": 0.7558,
+       "step": 1800
+     },
+     {
+       "epoch": 0.3507476462986893,
+       "grad_norm": 68374.609375,
+       "learning_rate": 3.246261768506554e-05,
+       "loss": 0.7547,
+       "step": 1900
+     },
+     {
+       "epoch": 0.3692080487354624,
+       "grad_norm": 69225.65625,
+       "learning_rate": 3.153959756322688e-05,
+       "loss": 0.7283,
+       "step": 2000
+     },
+     {
+       "epoch": 0.38766845117223553,
+       "grad_norm": 93264.3984375,
+       "learning_rate": 3.061657744138822e-05,
+       "loss": 0.7368,
+       "step": 2100
+     },
+     {
+       "epoch": 0.4061288536090087,
+       "grad_norm": 81487.3125,
+       "learning_rate": 2.9693557319549568e-05,
+       "loss": 0.7531,
+       "step": 2200
+     },
+     {
+       "epoch": 0.4245892560457818,
+       "grad_norm": 72823.8125,
+       "learning_rate": 2.8770537197710913e-05,
+       "loss": 0.7216,
+       "step": 2300
+     },
+     {
+       "epoch": 0.4430496584825549,
+       "grad_norm": 73118.8203125,
+       "learning_rate": 2.7847517075872255e-05,
+       "loss": 0.7356,
+       "step": 2400
+     },
+     {
+       "epoch": 0.46151006091932806,
+       "grad_norm": 68678.484375,
+       "learning_rate": 2.69244969540336e-05,
+       "loss": 0.7308,
+       "step": 2500
+     },
+     {
+       "epoch": 0.47997046335610116,
+       "grad_norm": 73918.640625,
+       "learning_rate": 2.6001476832194942e-05,
+       "loss": 0.7235,
+       "step": 2600
+     },
+     {
+       "epoch": 0.49843086579287427,
+       "grad_norm": 66172.796875,
+       "learning_rate": 2.5078456710356284e-05,
+       "loss": 0.7283,
+       "step": 2700
+     },
+     {
+       "epoch": 0.5168912682296474,
+       "grad_norm": 66544.1015625,
+       "learning_rate": 2.415543658851763e-05,
+       "loss": 0.7228,
+       "step": 2800
+     },
+     {
+       "epoch": 0.5353516706664205,
+       "grad_norm": 66939.625,
+       "learning_rate": 2.3232416466678974e-05,
+       "loss": 0.7095,
+       "step": 2900
+     },
+     {
+       "epoch": 0.5538120731031937,
+       "grad_norm": 67865.15625,
+       "learning_rate": 2.230939634484032e-05,
+       "loss": 0.7248,
+       "step": 3000
+     },
+     {
+       "epoch": 0.5722724755399667,
+       "grad_norm": 63117.76953125,
+       "learning_rate": 2.138637622300166e-05,
+       "loss": 0.7147,
+       "step": 3100
+     },
+     {
+       "epoch": 0.5907328779767399,
+       "grad_norm": 59398.1796875,
+       "learning_rate": 2.0463356101163006e-05,
+       "loss": 0.7031,
+       "step": 3200
+     },
+     {
+       "epoch": 0.6091932804135131,
+       "grad_norm": 63816.08203125,
+       "learning_rate": 1.954033597932435e-05,
+       "loss": 0.7144,
+       "step": 3300
+     },
+     {
+       "epoch": 0.6276536828502861,
+       "grad_norm": 70370.5625,
+       "learning_rate": 1.8617315857485696e-05,
+       "loss": 0.7141,
+       "step": 3400
+     },
+     {
+       "epoch": 0.6461140852870593,
+       "grad_norm": 63610.734375,
+       "learning_rate": 1.7694295735647038e-05,
+       "loss": 0.694,
+       "step": 3500
+     },
+     {
+       "epoch": 0.6645744877238324,
+       "grad_norm": 59664.05078125,
+       "learning_rate": 1.6771275613808383e-05,
+       "loss": 0.7086,
+       "step": 3600
+     },
+     {
+       "epoch": 0.6830348901606055,
+       "grad_norm": 64207.88671875,
+       "learning_rate": 1.5848255491969728e-05,
+       "loss": 0.7138,
+       "step": 3700
+     },
+     {
+       "epoch": 0.7014952925973786,
+       "grad_norm": 79748.2734375,
+       "learning_rate": 1.4925235370131068e-05,
+       "loss": 0.6985,
+       "step": 3800
+     },
+     {
+       "epoch": 0.7199556950341518,
+       "grad_norm": 72216.671875,
+       "learning_rate": 1.4002215248292413e-05,
+       "loss": 0.7019,
+       "step": 3900
+     },
+     {
+       "epoch": 0.7384160974709248,
+       "grad_norm": 74815.6328125,
+       "learning_rate": 1.3079195126453758e-05,
+       "loss": 0.687,
+       "step": 4000
+     },
+     {
+       "epoch": 0.756876499907698,
+       "grad_norm": 61482.41796875,
+       "learning_rate": 1.2156175004615102e-05,
+       "loss": 0.6983,
+       "step": 4100
+     },
+     {
+       "epoch": 0.7753369023444711,
+       "grad_norm": 73800.75,
+       "learning_rate": 1.1233154882776445e-05,
+       "loss": 0.7004,
+       "step": 4200
+     },
+     {
+       "epoch": 0.7937973047812442,
+       "grad_norm": 75985.421875,
+       "learning_rate": 1.0310134760937789e-05,
+       "loss": 0.7097,
+       "step": 4300
+     },
+     {
+       "epoch": 0.8122577072180174,
+       "grad_norm": 67175.03125,
+       "learning_rate": 9.387114639099132e-06,
+       "loss": 0.6781,
+       "step": 4400
+     },
+     {
+       "epoch": 0.8307181096547904,
+       "grad_norm": 68520.875,
+       "learning_rate": 8.464094517260476e-06,
+       "loss": 0.6857,
+       "step": 4500
+     },
+     {
+       "epoch": 0.8491785120915636,
+       "grad_norm": 65612.703125,
+       "learning_rate": 7.541074395421821e-06,
+       "loss": 0.6971,
+       "step": 4600
+     },
+     {
+       "epoch": 0.8676389145283367,
+       "grad_norm": 63280.83203125,
+       "learning_rate": 6.618054273583164e-06,
+       "loss": 0.6942,
+       "step": 4700
+     },
+     {
+       "epoch": 0.8860993169651098,
+       "grad_norm": 68478.2890625,
+       "learning_rate": 5.6950341517445085e-06,
+       "loss": 0.7028,
+       "step": 4800
+     },
+     {
+       "epoch": 0.904559719401883,
+       "grad_norm": 68026.8125,
+       "learning_rate": 4.772014029905853e-06,
+       "loss": 0.6694,
+       "step": 4900
+     },
+     {
+       "epoch": 0.9230201218386561,
+       "grad_norm": 64797.25390625,
+       "learning_rate": 3.848993908067195e-06,
+       "loss": 0.704,
+       "step": 5000
+     },
+     {
+       "epoch": 0.9414805242754292,
+       "grad_norm": 71872.890625,
+       "learning_rate": 2.9259737862285397e-06,
+       "loss": 0.6946,
+       "step": 5100
+     },
+     {
+       "epoch": 0.9599409267122023,
+       "grad_norm": 74217.3125,
+       "learning_rate": 2.002953664389884e-06,
+       "loss": 0.6796,
+       "step": 5200
+     },
+     {
+       "epoch": 0.9784013291489755,
+       "grad_norm": 68286.15625,
+       "learning_rate": 1.0799335425512278e-06,
+       "loss": 0.6966,
+       "step": 5300
+     },
+     {
+       "epoch": 0.9968617315857485,
+       "grad_norm": 137991.28125,
+       "learning_rate": 1.5691342071257153e-07,
+       "loss": 0.7014,
+       "step": 5400
+     },
+     {
+       "epoch": 1.0,
+       "step": 5417,
+       "total_flos": 8.2628573134848e+17,
+       "train_loss": 0.7277259675255335,
+       "train_runtime": 75113.2183,
+       "train_samples_per_second": 0.865,
+       "train_steps_per_second": 0.072
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 5417,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 5417,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 8.2628573134848e+17,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": null
+ }
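
The log history records loss falling from about 0.80 at step 100 to about 0.70 by step 5400, with the learning rate decaying linearly to zero. A sketch for extracting the curve from this file:

```python
# Pull (step, loss) pairs out of trainer_state.json for inspection or plotting.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(points[0], points[-1])   # (100, 0.7984) (5400, 0.7014)
```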
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c44c38eb09a6b2599756766edbc167c8760d5ab86cbaedecff96098292537cf
+ size 5112