Centaur31 commited on
Commit
4b33e25
1 Parent(s): 12a7b2c

Upload 14 files

Browse files
README.txt ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [run.sh](https://huggingface.co/madlag/bert-large-uncased-whole-word-masking-finetuned-squadv2/blob/main/run.sh) was used to train this model with the transformers/examples/question_answering code.
2
+
3
+ Evaluation results: F1 = 85.85, a much better result than the original 81.9 from the BERT paper, due to the use of the "whole-word-masking" variation.
4
+ ```
5
+ {
6
+ "HasAns_exact": 80.58367071524967,
7
+ "HasAns_f1": 86.64594807945029,
8
+ "HasAns_total": 5928,
9
+ "NoAns_exact": 85.06307821698907,
10
+ "NoAns_f1": 85.06307821698907,
11
+ "NoAns_total": 5945,
12
+ "best_exact": 82.82658131895899,
13
+ "best_exact_thresh": 0.0,
14
+ "best_f1": 85.85337995578023,
15
+ "best_f1_thresh": 0.0,
16
+ "epoch": 2.0,
17
+ "eval_samples": 12134,
18
+ "exact": 82.82658131895899,
19
+ "f1": 85.85337995578037,
20
+ "total": 11873
21
+ }
22
+ ```
all_results.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "HasAns_exact": 80.58367071524967,
3
+ "HasAns_f1": 86.64594807945029,
4
+ "HasAns_total": 5928,
5
+ "NoAns_exact": 85.06307821698907,
6
+ "NoAns_f1": 85.06307821698907,
7
+ "NoAns_total": 5945,
8
+ "best_exact": 82.82658131895899,
9
+ "best_exact_thresh": 0.0,
10
+ "best_f1": 85.85337995578023,
11
+ "best_f1_thresh": 0.0,
12
+ "epoch": 2.0,
13
+ "eval_samples": 12134,
14
+ "exact": 82.82658131895899,
15
+ "f1": 85.85337995578037,
16
+ "init_mem_cpu_alloc_delta": -231366656,
17
+ "init_mem_cpu_peaked_delta": 1333211136,
18
+ "init_mem_gpu_alloc_delta": 1337192960,
19
+ "init_mem_gpu_peaked_delta": 0,
20
+ "total": 11873,
21
+ "train_mem_cpu_alloc_delta": 887136256,
22
+ "train_mem_cpu_peaked_delta": 380289024,
23
+ "train_mem_gpu_alloc_delta": 4029973504,
24
+ "train_mem_gpu_peaked_delta": 16453983232,
25
+ "train_runtime": 13072.0089,
26
+ "train_samples": 131754,
27
+ "train_samples_per_second": 1.68
28
+ }
config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bert-large-uncased-whole-word-masking",
3
+ "architectures": [
4
+ "BertForQuestionAnswering"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "gradient_checkpointing": false,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 1024,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 4096,
13
+ "layer_norm_eps": 1e-12,
14
+ "max_position_embeddings": 512,
15
+ "model_type": "bert",
16
+ "num_attention_heads": 16,
17
+ "num_hidden_layers": 24,
18
+ "pad_token_id": 0,
19
+ "position_embedding_type": "absolute",
20
+ "transformers_version": "4.5.1",
21
+ "type_vocab_size": 2,
22
+ "use_cache": true,
23
+ "vocab_size": 30522
24
+ }
eval_results.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "HasAns_exact": 80.58367071524967,
3
+ "HasAns_f1": 86.64594807945029,
4
+ "HasAns_total": 5928,
5
+ "NoAns_exact": 85.06307821698907,
6
+ "NoAns_f1": 85.06307821698907,
7
+ "NoAns_total": 5945,
8
+ "best_exact": 82.82658131895899,
9
+ "best_exact_thresh": 0.0,
10
+ "best_f1": 85.85337995578023,
11
+ "best_f1_thresh": 0.0,
12
+ "epoch": 2.0,
13
+ "eval_samples": 12134,
14
+ "exact": 82.82658131895899,
15
+ "f1": 85.85337995578037,
16
+ "total": 11873
17
+ }
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:086eb7284497813d2fa0e20c058a2bed213eaf3149765c9ce85ba4012bc02d60
3
+ size 1336391324
gitattributes.txt ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.arrow filter=lfs diff=lfs merge=lfs -text
10
+ *.ftz filter=lfs diff=lfs merge=lfs -text
11
+ *.joblib filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.pb filter=lfs diff=lfs merge=lfs -text
15
+ *.pt filter=lfs diff=lfs merge=lfs -text
16
+ *.pth filter=lfs diff=lfs merge=lfs -text
17
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afd0115a49f1d98662f68862670f761a35a905346a6cc82c645ba29a952c9bb2
3
+ size 1336547639
run.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ python run_qa.py \
2
+ --model_name_or_path bert-large-uncased-whole-word-masking \
3
+ --dataset_name squad_v2 \
4
+ --do_train \
5
+ --do_eval \
6
+ --save_steps 2500 \
7
+ --eval_steps 2500 \
8
+ --evaluation_strategy steps \
9
+ --per_device_train_batch_size 12 \
10
+ --learning_rate 3e-5 \
11
+ --num_train_epochs 2 \
12
+ --max_seq_length 384 \
13
+ --doc_stride 128 \
14
+ --output_dir /data_2to/devel_data/nn_pruning/output/teacher/squadv2-large-wwm \
15
+ --version_2_with_negative 1
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-large-uncased-whole-word-masking"}
train_results.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "init_mem_cpu_alloc_delta": -231366656,
4
+ "init_mem_cpu_peaked_delta": 1333211136,
5
+ "init_mem_gpu_alloc_delta": 1337192960,
6
+ "init_mem_gpu_peaked_delta": 0,
7
+ "train_mem_cpu_alloc_delta": 887136256,
8
+ "train_mem_cpu_peaked_delta": 380289024,
9
+ "train_mem_gpu_alloc_delta": 4029973504,
10
+ "train_mem_gpu_peaked_delta": 16453983232,
11
+ "train_runtime": 13072.0089,
12
+ "train_samples": 131754,
13
+ "train_samples_per_second": 1.68
14
+ }
trainer_state.json ADDED
@@ -0,0 +1,417 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.0,
5
+ "global_step": 21960,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.05,
12
+ "learning_rate": 2.9316939890710385e-05,
13
+ "loss": 2.1044,
14
+ "step": 500
15
+ },
16
+ {
17
+ "epoch": 0.09,
18
+ "learning_rate": 2.8633879781420765e-05,
19
+ "loss": 1.5614,
20
+ "step": 1000
21
+ },
22
+ {
23
+ "epoch": 0.14,
24
+ "learning_rate": 2.795081967213115e-05,
25
+ "loss": 1.4288,
26
+ "step": 1500
27
+ },
28
+ {
29
+ "epoch": 0.18,
30
+ "learning_rate": 2.7267759562841533e-05,
31
+ "loss": 1.3095,
32
+ "step": 2000
33
+ },
34
+ {
35
+ "epoch": 0.23,
36
+ "learning_rate": 2.6584699453551913e-05,
37
+ "loss": 1.2225,
38
+ "step": 2500
39
+ },
40
+ {
41
+ "HasAns_exact": 71.89608636977059,
42
+ "HasAns_f1": 78.04168619043323,
43
+ "HasAns_total": 5928,
44
+ "NoAns_exact": 82.62405382674517,
45
+ "NoAns_f1": 82.62405382674517,
46
+ "NoAns_total": 5945,
47
+ "best_exact": 77.27617282910806,
48
+ "best_exact_thresh": 0.0,
49
+ "best_f1": 80.34457304277659,
50
+ "best_f1_thresh": 0.0,
51
+ "epoch": 0.23,
52
+ "exact": 77.26775035795502,
53
+ "f1": 80.33615057162366,
54
+ "step": 2500,
55
+ "total": 11873
56
+ },
57
+ {
58
+ "epoch": 0.27,
59
+ "learning_rate": 2.5901639344262294e-05,
60
+ "loss": 1.2062,
61
+ "step": 3000
62
+ },
63
+ {
64
+ "epoch": 0.32,
65
+ "learning_rate": 2.5218579234972678e-05,
66
+ "loss": 1.2019,
67
+ "step": 3500
68
+ },
69
+ {
70
+ "epoch": 0.36,
71
+ "learning_rate": 2.453551912568306e-05,
72
+ "loss": 1.1856,
73
+ "step": 4000
74
+ },
75
+ {
76
+ "epoch": 0.41,
77
+ "learning_rate": 2.3852459016393442e-05,
78
+ "loss": 1.1677,
79
+ "step": 4500
80
+ },
81
+ {
82
+ "epoch": 0.46,
83
+ "learning_rate": 2.3169398907103826e-05,
84
+ "loss": 1.1343,
85
+ "step": 5000
86
+ },
87
+ {
88
+ "HasAns_exact": 74.44331983805668,
89
+ "HasAns_f1": 80.60592872513861,
90
+ "HasAns_total": 5928,
91
+ "NoAns_exact": 83.65012615643398,
92
+ "NoAns_f1": 83.65012615643398,
93
+ "NoAns_total": 5945,
94
+ "best_exact": 79.05331424239871,
95
+ "best_exact_thresh": 0.0,
96
+ "best_f1": 82.13020681231532,
97
+ "best_f1_thresh": 0.0,
98
+ "epoch": 0.46,
99
+ "exact": 79.05331424239871,
100
+ "f1": 82.13020681231545,
101
+ "step": 5000,
102
+ "total": 11873
103
+ },
104
+ {
105
+ "epoch": 0.5,
106
+ "learning_rate": 2.248633879781421e-05,
107
+ "loss": 1.1489,
108
+ "step": 5500
109
+ },
110
+ {
111
+ "epoch": 0.55,
112
+ "learning_rate": 2.180327868852459e-05,
113
+ "loss": 1.1349,
114
+ "step": 6000
115
+ },
116
+ {
117
+ "epoch": 0.59,
118
+ "learning_rate": 2.1120218579234974e-05,
119
+ "loss": 1.1156,
120
+ "step": 6500
121
+ },
122
+ {
123
+ "epoch": 0.64,
124
+ "learning_rate": 2.0437158469945358e-05,
125
+ "loss": 1.1066,
126
+ "step": 7000
127
+ },
128
+ {
129
+ "epoch": 0.68,
130
+ "learning_rate": 1.975409836065574e-05,
131
+ "loss": 1.0877,
132
+ "step": 7500
133
+ },
134
+ {
135
+ "HasAns_exact": 80.61740890688259,
136
+ "HasAns_f1": 87.36413644383099,
137
+ "HasAns_total": 5928,
138
+ "NoAns_exact": 72.31286795626578,
139
+ "NoAns_f1": 72.31286795626578,
140
+ "NoAns_total": 5945,
141
+ "best_exact": 76.45919312726353,
142
+ "best_exact_thresh": 0.0,
143
+ "best_f1": 79.82772684570281,
144
+ "best_f1_thresh": 0.0,
145
+ "epoch": 0.68,
146
+ "exact": 76.45919312726353,
147
+ "f1": 79.82772684570291,
148
+ "step": 7500,
149
+ "total": 11873
150
+ },
151
+ {
152
+ "epoch": 0.73,
153
+ "learning_rate": 1.907103825136612e-05,
154
+ "loss": 1.0836,
155
+ "step": 8000
156
+ },
157
+ {
158
+ "epoch": 0.77,
159
+ "learning_rate": 1.8387978142076503e-05,
160
+ "loss": 1.0793,
161
+ "step": 8500
162
+ },
163
+ {
164
+ "epoch": 0.82,
165
+ "learning_rate": 1.7704918032786887e-05,
166
+ "loss": 1.0722,
167
+ "step": 9000
168
+ },
169
+ {
170
+ "epoch": 0.87,
171
+ "learning_rate": 1.7021857923497267e-05,
172
+ "loss": 1.0497,
173
+ "step": 9500
174
+ },
175
+ {
176
+ "epoch": 0.91,
177
+ "learning_rate": 1.633879781420765e-05,
178
+ "loss": 0.8443,
179
+ "step": 10000
180
+ },
181
+ {
182
+ "HasAns_exact": 77.7327935222672,
183
+ "HasAns_f1": 84.30659896226393,
184
+ "HasAns_total": 5928,
185
+ "NoAns_exact": 81.19428090832632,
186
+ "NoAns_f1": 81.19428090832632,
187
+ "NoAns_total": 5945,
188
+ "best_exact": 79.4660153288975,
189
+ "best_exact_thresh": 0.0,
190
+ "best_f1": 82.7482117955276,
191
+ "best_f1_thresh": 0.0,
192
+ "epoch": 0.91,
193
+ "exact": 79.4660153288975,
194
+ "f1": 82.74821179552765,
195
+ "step": 10000,
196
+ "total": 11873
197
+ },
198
+ {
199
+ "epoch": 0.96,
200
+ "learning_rate": 1.5655737704918035e-05,
201
+ "loss": 0.7997,
202
+ "step": 10500
203
+ },
204
+ {
205
+ "epoch": 1.0,
206
+ "learning_rate": 1.4972677595628415e-05,
207
+ "loss": 0.7642,
208
+ "step": 11000
209
+ },
210
+ {
211
+ "epoch": 1.05,
212
+ "learning_rate": 1.4289617486338798e-05,
213
+ "loss": 0.4993,
214
+ "step": 11500
215
+ },
216
+ {
217
+ "epoch": 1.09,
218
+ "learning_rate": 1.3606557377049181e-05,
219
+ "loss": 0.4945,
220
+ "step": 12000
221
+ },
222
+ {
223
+ "epoch": 1.14,
224
+ "learning_rate": 1.2923497267759564e-05,
225
+ "loss": 0.5155,
226
+ "step": 12500
227
+ },
228
+ {
229
+ "HasAns_exact": 75.79284750337382,
230
+ "HasAns_f1": 82.13405464912947,
231
+ "HasAns_total": 5928,
232
+ "NoAns_exact": 86.67788057190917,
233
+ "NoAns_f1": 86.67788057190917,
234
+ "NoAns_total": 5945,
235
+ "best_exact": 81.24315674218816,
236
+ "best_exact_thresh": 0.0,
237
+ "best_f1": 84.4092205811537,
238
+ "best_f1_thresh": 0.0,
239
+ "epoch": 1.14,
240
+ "exact": 81.24315674218816,
241
+ "f1": 84.40922058115372,
242
+ "step": 12500,
243
+ "total": 11873
244
+ },
245
+ {
246
+ "epoch": 1.18,
247
+ "learning_rate": 1.2240437158469946e-05,
248
+ "loss": 0.4964,
249
+ "step": 13000
250
+ },
251
+ {
252
+ "epoch": 1.23,
253
+ "learning_rate": 1.1557377049180328e-05,
254
+ "loss": 0.4947,
255
+ "step": 13500
256
+ },
257
+ {
258
+ "epoch": 1.28,
259
+ "learning_rate": 1.087431693989071e-05,
260
+ "loss": 0.5173,
261
+ "step": 14000
262
+ },
263
+ {
264
+ "epoch": 1.32,
265
+ "learning_rate": 1.0191256830601094e-05,
266
+ "loss": 0.4718,
267
+ "step": 14500
268
+ },
269
+ {
270
+ "epoch": 1.37,
271
+ "learning_rate": 9.508196721311476e-06,
272
+ "loss": 0.4768,
273
+ "step": 15000
274
+ },
275
+ {
276
+ "HasAns_exact": 77.83400809716599,
277
+ "HasAns_f1": 84.204371493218,
278
+ "HasAns_total": 5928,
279
+ "NoAns_exact": 85.23128679562657,
280
+ "NoAns_f1": 85.23128679562657,
281
+ "NoAns_total": 5945,
282
+ "best_exact": 81.53794323254444,
283
+ "best_exact_thresh": 0.0,
284
+ "best_f1": 84.71856432340547,
285
+ "best_f1_thresh": 0.0,
286
+ "epoch": 1.37,
287
+ "exact": 81.53794323254444,
288
+ "f1": 84.71856432340567,
289
+ "step": 15000,
290
+ "total": 11873
291
+ },
292
+ {
293
+ "epoch": 1.41,
294
+ "learning_rate": 8.825136612021857e-06,
295
+ "loss": 0.4937,
296
+ "step": 15500
297
+ },
298
+ {
299
+ "epoch": 1.46,
300
+ "learning_rate": 8.14207650273224e-06,
301
+ "loss": 0.4678,
302
+ "step": 16000
303
+ },
304
+ {
305
+ "epoch": 1.5,
306
+ "learning_rate": 7.459016393442623e-06,
307
+ "loss": 0.4922,
308
+ "step": 16500
309
+ },
310
+ {
311
+ "epoch": 1.55,
312
+ "learning_rate": 6.775956284153006e-06,
313
+ "loss": 0.4779,
314
+ "step": 17000
315
+ },
316
+ {
317
+ "epoch": 1.59,
318
+ "learning_rate": 6.092896174863388e-06,
319
+ "loss": 0.4787,
320
+ "step": 17500
321
+ },
322
+ {
323
+ "HasAns_exact": 79.30161943319838,
324
+ "HasAns_f1": 85.07291240565793,
325
+ "HasAns_total": 5928,
326
+ "NoAns_exact": 85.5004205214466,
327
+ "NoAns_f1": 85.5004205214466,
328
+ "NoAns_total": 5945,
329
+ "best_exact": 82.40545776130716,
330
+ "best_exact_thresh": 0.0,
331
+ "best_f1": 85.28697252090775,
332
+ "best_f1_thresh": 0.0,
333
+ "epoch": 1.59,
334
+ "exact": 82.40545776130716,
335
+ "f1": 85.28697252090787,
336
+ "step": 17500,
337
+ "total": 11873
338
+ },
339
+ {
340
+ "epoch": 1.64,
341
+ "learning_rate": 5.409836065573771e-06,
342
+ "loss": 0.4554,
343
+ "step": 18000
344
+ },
345
+ {
346
+ "epoch": 1.68,
347
+ "learning_rate": 4.726775956284153e-06,
348
+ "loss": 0.4661,
349
+ "step": 18500
350
+ },
351
+ {
352
+ "epoch": 1.73,
353
+ "learning_rate": 4.043715846994535e-06,
354
+ "loss": 0.449,
355
+ "step": 19000
356
+ },
357
+ {
358
+ "epoch": 1.78,
359
+ "learning_rate": 3.3606557377049183e-06,
360
+ "loss": 0.4426,
361
+ "step": 19500
362
+ },
363
+ {
364
+ "epoch": 1.82,
365
+ "learning_rate": 2.6775956284153005e-06,
366
+ "loss": 0.4732,
367
+ "step": 20000
368
+ },
369
+ {
370
+ "HasAns_exact": 81.29217273954116,
371
+ "HasAns_f1": 87.36118517518237,
372
+ "HasAns_total": 5928,
373
+ "NoAns_exact": 82.64087468460892,
374
+ "NoAns_f1": 82.64087468460892,
375
+ "NoAns_total": 5945,
376
+ "best_exact": 81.96748926134929,
377
+ "best_exact_thresh": 0.0,
378
+ "best_f1": 84.99765061218557,
379
+ "best_f1_thresh": 0.0,
380
+ "epoch": 1.82,
381
+ "exact": 81.96748926134929,
382
+ "f1": 84.99765061218568,
383
+ "step": 20000,
384
+ "total": 11873
385
+ },
386
+ {
387
+ "epoch": 1.87,
388
+ "learning_rate": 1.994535519125683e-06,
389
+ "loss": 0.4269,
390
+ "step": 20500
391
+ },
392
+ {
393
+ "epoch": 1.91,
394
+ "learning_rate": 1.3114754098360657e-06,
395
+ "loss": 0.4461,
396
+ "step": 21000
397
+ },
398
+ {
399
+ "epoch": 1.96,
400
+ "learning_rate": 6.284153005464482e-07,
401
+ "loss": 0.434,
402
+ "step": 21500
403
+ },
404
+ {
405
+ "epoch": 2.0,
406
+ "step": 21960,
407
+ "total_flos": 2.0283616700399e+17,
408
+ "train_runtime": 13072.0089,
409
+ "train_samples_per_second": 1.68
410
+ }
411
+ ],
412
+ "max_steps": 21960,
413
+ "num_train_epochs": 2,
414
+ "total_flos": 2.0283616700399e+17,
415
+ "trial_name": null,
416
+ "trial_params": null
417
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0dcc35c32983a4419cf8338e2803e51d726a45e398611409ecc0aa1a3bbfcf2
3
+ size 2415
vocab.txt ADDED
The diff for this file is too large to render. See raw diff