csikasote committed on
Commit
7110be8
1 Parent(s): e2b8a4b

End of training

Browse files
Files changed (5) hide show
  1. README.md +5 -3
  2. all_results.json +15 -0
  3. eval_results.json +9 -0
  4. train_results.json +9 -0
  5. trainer_state.json +259 -0
README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
3
  license: apache-2.0
4
  base_model: facebook/wav2vec2-xls-r-1b
5
  tags:
 
 
6
  - generated_from_trainer
7
  metrics:
8
  - wer
@@ -16,10 +18,10 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  # xls-r-1b-bem-natbed-native-model
18
 
19
- This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on an unknown dataset.
20
  It achieves the following results on the evaluation set:
21
- - Loss: 0.7276
22
- - Wer: 0.6730
23
 
24
  ## Model description
25
 
 
3
  license: apache-2.0
4
  base_model: facebook/wav2vec2-xls-r-1b
5
  tags:
6
+ - automatic-speech-recognition
7
+ - natbed
8
  - generated_from_trainer
9
  metrics:
10
  - wer
 
18
 
19
  # xls-r-1b-bem-natbed-native-model
20
 
21
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the NATBED - BEM dataset.
22
  It achieves the following results on the evaluation set:
23
+ - Loss: 0.6841
24
+ - Wer: 0.7487
25
 
26
  ## Model description
27
 
all_results.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 7.303370786516854,
3
+ "eval_loss": 0.6841106414794922,
4
+ "eval_runtime": 49.0856,
5
+ "eval_samples": 650,
6
+ "eval_samples_per_second": 13.242,
7
+ "eval_steps_per_second": 1.671,
8
+ "eval_wer": 0.7486554887693768,
9
+ "total_flos": 1.4772414251163556e+19,
10
+ "train_loss": 1.0161105111929087,
11
+ "train_runtime": 3259.7695,
12
+ "train_samples": 2845,
13
+ "train_samples_per_second": 26.183,
14
+ "train_steps_per_second": 1.638
15
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 7.303370786516854,
3
+ "eval_loss": 0.6841106414794922,
4
+ "eval_runtime": 49.0856,
5
+ "eval_samples": 650,
6
+ "eval_samples_per_second": 13.242,
7
+ "eval_steps_per_second": 1.671,
8
+ "eval_wer": 0.7486554887693768
9
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 7.303370786516854,
3
+ "total_flos": 1.4772414251163556e+19,
4
+ "train_loss": 1.0161105111929087,
5
+ "train_runtime": 3259.7695,
6
+ "train_samples": 2845,
7
+ "train_samples_per_second": 26.183,
8
+ "train_steps_per_second": 1.638
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.6841106414794922,
3
+ "best_model_checkpoint": "/scratch/skscla001/results/xls-r-1b-bem-natbed-native-model/checkpoint-1000",
4
+ "epoch": 7.303370786516854,
5
+ "eval_steps": 100,
6
+ "global_step": 1300,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.5617977528089888,
13
+ "grad_norm": 4.858776092529297,
14
+ "learning_rate": 0.00029099999999999997,
15
+ "loss": 4.5137,
16
+ "step": 100
17
+ },
18
+ {
19
+ "epoch": 0.5617977528089888,
20
+ "eval_loss": 2.554856777191162,
21
+ "eval_runtime": 50.0091,
22
+ "eval_samples_per_second": 12.998,
23
+ "eval_steps_per_second": 1.64,
24
+ "eval_wer": 1.0,
25
+ "step": 100
26
+ },
27
+ {
28
+ "epoch": 1.1235955056179776,
29
+ "grad_norm": 3.9340174198150635,
30
+ "learning_rate": 0.00029444656488549615,
31
+ "loss": 1.3916,
32
+ "step": 200
33
+ },
34
+ {
35
+ "epoch": 1.1235955056179776,
36
+ "eval_loss": 1.088287115097046,
37
+ "eval_runtime": 49.3058,
38
+ "eval_samples_per_second": 13.183,
39
+ "eval_steps_per_second": 1.663,
40
+ "eval_wer": 0.9840240430243594,
41
+ "step": 200
42
+ },
43
+ {
44
+ "epoch": 1.6853932584269664,
45
+ "grad_norm": 1.637488603591919,
46
+ "learning_rate": 0.00028872137404580147,
47
+ "loss": 0.9962,
48
+ "step": 300
49
+ },
50
+ {
51
+ "epoch": 1.6853932584269664,
52
+ "eval_loss": 0.8152701258659363,
53
+ "eval_runtime": 50.2981,
54
+ "eval_samples_per_second": 12.923,
55
+ "eval_steps_per_second": 1.63,
56
+ "eval_wer": 0.8190446061372983,
57
+ "step": 300
58
+ },
59
+ {
60
+ "epoch": 2.247191011235955,
61
+ "grad_norm": 1.2791478633880615,
62
+ "learning_rate": 0.00028299618320610685,
63
+ "loss": 0.8625,
64
+ "step": 400
65
+ },
66
+ {
67
+ "epoch": 2.247191011235955,
68
+ "eval_loss": 0.8690391182899475,
69
+ "eval_runtime": 49.7576,
70
+ "eval_samples_per_second": 13.063,
71
+ "eval_steps_per_second": 1.648,
72
+ "eval_wer": 0.8418222081619741,
73
+ "step": 400
74
+ },
75
+ {
76
+ "epoch": 2.808988764044944,
77
+ "grad_norm": 1.049700379371643,
78
+ "learning_rate": 0.00027727099236641217,
79
+ "loss": 0.8168,
80
+ "step": 500
81
+ },
82
+ {
83
+ "epoch": 2.808988764044944,
84
+ "eval_loss": 0.7395117282867432,
85
+ "eval_runtime": 49.4925,
86
+ "eval_samples_per_second": 13.133,
87
+ "eval_steps_per_second": 1.657,
88
+ "eval_wer": 0.7390066434672572,
89
+ "step": 500
90
+ },
91
+ {
92
+ "epoch": 3.370786516853933,
93
+ "grad_norm": 2.349531650543213,
94
+ "learning_rate": 0.00027154580152671755,
95
+ "loss": 0.7197,
96
+ "step": 600
97
+ },
98
+ {
99
+ "epoch": 3.370786516853933,
100
+ "eval_loss": 0.7596462965011597,
101
+ "eval_runtime": 49.3475,
102
+ "eval_samples_per_second": 13.172,
103
+ "eval_steps_per_second": 1.662,
104
+ "eval_wer": 0.7366339765896868,
105
+ "step": 600
106
+ },
107
+ {
108
+ "epoch": 3.932584269662921,
109
+ "grad_norm": 0.8924151062965393,
110
+ "learning_rate": 0.00026582061068702287,
111
+ "loss": 0.6848,
112
+ "step": 700
113
+ },
114
+ {
115
+ "epoch": 3.932584269662921,
116
+ "eval_loss": 0.7033310532569885,
117
+ "eval_runtime": 50.4297,
118
+ "eval_samples_per_second": 12.889,
119
+ "eval_steps_per_second": 1.626,
120
+ "eval_wer": 0.7228725086997786,
121
+ "step": 700
122
+ },
123
+ {
124
+ "epoch": 4.49438202247191,
125
+ "grad_norm": 1.047180414199829,
126
+ "learning_rate": 0.0002600954198473282,
127
+ "loss": 0.6134,
128
+ "step": 800
129
+ },
130
+ {
131
+ "epoch": 4.49438202247191,
132
+ "eval_loss": 0.830004870891571,
133
+ "eval_runtime": 50.1876,
134
+ "eval_samples_per_second": 12.951,
135
+ "eval_steps_per_second": 1.634,
136
+ "eval_wer": 0.7662132236633976,
137
+ "step": 800
138
+ },
139
+ {
140
+ "epoch": 5.056179775280899,
141
+ "grad_norm": 0.8339403867721558,
142
+ "learning_rate": 0.00025437022900763357,
143
+ "loss": 0.6303,
144
+ "step": 900
145
+ },
146
+ {
147
+ "epoch": 5.056179775280899,
148
+ "eval_loss": 0.73649001121521,
149
+ "eval_runtime": 49.3956,
150
+ "eval_samples_per_second": 13.159,
151
+ "eval_steps_per_second": 1.66,
152
+ "eval_wer": 0.7896235368554255,
153
+ "step": 900
154
+ },
155
+ {
156
+ "epoch": 5.617977528089888,
157
+ "grad_norm": 0.746986448764801,
158
+ "learning_rate": 0.0002486450381679389,
159
+ "loss": 0.5467,
160
+ "step": 1000
161
+ },
162
+ {
163
+ "epoch": 5.617977528089888,
164
+ "eval_loss": 0.6841106414794922,
165
+ "eval_runtime": 49.7948,
166
+ "eval_samples_per_second": 13.054,
167
+ "eval_steps_per_second": 1.647,
168
+ "eval_wer": 0.7486554887693768,
169
+ "step": 1000
170
+ },
171
+ {
172
+ "epoch": 6.179775280898877,
173
+ "grad_norm": 0.8229517936706543,
174
+ "learning_rate": 0.00024291984732824427,
175
+ "loss": 0.5194,
176
+ "step": 1100
177
+ },
178
+ {
179
+ "epoch": 6.179775280898877,
180
+ "eval_loss": 0.7867633700370789,
181
+ "eval_runtime": 49.8846,
182
+ "eval_samples_per_second": 13.03,
183
+ "eval_steps_per_second": 1.644,
184
+ "eval_wer": 0.694875039544448,
185
+ "step": 1100
186
+ },
187
+ {
188
+ "epoch": 6.741573033707866,
189
+ "grad_norm": 0.969409704208374,
190
+ "learning_rate": 0.0002371946564885496,
191
+ "loss": 0.4617,
192
+ "step": 1200
193
+ },
194
+ {
195
+ "epoch": 6.741573033707866,
196
+ "eval_loss": 0.7563472986221313,
197
+ "eval_runtime": 49.4271,
198
+ "eval_samples_per_second": 13.151,
199
+ "eval_steps_per_second": 1.659,
200
+ "eval_wer": 0.7277760202467574,
201
+ "step": 1200
202
+ },
203
+ {
204
+ "epoch": 7.303370786516854,
205
+ "grad_norm": 0.5057498812675476,
206
+ "learning_rate": 0.00023146946564885494,
207
+ "loss": 0.4525,
208
+ "step": 1300
209
+ },
210
+ {
211
+ "epoch": 7.303370786516854,
212
+ "eval_loss": 0.7276196479797363,
213
+ "eval_runtime": 49.3168,
214
+ "eval_samples_per_second": 13.18,
215
+ "eval_steps_per_second": 1.663,
216
+ "eval_wer": 0.6730465042708004,
217
+ "step": 1300
218
+ },
219
+ {
220
+ "epoch": 7.303370786516854,
221
+ "step": 1300,
222
+ "total_flos": 1.4772414251163556e+19,
223
+ "train_loss": 1.0161105111929087,
224
+ "train_runtime": 3259.7695,
225
+ "train_samples_per_second": 26.183,
226
+ "train_steps_per_second": 1.638
227
+ }
228
+ ],
229
+ "logging_steps": 100,
230
+ "max_steps": 5340,
231
+ "num_input_tokens_seen": 0,
232
+ "num_train_epochs": 30,
233
+ "save_steps": 200,
234
+ "stateful_callbacks": {
235
+ "EarlyStoppingCallback": {
236
+ "args": {
237
+ "early_stopping_patience": 3,
238
+ "early_stopping_threshold": 0.0
239
+ },
240
+ "attributes": {
241
+ "early_stopping_patience_counter": 2
242
+ }
243
+ },
244
+ "TrainerControl": {
245
+ "args": {
246
+ "should_epoch_stop": false,
247
+ "should_evaluate": false,
248
+ "should_log": false,
249
+ "should_save": true,
250
+ "should_training_stop": false
251
+ },
252
+ "attributes": {}
253
+ }
254
+ },
255
+ "total_flos": 1.4772414251163556e+19,
256
+ "train_batch_size": 8,
257
+ "trial_name": null,
258
+ "trial_params": null
259
+ }