csikasote committed on
Commit 9f4f5cb
1 Parent(s): b9959d9

End of training

README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: apache-2.0
 base_model: facebook/wav2vec2-xls-r-1b
 tags:
+- automatic-speech-recognition
+- natbed
 - generated_from_trainer
 metrics:
 - wer
@@ -16,10 +18,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # xls-r-1b-bem-natbed-combined-model
 
-This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on an unknown dataset.
+This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the NATBED - BEM dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7916
-- Wer: 0.7758
+- Loss: 0.7801
+- Wer: 0.7879
 
 ## Model description
 
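The newly added `automatic-speech-recognition` tag means the checkpoint can be driven through the standard transformers ASR pipeline. A minimal sketch, assuming the model is published on the Hub under an id like `csikasote/xls-r-1b-bem-natbed-combined-model` (hypothetical here) and that you have a 16 kHz mono WAV file to transcribe:

```python
from transformers import pipeline

# Hypothetical Hub id; substitute the actual repo this commit belongs to.
MODEL_ID = "csikasote/xls-r-1b-bem-natbed-combined-model"

# wav2vec2-xls-r-1b fine-tuned with a CTC head is served by the ASR pipeline.
asr = pipeline("automatic-speech-recognition", model=MODEL_ID)

# 16 kHz mono WAV is the safest input format for wav2vec2-style models.
result = asr("sample_bemba_clip.wav")
print(result["text"])
```

With a reported WER of roughly 0.79, transcripts from this checkpoint should be treated as drafts rather than final text.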
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 2.3794614902943017,
+    "eval_loss": 0.780085027217865,
+    "eval_runtime": 90.2213,
+    "eval_samples": 1358,
+    "eval_samples_per_second": 15.052,
+    "eval_steps_per_second": 1.884,
+    "eval_wer": 0.7878639188747137,
+    "total_flos": 9.705775703537089e+18,
+    "train_loss": 1.14412841796875,
+    "train_runtime": 3792.6045,
+    "train_samples": 6387,
+    "train_samples_per_second": 50.522,
+    "train_steps_per_second": 6.312
+}
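The throughput figures above follow directly from the runtimes and sample counts. A quick check (the formulas are my reading of how the HF Trainer reports speed, not something stated in this commit), using `num_train_epochs = 30` and `max_steps = 23940` from trainer_state.json later in this commit:

```python
# Evaluation throughput: samples divided by wall-clock eval runtime.
eval_samples, eval_runtime = 1358, 90.2213
print(round(eval_samples / eval_runtime, 3))   # 15.052 -> matches eval_samples_per_second

# Training throughput appears to be computed against the *planned* run
# (30 epochs / 23940 steps), even though early stopping ended training
# after ~2.38 epochs, so these figures overstate the realised throughput.
train_samples, train_runtime = 6387, 3792.6045
num_train_epochs, max_steps = 30, 23940
print(round(train_samples * num_train_epochs / train_runtime, 3))   # 50.522 -> train_samples_per_second
print(round(max_steps / train_runtime, 3))                          # 6.312  -> train_steps_per_second
```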
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 2.3794614902943017,
+    "eval_loss": 0.780085027217865,
+    "eval_runtime": 90.2213,
+    "eval_samples": 1358,
+    "eval_samples_per_second": 15.052,
+    "eval_steps_per_second": 1.884,
+    "eval_wer": 0.7878639188747137
+}
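The `eval_wer` of about 0.788 means that, across the 1358 evaluation utterances, roughly 79 word edits (substitutions, insertions, deletions) are needed per 100 reference words. A minimal sketch of how such a score is computed with the `evaluate` library's WER metric, on toy strings since the actual references and predictions are not part of this commit:

```python
import evaluate

wer_metric = evaluate.load("wer")

# Toy reference/hypothesis pair; the real eval set has 1358 utterances.
references = ["the cat sat on the mat"]
predictions = ["the cat sat on mat"]

# One deletion out of six reference words -> WER = 1/6 ~= 0.167.
print(wer_metric.compute(predictions=predictions, references=references))
```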
runs/Sep29_08-03-03_08560ebc7a23/events.out.tfevents.1727600998.08560ebc7a23.5836.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f75875bcc0eed12510ed705b33469727371d453f994fbb6360e274f520a56420
+size 406
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 2.3794614902943017,
+    "total_flos": 9.705775703537089e+18,
+    "train_loss": 1.14412841796875,
+    "train_runtime": 3792.6045,
+    "train_samples": 6387,
+    "train_samples_per_second": 50.522,
+    "train_steps_per_second": 6.312
+}
trainer_state.json ADDED
@@ -0,0 +1,243 @@
+{
+  "best_metric": 0.780085027217865,
+  "best_model_checkpoint": "./xls-r-1b-bem-natbed-combined-model/checkpoint-1600",
+  "epoch": 2.3794614902943017,
+  "eval_steps": 100,
+  "global_step": 1900,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.12523481527864747,
+      "eval_loss": 1.372534990310669,
+      "eval_runtime": 91.0174,
+      "eval_samples_per_second": 14.92,
+      "eval_steps_per_second": 1.868,
+      "eval_wer": 0.9804546941445862,
+      "step": 100
+    },
+    {
+      "epoch": 0.25046963055729493,
+      "eval_loss": 0.9491279721260071,
+      "eval_runtime": 89.1948,
+      "eval_samples_per_second": 15.225,
+      "eval_steps_per_second": 1.906,
+      "eval_wer": 0.8541053320248609,
+      "step": 200
+    },
+    {
+      "epoch": 0.37570444583594237,
+      "eval_loss": 0.9524340033531189,
+      "eval_runtime": 89.2642,
+      "eval_samples_per_second": 15.213,
+      "eval_steps_per_second": 1.904,
+      "eval_wer": 0.8137062479555119,
+      "step": 300
+    },
+    {
+      "epoch": 0.5009392611145899,
+      "eval_loss": 1.0354979038238525,
+      "eval_runtime": 88.9821,
+      "eval_samples_per_second": 15.262,
+      "eval_steps_per_second": 1.91,
+      "eval_wer": 0.9016192345436702,
+      "step": 400
+    },
+    {
+      "epoch": 0.6261740763932373,
+      "grad_norm": 3.467607259750366,
+      "learning_rate": 0.00029759999999999997,
+      "loss": 1.7945,
+      "step": 500
+    },
+    {
+      "epoch": 0.6261740763932373,
+      "eval_loss": 0.9610921144485474,
+      "eval_runtime": 89.3241,
+      "eval_samples_per_second": 15.203,
+      "eval_steps_per_second": 1.903,
+      "eval_wer": 0.8788027477919529,
+      "step": 500
+    },
+    {
+      "epoch": 0.7514088916718847,
+      "eval_loss": 0.9790019392967224,
+      "eval_runtime": 88.7534,
+      "eval_samples_per_second": 15.301,
+      "eval_steps_per_second": 1.915,
+      "eval_wer": 0.864245992803402,
+      "step": 600
+    },
+    {
+      "epoch": 0.8766437069505323,
+      "eval_loss": 0.9877487421035767,
+      "eval_runtime": 88.7715,
+      "eval_samples_per_second": 15.298,
+      "eval_steps_per_second": 1.915,
+      "eval_wer": 0.8602387962054302,
+      "step": 700
+    },
+    {
+      "epoch": 1.0018785222291797,
+      "eval_loss": 0.9603913426399231,
+      "eval_runtime": 88.9263,
+      "eval_samples_per_second": 15.271,
+      "eval_steps_per_second": 1.912,
+      "eval_wer": 0.8925417075564279,
+      "step": 800
+    },
+    {
+      "epoch": 1.127113337507827,
+      "eval_loss": 0.8880479335784912,
+      "eval_runtime": 90.5701,
+      "eval_samples_per_second": 14.994,
+      "eval_steps_per_second": 1.877,
+      "eval_wer": 0.8327608766764802,
+      "step": 900
+    },
+    {
+      "epoch": 1.2523481527864746,
+      "grad_norm": 1.2925059795379639,
+      "learning_rate": 0.00029365187713310576,
+      "loss": 0.9885,
+      "step": 1000
+    },
+    {
+      "epoch": 1.2523481527864746,
+      "eval_loss": 0.8917101621627808,
+      "eval_runtime": 89.0247,
+      "eval_samples_per_second": 15.254,
+      "eval_steps_per_second": 1.91,
+      "eval_wer": 0.8368498527968596,
+      "step": 1000
+    },
+    {
+      "epoch": 1.3775829680651221,
+      "eval_loss": 0.9034265875816345,
+      "eval_runtime": 89.348,
+      "eval_samples_per_second": 15.199,
+      "eval_steps_per_second": 1.903,
+      "eval_wer": 0.8306346090938829,
+      "step": 1100
+    },
+    {
+      "epoch": 1.5028177833437697,
+      "eval_loss": 0.8478356003761292,
+      "eval_runtime": 89.5799,
+      "eval_samples_per_second": 15.16,
+      "eval_steps_per_second": 1.898,
+      "eval_wer": 0.7937520444880601,
+      "step": 1200
+    },
+    {
+      "epoch": 1.628052598622417,
+      "eval_loss": 0.8666252493858337,
+      "eval_runtime": 89.9747,
+      "eval_samples_per_second": 15.093,
+      "eval_steps_per_second": 1.889,
+      "eval_wer": 0.8627739614000655,
+      "step": 1300
+    },
+    {
+      "epoch": 1.7532874139010644,
+      "eval_loss": 0.833095371723175,
+      "eval_runtime": 89.2147,
+      "eval_samples_per_second": 15.222,
+      "eval_steps_per_second": 1.906,
+      "eval_wer": 0.8218024206738632,
+      "step": 1400
+    },
+    {
+      "epoch": 1.878522229179712,
+      "grad_norm": 7.0910844802856445,
+      "learning_rate": 0.0002872525597269624,
+      "loss": 0.8854,
+      "step": 1500
+    },
+    {
+      "epoch": 1.878522229179712,
+      "eval_loss": 0.840486466884613,
+      "eval_runtime": 89.4023,
+      "eval_samples_per_second": 15.19,
+      "eval_steps_per_second": 1.902,
+      "eval_wer": 0.8045469414458619,
+      "step": 1500
+    },
+    {
+      "epoch": 2.0037570444583594,
+      "eval_loss": 0.780085027217865,
+      "eval_runtime": 91.0684,
+      "eval_samples_per_second": 14.912,
+      "eval_steps_per_second": 1.867,
+      "eval_wer": 0.7878639188747137,
+      "step": 1600
+    },
+    {
+      "epoch": 2.128991859737007,
+      "eval_loss": 0.8304778933525085,
+      "eval_runtime": 89.8902,
+      "eval_samples_per_second": 15.107,
+      "eval_steps_per_second": 1.891,
+      "eval_wer": 0.7917075564278705,
+      "step": 1700
+    },
+    {
+      "epoch": 2.254226675015654,
+      "eval_loss": 0.7971779108047485,
+      "eval_runtime": 89.8082,
+      "eval_samples_per_second": 15.121,
+      "eval_steps_per_second": 1.893,
+      "eval_wer": 0.7910533202486097,
+      "step": 1800
+    },
+    {
+      "epoch": 2.3794614902943017,
+      "eval_loss": 0.7915882468223572,
+      "eval_runtime": 90.3859,
+      "eval_samples_per_second": 15.024,
+      "eval_steps_per_second": 1.881,
+      "eval_wer": 0.7758423290807982,
+      "step": 1900
+    },
+    {
+      "epoch": 2.3794614902943017,
+      "step": 1900,
+      "total_flos": 9.705775703537089e+18,
+      "train_loss": 1.14412841796875,
+      "train_runtime": 3792.6045,
+      "train_samples_per_second": 50.522,
+      "train_steps_per_second": 6.312
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 23940,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 30,
+  "save_steps": 400,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 3,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 0
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 9.705775703537089e+18,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
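The trainer state above also explains why the run stopped at step 1900 of a planned 23940: `eval_loss` (the `best_metric`) last improved at step 1600, and the `EarlyStoppingCallback` with patience 3 then saw three consecutive evaluations without improvement. A minimal sketch of that patience rule replayed over the logged losses (my own re-implementation, not the Trainer's code):

```python
# eval_loss values from log_history, keyed by step (only steps 1600+ matter here).
eval_losses = {
    1600: 0.780085027217865,   # best_metric / best_model_checkpoint
    1700: 0.8304778933525085,
    1800: 0.7971779108047485,
    1900: 0.7915882468223572,
}

patience, threshold = 3, 0.0   # stateful_callbacks.EarlyStoppingCallback.args
best, counter = float("inf"), 0
for step, loss in sorted(eval_losses.items()):
    if loss < best - threshold:          # lower eval_loss counts as an improvement
        best, counter = loss, 0
    else:
        counter += 1
    if counter >= patience:
        print(f"early stop at step {step}")   # -> early stop at step 1900
        break
```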