FarhadMadadzade committed on
Commit
76cf0d6
1 Parent(s): dd169d3

End of training

Files changed (2)
  1. README.md +92 -0
  2. generation_config.json +256 -0
README.md ADDED
@@ -0,0 +1,92 @@
+ ---
+ language:
+ - en
+ base_model: openai/whisper-small
+ tags:
+ - generated_from_trainer
+ datasets:
+ - QEC
+ metrics:
+ - wer
+ model-index:
+ - name: whisper-small-quartr
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: Quartr Earnings Calls
+       type: QEC
+       args: 'config: en, split: test'
+     metrics:
+     - name: Wer
+       type: wer
+       value: 32.834424695977546
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # whisper-small-quartr
+
+ This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Quartr Earnings Calls dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6429
+ - Wer: 32.8344
+
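+ A minimal usage sketch with the `transformers` ASR pipeline (the repository id and the audio filename below are assumptions for illustration):
+
+ ```python
+ from transformers import pipeline
+
+ # Hypothetical repo id, inferred from the committer and model name.
+ asr = pipeline(
+     "automatic-speech-recognition",
+     model="FarhadMadadzade/whisper-small-quartr",
+ )
+
+ result = asr("earnings_call.wav")  # hypothetical audio file
+ print(result["text"])
+ ```
+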
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a hedged `Seq2SeqTrainingArguments` sketch follows the list):
+ - learning_rate: 8.120528078446462e-06
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine_with_restarts
+ - lr_scheduler_warmup_steps: 84
+ - training_steps: 1500
+ - mixed_precision_training: Native AMP
+
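+ A sketch of how the listed hyperparameters would map onto `transformers`' `Seq2SeqTrainingArguments`; this is not the original training script, and the output directory is an assumption. The Adam betas and epsilon match the library defaults, so they are omitted.
+
+ ```python
+ from transformers import Seq2SeqTrainingArguments
+
+ training_args = Seq2SeqTrainingArguments(
+     output_dir="whisper-small-quartr",  # hypothetical output directory
+     learning_rate=8.120528078446462e-06,
+     per_device_train_batch_size=8,
+     per_device_eval_batch_size=8,
+     seed=42,
+     gradient_accumulation_steps=2,      # 8 x 2 = total_train_batch_size 16
+     lr_scheduler_type="cosine_with_restarts",
+     warmup_steps=84,
+     max_steps=1500,                     # training_steps
+     fp16=True,                          # "Native AMP" mixed precision
+ )
+ ```
+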
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|
+ | 0.6771 | 0.32 | 100 | 0.6577 | 25.7437 |
+ | 0.6533 | 0.64 | 200 | 0.6025 | 34.6804 |
+ | 0.5793 | 0.96 | 300 | 0.5784 | 24.3530 |
+ | 0.3872 | 1.28 | 400 | 0.5856 | 32.9592 |
+ | 0.4447 | 1.61 | 500 | 0.5646 | 23.2429 |
+ | 0.414 | 1.93 | 600 | 0.5616 | 70.7016 |
+ | 0.2489 | 2.25 | 700 | 0.5816 | 35.2666 |
+ | 0.2863 | 2.57 | 800 | 0.5853 | 24.7583 |
+ | 0.2698 | 2.89 | 900 | 0.5844 | 32.7409 |
+ | 0.1646 | 3.21 | 1000 | 0.6182 | 27.7892 |
+ | 0.174 | 3.53 | 1100 | 0.6228 | 36.2021 |
+ | 0.2021 | 3.85 | 1200 | 0.6269 | 35.9775 |
+ | 0.2239 | 4.17 | 1300 | 0.6367 | 35.2666 |
+ | 0.1551 | 4.49 | 1400 | 0.6420 | 32.4478 |
+ | 0.1351 | 4.82 | 1500 | 0.6429 | 32.8344 |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.0.dev0
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.18.1.dev0
+ - Tokenizers 0.15.2
generation_config.json ADDED
@@ -0,0 +1,256 @@
+ {
+   "alignment_heads": [
+     [
+       5,
+       3
+     ],
+     [
+       5,
+       9
+     ],
+     [
+       8,
+       0
+     ],
+     [
+       8,
+       4
+     ],
+     [
+       8,
+       7
+     ],
+     [
+       8,
+       8
+     ],
+     [
+       9,
+       0
+     ],
+     [
+       9,
+       7
+     ],
+     [
+       9,
+       9
+     ],
+     [
+       10,
+       5
+     ]
+   ],
+   "begin_suppress_tokens": [
+     220,
+     50257
+   ],
+   "bos_token_id": 50257,
+   "decoder_start_token_id": 50258,
+   "eos_token_id": 50257,
+   "is_multilingual": true,
+   "lang_to_id": {
+     "<|af|>": 50327,
+     "<|am|>": 50334,
+     "<|ar|>": 50272,
+     "<|as|>": 50350,
+     "<|az|>": 50304,
+     "<|ba|>": 50355,
+     "<|be|>": 50330,
+     "<|bg|>": 50292,
+     "<|bn|>": 50302,
+     "<|bo|>": 50347,
+     "<|br|>": 50309,
+     "<|bs|>": 50315,
+     "<|ca|>": 50270,
+     "<|cs|>": 50283,
+     "<|cy|>": 50297,
+     "<|da|>": 50285,
+     "<|de|>": 50261,
+     "<|el|>": 50281,
+     "<|en|>": 50259,
+     "<|es|>": 50262,
+     "<|et|>": 50307,
+     "<|eu|>": 50310,
+     "<|fa|>": 50300,
+     "<|fi|>": 50277,
+     "<|fo|>": 50338,
+     "<|fr|>": 50265,
+     "<|gl|>": 50319,
+     "<|gu|>": 50333,
+     "<|haw|>": 50352,
+     "<|ha|>": 50354,
+     "<|he|>": 50279,
+     "<|hi|>": 50276,
+     "<|hr|>": 50291,
+     "<|ht|>": 50339,
+     "<|hu|>": 50286,
+     "<|hy|>": 50312,
+     "<|id|>": 50275,
+     "<|is|>": 50311,
+     "<|it|>": 50274,
+     "<|ja|>": 50266,
+     "<|jw|>": 50356,
+     "<|ka|>": 50329,
+     "<|kk|>": 50316,
+     "<|km|>": 50323,
+     "<|kn|>": 50306,
+     "<|ko|>": 50264,
+     "<|la|>": 50294,
+     "<|lb|>": 50345,
+     "<|ln|>": 50353,
+     "<|lo|>": 50336,
+     "<|lt|>": 50293,
+     "<|lv|>": 50301,
+     "<|mg|>": 50349,
+     "<|mi|>": 50295,
+     "<|mk|>": 50308,
+     "<|ml|>": 50296,
+     "<|mn|>": 50314,
+     "<|mr|>": 50320,
+     "<|ms|>": 50282,
+     "<|mt|>": 50343,
+     "<|my|>": 50346,
+     "<|ne|>": 50313,
+     "<|nl|>": 50271,
+     "<|nn|>": 50342,
+     "<|no|>": 50288,
+     "<|oc|>": 50328,
+     "<|pa|>": 50321,
+     "<|pl|>": 50269,
+     "<|ps|>": 50340,
+     "<|pt|>": 50267,
+     "<|ro|>": 50284,
+     "<|ru|>": 50263,
+     "<|sa|>": 50344,
+     "<|sd|>": 50332,
+     "<|si|>": 50322,
+     "<|sk|>": 50298,
+     "<|sl|>": 50305,
+     "<|sn|>": 50324,
+     "<|so|>": 50326,
+     "<|sq|>": 50317,
+     "<|sr|>": 50303,
+     "<|su|>": 50357,
+     "<|sv|>": 50273,
+     "<|sw|>": 50318,
+     "<|ta|>": 50287,
+     "<|te|>": 50299,
+     "<|tg|>": 50331,
+     "<|th|>": 50289,
+     "<|tk|>": 50341,
+     "<|tl|>": 50348,
+     "<|tr|>": 50268,
+     "<|tt|>": 50351,
+     "<|uk|>": 50280,
+     "<|ur|>": 50290,
+     "<|uz|>": 50337,
+     "<|vi|>": 50278,
+     "<|yi|>": 50335,
+     "<|yo|>": 50325,
+     "<|zh|>": 50260
+   },
+   "language": "english",
+   "max_initial_timestamp_index": 50,
+   "max_length": 448,
+   "no_timestamps_token_id": 50363,
+   "pad_token_id": 50257,
+   "prev_sot_token_id": 50361,
+   "return_timestamps": false,
+   "suppress_tokens": [
+     1,
+     2,
+     7,
+     8,
+     9,
+     10,
+     14,
+     25,
+     26,
+     27,
+     28,
+     29,
+     31,
+     58,
+     59,
+     60,
+     61,
+     62,
+     63,
+     90,
+     91,
+     92,
+     93,
+     359,
+     503,
+     522,
+     542,
+     873,
+     893,
+     902,
+     918,
+     922,
+     931,
+     1350,
+     1853,
+     1982,
+     2460,
+     2627,
+     3246,
+     3253,
+     3268,
+     3536,
+     3846,
+     3961,
+     4183,
+     4667,
+     6585,
+     6647,
+     7273,
+     9061,
+     9383,
+     10428,
+     10929,
+     11938,
+     12033,
+     12331,
+     12562,
+     13793,
+     14157,
+     14635,
+     15265,
+     15618,
+     16553,
+     16604,
+     18362,
+     18956,
+     20075,
+     21675,
+     22520,
+     26130,
+     26161,
+     26435,
+     28279,
+     29464,
+     31650,
+     32302,
+     32470,
+     36865,
+     42863,
+     47425,
+     49870,
+     50254,
+     50258,
+     50358,
+     50359,
+     50360,
+     50361,
+     50362
+   ],
+   "task": "transcribe",
+   "task_to_id": {
+     "transcribe": 50359,
+     "translate": 50358
+   },
+   "transformers_version": "4.40.0.dev0"
+ }
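
This generation config is what Whisper's `generate()` reads by default at inference time. A minimal sketch of inspecting it with `transformers` (the repository id is an assumption):

```python
from transformers import GenerationConfig

# Hypothetical repo id; loads the generation_config.json shown above.
gen_config = GenerationConfig.from_pretrained("FarhadMadadzade/whisper-small-quartr")
print(gen_config.task, gen_config.language)  # transcribe english
```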