zuazo committed on
Commit
4e64cff
1 Parent(s): 9e2de35

End of training

Browse files
README.md CHANGED
@@ -1,21 +1,24 @@
1
  ---
 
 
2
  license: apache-2.0
3
  base_model: openai/whisper-large
4
  tags:
 
5
  - generated_from_trainer
6
  datasets:
7
- - common_voice_13_0
8
  metrics:
9
  - wer
10
  model-index:
11
- - name: openai/whisper-large
12
  results:
13
  - task:
14
  name: Automatic Speech Recognition
15
  type: automatic-speech-recognition
16
  dataset:
17
- name: common_voice_13_0
18
- type: common_voice_13_0
19
  config: eu
20
  split: test
21
  args: eu
@@ -28,9 +31,9 @@ model-index:
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
  should probably proofread and complete it, then remove this comment. -->
30
 
31
- # openai/whisper-large
32
 
33
- This model is a fine-tuned version of [openai/whisper-large](https://huggingface.co/openai/whisper-large) on the common_voice_13_0 dataset.
34
  It achieves the following results on the evaluation set:
35
  - Loss: 0.4369
36
  - Wer: 12.2342
 
1
  ---
2
+ language:
3
+ - eu
4
  license: apache-2.0
5
  base_model: openai/whisper-large
6
  tags:
7
+ - whisper-event
8
  - generated_from_trainer
9
  datasets:
10
+ - mozilla-foundation/common_voice_13_0
11
  metrics:
12
  - wer
13
  model-index:
14
+ - name: Whisper Large Basque
15
  results:
16
  - task:
17
  name: Automatic Speech Recognition
18
  type: automatic-speech-recognition
19
  dataset:
20
+ name: mozilla-foundation/common_voice_13_0 eu
21
+ type: mozilla-foundation/common_voice_13_0
22
  config: eu
23
  split: test
24
  args: eu
 
31
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
32
  should probably proofread and complete it, then remove this comment. -->
33
 
34
+ # Whisper Large Basque
35
 
36
+ This model is a fine-tuned version of [openai/whisper-large](https://huggingface.co/openai/whisper-large) on the mozilla-foundation/common_voice_13_0 eu dataset.
37
  It achieves the following results on the evaluation set:
38
  - Loss: 0.4369
39
  - Wer: 12.2342
all_results.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 97.0,
3
+ "eval_loss": 0.4369344115257263,
4
+ "eval_runtime": 2347.3427,
5
+ "eval_samples_per_second": 2.808,
6
+ "eval_steps_per_second": 0.176,
7
+ "eval_wer": 12.234193365466401,
8
+ "train_loss": 0.011489377881600376,
9
+ "train_runtime": 415456.4714,
10
+ "train_samples_per_second": 3.081,
11
+ "train_steps_per_second": 0.048
12
+ }
eval_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 97.0,
3
+ "eval_loss": 0.4369344115257263,
4
+ "eval_runtime": 2347.3427,
5
+ "eval_samples_per_second": 2.808,
6
+ "eval_steps_per_second": 0.176,
7
+ "eval_wer": 12.234193365466401
8
+ }
train_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 97.0,
3
+ "train_loss": 0.011489377881600376,
4
+ "train_runtime": 415456.4714,
5
+ "train_samples_per_second": 3.081,
6
+ "train_steps_per_second": 0.048
7
+ }
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff