ylacombe committed
Commit 53d1dcc
Parent: a2ffea0

End of training

README.md CHANGED
@@ -1,6 +1,10 @@
 ---
+language:
+- ps
 base_model: ylacombe/w2v-bert-2.0
 tags:
+- automatic-speech-recognition
+- mozilla-foundation/common_voice_16_0
 - generated_from_trainer
 datasets:
 - common_voice_16_0
@@ -13,11 +17,11 @@ model-index:
       name: Automatic Speech Recognition
       type: automatic-speech-recognition
     dataset:
-      name: common_voice_16_0
+      name: MOZILLA-FOUNDATION/COMMON_VOICE_16_0 - PS
       type: common_voice_16_0
       config: ps
       split: test
-      args: ps
+      args: 'Config: ps, Training split: train+validation, Eval split: test'
     metrics:
     - name: Wer
       type: wer
@@ -29,9 +33,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # wav2vec2-common_voice-ps-demo
 
-This model is a fine-tuned version of [ylacombe/w2v-bert-2.0](https://huggingface.co/ylacombe/w2v-bert-2.0) on the common_voice_16_0 dataset.
+This model is a fine-tuned version of [ylacombe/w2v-bert-2.0](https://huggingface.co/ylacombe/w2v-bert-2.0) on the MOZILLA-FOUNDATION/COMMON_VOICE_16_0 - PS dataset.
 It achieves the following results on the evaluation set:
-- Loss: 3.0949
+- Loss: 3.0510
 - Wer: 0.9484
 
 ## Model description
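For context beyond the card text, a minimal inference sketch with the `transformers` library is shown below. The repository id `ylacombe/wav2vec2-common_voice-ps-demo` is assumed from the card title (it is not stated in this diff), and the input is assumed to be a 16 kHz mono audio array; given the reported WER of ~0.95, transcriptions from this demo checkpoint will be largely inaccurate.

```python
# Minimal usage sketch (assumed repo id; not part of this commit's training code).
import torch
from transformers import AutoModelForCTC, AutoProcessor

repo_id = "ylacombe/wav2vec2-common_voice-ps-demo"  # assumed from the card title
processor = AutoProcessor.from_pretrained(repo_id)
model = AutoModelForCTC.from_pretrained(repo_id)

def transcribe(audio_array, sampling_rate=16_000):
    # W2V-BERT 2.0 checkpoints take the processor's `input_features` as input
    inputs = processor(audio_array, sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    return processor.batch_decode(predicted_ids)[0]
```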
all_results.json ADDED
@@ -0,0 +1,14 @@
+{
+    "epoch": 14.77,
+    "eval_loss": 3.051039218902588,
+    "eval_runtime": 4.949,
+    "eval_samples": 195,
+    "eval_samples_per_second": 39.402,
+    "eval_steps_per_second": 5.052,
+    "eval_wer": 0.9484029484029484,
+    "train_loss": 6.593796793619791,
+    "train_runtime": 936.6313,
+    "train_samples": 1027,
+    "train_samples_per_second": 16.447,
+    "train_steps_per_second": 0.512
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 14.77,
+    "eval_loss": 3.051039218902588,
+    "eval_runtime": 4.949,
+    "eval_samples": 195,
+    "eval_samples_per_second": 39.402,
+    "eval_steps_per_second": 5.052,
+    "eval_wer": 0.9484029484029484
+}
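The `eval_wer` value above is a word error rate over the Common Voice 16.0 Pashto test split. The evaluation code itself is not part of this commit; the sketch below only illustrates how such a figure is commonly computed with the `evaluate` library, using hypothetical prediction/reference lists.

```python
# Illustrative WER computation (hypothetical inputs; not the actual eval script).
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["decoded hypothesis one", "decoded hypothesis two"]    # hypothetical model outputs
references = ["reference transcript one", "reference transcript two"]  # hypothetical test transcripts

wer = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.4f}")  # 0.9484 means roughly 95 word errors per 100 reference words
```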
runs/Jan01_22-09-15_vorace/events.out.tfevents.1704148023.vorace.125432.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5674dd5f310deb160584533a32537701dce7577b642d8dcd00939a7eaa9b0c4
+size 358
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 14.77,
+    "train_loss": 6.593796793619791,
+    "train_runtime": 936.6313,
+    "train_samples": 1027,
+    "train_samples_per_second": 16.447,
+    "train_steps_per_second": 0.512
+}
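The reported throughput appears internally consistent if one assumes the usual `Trainer` definition of samples seen over the whole run divided by wall-clock seconds (an assumption; the training script is not in this commit):

```python
# Rough consistency check of train_samples_per_second (assumed formula:
# train_samples * configured epochs / train_runtime).
train_samples, num_train_epochs, train_runtime = 1027, 15, 936.6313
print(round(train_samples * num_train_epochs / train_runtime, 3))  # 16.447
```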
trainer_state.json ADDED
@@ -0,0 +1,66 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 14.76923076923077,
+  "eval_steps": 100,
+  "global_step": 480,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 3.08,
+      "eval_loss": 10.369086265563965,
+      "eval_runtime": 5.0307,
+      "eval_samples_per_second": 38.762,
+      "eval_steps_per_second": 4.97,
+      "eval_wer": 1.0,
+      "step": 100
+    },
+    {
+      "epoch": 6.15,
+      "eval_loss": 3.5670173168182373,
+      "eval_runtime": 4.9472,
+      "eval_samples_per_second": 39.416,
+      "eval_steps_per_second": 5.053,
+      "eval_wer": 1.0,
+      "step": 200
+    },
+    {
+      "epoch": 9.23,
+      "eval_loss": 3.113863945007324,
+      "eval_runtime": 4.8913,
+      "eval_samples_per_second": 39.866,
+      "eval_steps_per_second": 5.111,
+      "eval_wer": 0.9484029484029484,
+      "step": 300
+    },
+    {
+      "epoch": 12.31,
+      "eval_loss": 3.094938039779663,
+      "eval_runtime": 4.8858,
+      "eval_samples_per_second": 39.912,
+      "eval_steps_per_second": 5.117,
+      "eval_wer": 0.9484029484029484,
+      "step": 400
+    },
+    {
+      "epoch": 14.77,
+      "step": 480,
+      "total_flos": 2.1645545063012813e+18,
+      "train_loss": 6.593796793619791,
+      "train_runtime": 936.6313,
+      "train_samples_per_second": 16.447,
+      "train_steps_per_second": 0.512
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 480,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 15,
+  "save_steps": 400,
+  "total_flos": 2.1645545063012813e+18,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": null
+}
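The `log_history` entries above show the evaluation loss falling from 10.37 at step 100 to 3.09 at step 400, while the WER plateaus at 0.9484 from step 300 onwards. A small sketch for inspecting that history from the file added in this commit:

```python
# Print the periodic evaluation entries stored in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_wer" in entry:  # the last entry is a training summary without eval metrics
        print(f"step {entry['step']:>3}: eval_loss={entry['eval_loss']:.4f}  eval_wer={entry['eval_wer']:.4f}")
```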