Radamés Ajna and lmz committed on
Commit d624cad
0 Parent(s)

Duplicate from lmz/candle-whisper


Co-authored-by: Laurent Mazare <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,41 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model-tiny-q4k.gguf filter=lfs diff=lfs merge=lfs -text
+ model-tiny-q8k.gguf filter=lfs diff=lfs merge=lfs -text
+ model-tiny-q40.gguf filter=lfs diff=lfs merge=lfs -text
+ model-tiny-en-q40.gguf filter=lfs diff=lfs merge=lfs -text
+ model-tiny-en-q80.gguf filter=lfs diff=lfs merge=lfs -text
+ model-tiny-q80.gguf filter=lfs diff=lfs merge=lfs -text
config-tiny-en.json ADDED
@@ -0,0 +1,138 @@
+ {
+   "_name_or_path": "openai/whisper-tiny.en",
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "architectures": [
+     "WhisperForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "begin_suppress_tokens": [
+     220,
+     50256
+   ],
+   "bos_token_id": 50257,
+   "d_model": 384,
+   "decoder_attention_heads": 6,
+   "decoder_ffn_dim": 1536,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 4,
+   "decoder_start_token_id": 50257,
+   "dropout": 0.0,
+   "encoder_attention_heads": 6,
+   "encoder_ffn_dim": 1536,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 4,
+   "eos_token_id": 50256,
+   "forced_decoder_ids": [
+     [
+       1,
+       50362
+     ]
+   ],
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "max_length": 448,
+   "max_source_positions": 1500,
+   "max_target_positions": 448,
+   "model_type": "whisper",
+   "num_hidden_layers": 4,
+   "num_mel_bins": 80,
+   "pad_token_id": 50256,
+   "scale_embedding": false,
+   "suppress_tokens": [
+     1,
+     2,
+     7,
+     8,
+     9,
+     10,
+     14,
+     25,
+     26,
+     27,
+     28,
+     29,
+     31,
+     58,
+     59,
+     60,
+     61,
+     62,
+     63,
+     90,
+     91,
+     92,
+     93,
+     357,
+     366,
+     438,
+     532,
+     685,
+     705,
+     796,
+     930,
+     1058,
+     1220,
+     1267,
+     1279,
+     1303,
+     1343,
+     1377,
+     1391,
+     1635,
+     1782,
+     1875,
+     2162,
+     2361,
+     2488,
+     3467,
+     4008,
+     4211,
+     4600,
+     4808,
+     5299,
+     5855,
+     6329,
+     7203,
+     9609,
+     9959,
+     10563,
+     10786,
+     11420,
+     11709,
+     11907,
+     13163,
+     13697,
+     13700,
+     14808,
+     15306,
+     16410,
+     16791,
+     17992,
+     19203,
+     19510,
+     20724,
+     22305,
+     22935,
+     27007,
+     30109,
+     30420,
+     33409,
+     34949,
+     40283,
+     40493,
+     40549,
+     47282,
+     49146,
+     50257,
+     50357,
+     50358,
+     50359,
+     50360,
+     50361
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.0.dev0",
+   "use_cache": true,
+   "vocab_size": 51864
+ }
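
To make the configuration concrete, here is a minimal sketch of reading a few of the fields above with serde/serde_json in Rust (candle's language). The struct is an illustrative subset that mirrors the JSON keys shown here, not the actual candle-whisper Config type; the local file path and the serde/serde_json dependencies are assumptions.

use serde::Deserialize;

// Hypothetical subset of the Whisper config above; keys not listed are simply ignored.
#[derive(Debug, Deserialize)]
struct WhisperConfig {
    d_model: usize,
    encoder_layers: usize,
    decoder_layers: usize,
    encoder_attention_heads: usize,
    decoder_attention_heads: usize,
    num_mel_bins: usize,
    max_source_positions: usize,
    max_target_positions: usize,
    suppress_tokens: Vec<u32>,
    vocab_size: usize,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed local path; a Space would typically fetch the file from the hub first.
    let raw = std::fs::read_to_string("config-tiny-en.json")?;
    let config: WhisperConfig = serde_json::from_str(&raw)?;
    // For whisper-tiny.en this prints d_model = 384, vocab_size = 51864, etc.
    println!("{config:#?}");
    Ok(())
}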
config-tiny.json ADDED
@@ -0,0 +1,144 @@
+ {
+   "_name_or_path": "openai/whisper-tiny",
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "architectures": [
+     "WhisperForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "begin_suppress_tokens": [
+     220,
+     50257
+   ],
+   "bos_token_id": 50257,
+   "d_model": 384,
+   "decoder_attention_heads": 6,
+   "decoder_ffn_dim": 1536,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 4,
+   "decoder_start_token_id": 50258,
+   "dropout": 0.0,
+   "encoder_attention_heads": 6,
+   "encoder_ffn_dim": 1536,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 4,
+   "eos_token_id": 50257,
+   "forced_decoder_ids": [
+     [
+       1,
+       50259
+     ],
+     [
+       2,
+       50359
+     ],
+     [
+       3,
+       50363
+     ]
+   ],
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "max_length": 448,
+   "max_source_positions": 1500,
+   "max_target_positions": 448,
+   "model_type": "whisper",
+   "num_hidden_layers": 4,
+   "num_mel_bins": 80,
+   "pad_token_id": 50257,
+   "scale_embedding": false,
+   "suppress_tokens": [
+     1,
+     2,
+     7,
+     8,
+     9,
+     10,
+     14,
+     25,
+     26,
+     27,
+     28,
+     29,
+     31,
+     58,
+     59,
+     60,
+     61,
+     62,
+     63,
+     90,
+     91,
+     92,
+     93,
+     359,
+     503,
+     522,
+     542,
+     873,
+     893,
+     902,
+     918,
+     922,
+     931,
+     1350,
+     1853,
+     1982,
+     2460,
+     2627,
+     3246,
+     3253,
+     3268,
+     3536,
+     3846,
+     3961,
+     4183,
+     4667,
+     6585,
+     6647,
+     7273,
+     9061,
+     9383,
+     10428,
+     10929,
+     11938,
+     12033,
+     12331,
+     12562,
+     13793,
+     14157,
+     14635,
+     15265,
+     15618,
+     16553,
+     16604,
+     18362,
+     18956,
+     20075,
+     21675,
+     22520,
+     26130,
+     26161,
+     26435,
+     28279,
+     29464,
+     31650,
+     32302,
+     32470,
+     36865,
+     42863,
+     47425,
+     49870,
+     50254,
+     50258,
+     50358,
+     50359,
+     50360,
+     50361,
+     50362
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.0.dev0",
+   "use_cache": true,
+   "vocab_size": 51865
+ }
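
Compared with config-tiny-en.json above, the multilingual config differs mainly in "vocab_size" (51865 vs 51864), "decoder_start_token_id" (50258 vs 50257), and the extra "forced_decoder_ids" pairs that pin the language, task, and no-timestamps tokens. As a rough sketch only (this helper is hypothetical and not taken from the candle-whisper app code), one way an app might pick among the assets added in this commit:

// Hypothetical helper mapping a model variant to the filenames added in this commit:
// returns (config, tokenizer, quantized weights).
fn tiny_assets(multilingual: bool, q80: bool) -> (&'static str, &'static str, &'static str) {
    match (multilingual, q80) {
        (true, true) => ("config-tiny.json", "tokenizer-tiny.json", "model-tiny-q80.gguf"),
        (true, false) => ("config-tiny.json", "tokenizer-tiny.json", "model-tiny-q40.gguf"),
        (false, true) => ("config-tiny-en.json", "tokenizer-tiny-en.json", "model-tiny-en-q80.gguf"),
        (false, false) => ("config-tiny-en.json", "tokenizer-tiny-en.json", "model-tiny-en-q40.gguf"),
    }
}

fn main() {
    let (config, tokenizer, weights) = tiny_assets(true, false);
    println!("config={config} tokenizer={tokenizer} weights={weights}");
}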
model-tiny-en-q40.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71822f27635e77a62376b751363f32b81eb36cb02d3c5e13d7c7375e68aebfce
+ size 23251776
model-tiny-en-q80.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07997efe0b8d3577fc45d9b307ef578a01346f4088a5da4bd92377aaa3f72986
+ size 41841216
model-tiny-q40.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:330cbde1517a775d09df5c40a26c0b8caf531d9ceee3c17194f7a5707c43cda9
+ size 23252000
model-tiny-q4k.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b283f56d775d9253fbece81414f3b4000940dd2fea82c852ab101a9df4e96033
+ size 134835552
model-tiny-q80.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edcc907db61aef092f1244dc1e53c55056b472be343e9bbb4dc12ebd4740392f
+ size 41841632
model-tiny-q8k.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1835b240c61ae7255f409fcf470a494e169c09e31d8e5745de975d310b38dfbf
+ size 137563488
model-tiny.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ebd0e69e78190ffe1438491fa05cc1f5c1aa3a4c4db3bc1723adbb551ea2395
+ size 151061672
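
All of the weight files above are stored through Git LFS, so a clone without LFS contains only the three-line pointer stubs shown here: a spec version, the SHA-256 of the real payload, and its size in bytes. As a minimal sketch (not part of this repo), the check below distinguishes such a stub from the actual binary weights after a download:

use std::fs;

// Rough sketch: detect a Git LFS pointer stub and extract its oid and size.
// Pointer format per https://git-lfs.github.com/spec/v1, as shown in the diffs above.
fn parse_lfs_pointer(path: &str) -> Option<(String, u64)> {
    // Real .gguf/.safetensors payloads are binary and fail UTF-8 reading here.
    let text = fs::read_to_string(path).ok()?;
    if !text.starts_with("version https://git-lfs.github.com/spec/v1") {
        return None;
    }
    let mut oid = None;
    let mut size = None;
    for line in text.lines() {
        if let Some(rest) = line.strip_prefix("oid sha256:") {
            oid = Some(rest.trim().to_string());
        } else if let Some(rest) = line.strip_prefix("size ") {
            size = rest.trim().parse().ok();
        }
    }
    Some((oid?, size?))
}

fn main() {
    // Hypothetical usage: warn if only the pointer was fetched instead of the weights.
    match parse_lfs_pointer("model-tiny-q80.gguf") {
        Some((oid, size)) => eprintln!("LFS pointer only: oid={oid}, expected {size} bytes"),
        None => println!("looks like an actual model file"),
    }
}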
tokenizer-tiny-en.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer-tiny.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff