gary109 committed
Commit e92b371
1 Parent(s): 8b4f6a3

language model

alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": [" ", "#", "1", "2", "3", "4", "5", "a", "b", "c", "d", "e", "f", "g", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/5gram_correct.arpa ADDED
The diff for this file is too large to render. See raw diff
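The _correct suffix matches the common KenLM-for-CTC recipe in which the raw ARPA file is patched to declare an end-of-sentence unigram before being handed to pyctcdecode. How this particular file was produced is not recorded in the commit, so the following is only a sketch of that recipe: text.txt, the lmplz binary on PATH, and the intermediate 5gram.arpa are all assumptions.

import subprocess

# Assumed step 1: build a raw 5-gram ARPA model with KenLM's lmplz.
with open("text.txt") as corpus, open("5gram.arpa", "w") as raw:
    subprocess.run(["lmplz", "-o", "5"], stdin=corpus, stdout=raw, check=True)

# Assumed step 2: bump the unigram count and mirror the "<s>" entry as "</s>"
# so the header declares both sentence boundaries.
with open("5gram.arpa") as src, open("5gram_correct.arpa", "w") as dst:
    added_eos = False
    for line in src:
        if not added_eos and "ngram 1=" in line:
            count = line.strip().split("=")[-1]
            dst.write(line.replace(count, str(int(count) + 1)))
        elif not added_eos and "<s>" in line:
            dst.write(line)
            dst.write(line.replace("<s>", "</s>"))
            added_eos = True
        else:
            dst.write(line)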
 
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt ADDED
@@ -0,0 +1,77 @@
+ </s>
+ <s>
+ a#2
+ a#2.
+ a#3
+ a#3.
+ a#4
+ a#4.
+ a#5
+ a#5.
+ a1
+ a2
+ a3
+ a3.
+ a4
+ a4.
+ a5
+ b2
+ b3
+ b3.
+ b4
+ b4.
+ b5
+ c#3
+ c#3.
+ c#4
+ c#4.
+ c#5
+ c#5.
+ c3
+ c4
+ c4.
+ c5
+ c5.
+ d#3
+ d#3.
+ d#4
+ d#4.
+ d#5
+ d2
+ d3
+ d3.
+ d4
+ d4.
+ d5
+ d5.
+ e2
+ e3
+ e3.
+ e4
+ e4.
+ e5
+ f#2
+ f#3
+ f#3.
+ f#4
+ f#4.
+ f#5
+ f1
+ f2
+ f3
+ f3.
+ f4
+ f4.
+ f5
+ g#2
+ g#3
+ g#3.
+ g#4
+ g#4.
+ g#5
+ g2
+ g3
+ g3.
+ g4
+ g4.
+ g5
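The 77 unigrams are not words but note labels from the singing-transcription vocabulary: a pitch class a–g, an optional # for sharps, an octave digit 1–5, and an optional trailing dot, plus the <s>/</s> boundary tokens. A quick sanity check of that pattern (the regex is inferred from the list itself, not taken from the training code):

import re

NOTE = re.compile(r"[a-g]#?[1-5]\.?")

with open("language_model/unigrams.txt") as f:
    unigrams = [line.strip() for line in f if line.strip()]

notes = [u for u in unigrams if u not in {"<s>", "</s>"}]
assert all(NOTE.fullmatch(n) for n in notes)
print(f"{len(unigrams)} unigrams, {len(notes)} note labels")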
special_tokens_map.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "[PAD]",
+   "unk_token": "[UNK]"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token": "<s>",
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "name_or_path": "gary109/ai-light-dance_singing2_ft_wav2vec2-large-xlsr-53-5gram-v4-1",
+   "pad_token": "[PAD]",
+   "processor_class": "Wav2Vec2ProcessorWithLM",
+   "replace_word_delimiter_char": " ",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "Wav2Vec2CTCTokenizer",
+   "trust_remote_code": false,
+   "unk_token": "[UNK]",
+   "word_delimiter_token": "|"
+ }
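With processor_class set to Wav2Vec2ProcessorWithLM, the repo (tokenizer, feature extractor, and the language_model/ directory added here) loads in a single call, and batch_decode runs the pyctcdecode beam search automatically. A usage sketch, assuming the repo id from name_or_path above hosts the matching acoustic model and that a real 16 kHz recording replaces the silent placeholder:

import numpy as np
import torch
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

repo = "gary109/ai-light-dance_singing2_ft_wav2vec2-large-xlsr-53-5gram-v4-1"
processor = Wav2Vec2ProcessorWithLM.from_pretrained(repo)
model = AutoModelForCTC.from_pretrained(repo)

audio = np.zeros(16_000, dtype=np.float32)  # placeholder: one second of silence
inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# LM-boosted beam search over the CTC logits (note the numpy conversion).
print(processor.batch_decode(logits.numpy()).text[0])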
vocab.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "#": 1,
+   "1": 2,
+   "2": 3,
+   "3": 4,
+   "4": 5,
+   "5": 6,
+   "[PAD]": 15,
+   "[UNK]": 14,
+   "a": 7,
+   "b": 8,
+   "c": 9,
+   "d": 10,
+   "e": 11,
+   "f": 12,
+   "g": 13,
+   "|": 0
+ }
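Sorting this vocabulary by index and substituting the special tokens reproduces the labels list in alphabet.json at the top of this commit: | becomes a space, [UNK] becomes ⁇, [PAD] becomes the empty string used as the CTC blank, and <s>/</s> are appended. The substitution rules below are an inference from the two files rather than a quote of the training script, but the check passes against the contents shown here:

import json

with open("vocab.json") as f:
    vocab = json.load(f)
with open("alphabet.json") as f:
    labels = json.load(f)["labels"]

# Order tokens by CTC output index, then apply the apparent substitutions.
subst = {"|": " ", "[UNK]": "\u2047", "[PAD]": ""}
derived = [subst.get(tok, tok) for tok, _ in sorted(vocab.items(), key=lambda kv: kv[1])]
derived += ["<s>", "</s>"]

assert derived == labels
print("vocab.json and alphabet.json agree on all", len(labels), "labels")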