nataligzraryan committed on
Commit
ed2a157
1 Parent(s): c27c5bf

Upload tokenizer

Files changed (5)
  1. README.md +1 -1
  2. added_tokens.json +5 -0
  3. special_tokens_map.json +19 -0
  4. tokenizer_config.json +56 -0
  5. vocab.json +47 -0
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
  license: cc-by-nc-4.0
- base_model: facebook/mms-1b-all
  tags:
  - generated_from_trainer
  datasets:
  - common_voice_16_0
+ base_model: facebook/mms-1b-all
  model-index:
  - name: runs
    results: []
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "</s>": 44,
+   "<mask>": 45,
+   "<s>": 43
+ }
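These three ids continue directly after the per-language table in vocab.json below (ids 0–42), so the combined symbol table spans ids 0–45. A minimal merge sketch, assuming the JSON files from this commit sit in the working directory:

```python
import json

# vocab.json (below) defines ids 0-42 under the "hy" key;
# added_tokens.json appends <s>, </s>, <mask> as ids 43-45.
with open("vocab.json", encoding="utf-8") as f:
    base = json.load(f)["hy"]
with open("added_tokens.json", encoding="utf-8") as f:
    added = json.load(f)

full = {**base, **added}
print(len(full))  # 46 distinct symbols, ids 0-45
```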
special_tokens_map.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": "<mask>",
+   "pad_token": "[PAD]",
+   "unk_token": "[UNK]"
+ }
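The map only assigns roles; the ids themselves come from vocab.json ([UNK] 41, [PAD] 42) and added_tokens.json (<s> 43, </s> 44). A sketch of how these roles surface once the tokenizer is loaded; the repo id nataligzraryan/runs is an assumption inferred from the commit author and the model-index name:

```python
from transformers import Wav2Vec2CTCTokenizer

# Hypothetical repo id; substitute the actual one if it differs.
tok = Wav2Vec2CTCTokenizer.from_pretrained("nataligzraryan/runs")

# Roles declared in special_tokens_map.json surface as attributes:
print(tok.bos_token, tok.bos_token_id)  # <s> 43
print(tok.eos_token, tok.eos_token_id)  # </s> 44
print(tok.pad_token, tok.pad_token_id)  # [PAD] 42
print(tok.unk_token, tok.unk_token_id)  # [UNK] 41
```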
tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "41": {
+       "content": "[UNK]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "42": {
+       "content": "[PAD]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "43": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "44": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "45": {
+       "content": "<mask>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "replace_word_delimiter_char": " ",
+   "target_lang": "hy",
+   "tokenizer_class": "Wav2Vec2CTCTokenizer",
+   "unk_token": "[UNK]",
+   "word_delimiter_token": "|"
+ }
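Two settings matter downstream: tokenizer_class pins loading to Wav2Vec2CTCTokenizer, and target_lang "hy" selects the nested Armenian block of vocab.json (the huge model_max_length is just the no-limit sentinel transformers writes for CTC tokenizers). A minimal loading sketch under the same hypothetical repo-id assumption as above:

```python
from transformers import Wav2Vec2CTCTokenizer

# target_lang is already "hy" in tokenizer_config.json, so no extra
# argument is needed to select the nested vocabulary.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("nataligzraryan/runs")

print(tokenizer.word_delimiter_token)  # "|" -- decoded back into a space
print(len(tokenizer))                  # expected: 46 (ids 0-45)
```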
vocab.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "hy": {
+     "-": 1,
+     "[PAD]": 42,
+     "[UNK]": 41,
+     "|": 0,
+     "ա": 2,
+     "բ": 3,
+     "գ": 4,
+     "դ": 5,
+     "ե": 6,
+     "զ": 7,
+     "է": 8,
+     "ը": 9,
+     "թ": 10,
+     "ժ": 11,
+     "ի": 12,
+     "լ": 13,
+     "խ": 14,
+     "ծ": 15,
+     "կ": 16,
+     "հ": 17,
+     "ձ": 18,
+     "ղ": 19,
+     "ճ": 20,
+     "մ": 21,
+     "յ": 22,
+     "ն": 23,
+     "շ": 24,
+     "ո": 25,
+     "չ": 26,
+     "պ": 27,
+     "ջ": 28,
+     "ռ": 29,
+     "ս": 30,
+     "վ": 31,
+     "տ": 32,
+     "ր": 33,
+     "ց": 34,
+     "ւ": 35,
+     "փ": 36,
+     "ք": 37,
+     "օ": 38,
+     "ֆ": 39,
+     "և": 40
+   }
+ }
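The "hy" table is a character-level CTC vocabulary: one id per Armenian letter, with "|" (id 0) standing in for the space between words. A round-trip sketch, again under the hypothetical repo id from above:

```python
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("nataligzraryan/runs")

# One id per character; spaces map to the "|" delimiter (id 0).
ids = tokenizer("բարեւ ձեզ").input_ids
print(ids)  # [3, 2, 33, 6, 35, 0, 18, 6, 7] per the table above

# decode() collapses repeated ids (CTC-style grouping); pass
# group_tokens=False when decoding raw label sequences instead.
print(tokenizer.decode(ids))  # "բարեւ ձեզ"
```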