khushi1234455687 committed
Commit 9ad4534
Parent: 09d1ff1

Upload tokenizer

Files changed (5):
  1. README.md +7 -7
  2. added_tokens.json +4 -0
  3. special_tokens_map.json +6 -0
  4. tokenizer_config.json +47 -0
  5. vocab.json +114 -0
README.md CHANGED
@@ -1,19 +1,19 @@
 ---
-library_name: transformers
-license: apache-2.0
 base_model: facebook/wav2vec2-large-xlsr-53
-tags:
-- generated_from_trainer
 datasets:
 - fleurs
+library_name: transformers
+license: apache-2.0
 metrics:
 - wer
+tags:
+- generated_from_trainer
 model-index:
 - name: wav2vec2-large-xlsr-53-Hindi-Version2
   results:
   - task:
-      name: Automatic Speech Recognition
       type: automatic-speech-recognition
+      name: Automatic Speech Recognition
     dataset:
       name: fleurs
       type: fleurs
@@ -21,9 +21,9 @@ model-index:
       split: None
       args: hi_in
     metrics:
-    - name: Wer
-      type: wer
+    - type: wer
       value: 0.3987113150444206
+      name: Wer
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
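For context on the `wer` metric reported above (0.3987…): word error rate is word-level edit distance divided by the number of reference words. A minimal sketch using the jiwer package — the package choice and the sentence pair are illustrative assumptions, not taken from the model card:

```python
from jiwer import wer

# Made-up Hindi reference/hypothesis pair for this sketch.
reference = ["मौसम आज अच्छा है"]   # 4 reference words
hypothesis = ["मौसम अच्छा है"]     # "आज" was dropped: 1 deletion

# WER = (substitutions + deletions + insertions) / reference word count
print(wer(reference, hypothesis))  # 0.25
```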
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+{
+  "</s>": 113,
+  "<s>": 112
+}
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "[PAD]",
+  "unk_token": "[UNK]"
+}
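Taken together, added_tokens.json and special_tokens_map.json mean the tokenizer resolves `<s>`/`</s>` to IDs 112/113 and uses `[PAD]`/`[UNK]` as the pad and unknown tokens. A quick check, assuming the files from this commit live in the repo `khushi1234455687/wav2vec2-large-xlsr-53-Hindi-Version2` (a repo id inferred from the commit author and model name, not stated in the diff):

```python
from transformers import Wav2Vec2CTCTokenizer

# Repo id is an assumption inferred from the commit author and model name.
tok = Wav2Vec2CTCTokenizer.from_pretrained(
    "khushi1234455687/wav2vec2-large-xlsr-53-Hindi-Version2"
)

print(tok.convert_tokens_to_ids("<s>"))   # 112, per added_tokens.json
print(tok.convert_tokens_to_ids("</s>"))  # 113
print(tok.pad_token, tok.unk_token)       # [PAD] [UNK], per special_tokens_map.json
```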
tokenizer_config.json ADDED
@@ -0,0 +1,47 @@
+{
+  "added_tokens_decoder": {
+    "110": {
+      "content": "[UNK]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": false
+    },
+    "111": {
+      "content": "[PAD]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": false
+    },
+    "112": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "113": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": false,
+  "eos_token": "</s>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "replace_word_delimiter_char": " ",
+  "target_lang": null,
+  "tokenizer_class": "Wav2Vec2CTCTokenizer",
+  "unk_token": "[UNK]",
+  "word_delimiter_token": "|"
+}
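The config declares a Wav2Vec2CTCTokenizer with `|` as the word delimiter (replaced by a space on decode, per `replace_word_delimiter_char`) and `[PAD]` (ID 111) doubling as the CTC blank. A minimal decoding sketch built from the vocab.json added in this commit; the ID sequence is hypothetical:

```python
from transformers import Wav2Vec2CTCTokenizer

# Build the tokenizer directly from the vocab.json added in this commit
# (assumed to be in the working directory).
tok = Wav2Vec2CTCTokenizer(
    "vocab.json",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
)

# Hypothetical CTC output: consecutive repeats collapse, [PAD] (111) acts as
# the blank, and "|" (107) becomes a space on decode.
ids = [101, 101, 111, 51, 111, 107, 7, 14]  # क क [PAD] ा [PAD] | ह े
print(tok.decode(ids))  # -> "का हे"
```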
vocab.json ADDED
@@ -0,0 +1,114 @@
+{
+  "$": 5,
+  "'": 29,
+  "+": 90,
+  "/": 45,
+  "0": 49,
+  "1": 24,
+  "2": 34,
+  "3": 44,
+  "4": 61,
+  "5": 53,
+  "6": 43,
+  "7": 96,
+  "8": 42,
+  "9": 21,
+  "[PAD]": 111,
+  "[UNK]": 110,
+  "a": 97,
+  "b": 71,
+  "c": 57,
+  "d": 76,
+  "e": 15,
+  "f": 89,
+  "g": 64,
+  "h": 9,
+  "i": 55,
+  "j": 20,
+  "k": 6,
+  "l": 41,
+  "m": 75,
+  "n": 72,
+  "o": 95,
+  "p": 83,
+  "q": 2,
+  "r": 65,
+  "s": 31,
+  "t": 78,
+  "u": 99,
+  "v": 88,
+  "w": 47,
+  "x": 80,
+  "y": 73,
+  "z": 81,
+  "|": 107,
+  "£": 19,
+  "¥": 98,
+  "°": 1,
+  "á": 85,
+  "õ": 37,
+  "ú": 12,
+  "ँ": 91,
+  "ं": 103,
+  "ः": 11,
+  "अ": 23,
+  "आ": 60,
+  "इ": 105,
+  "ई": 0,
+  "उ": 50,
+  "ऊ": 46,
+  "ए": 22,
+  "ऐ": 33,
+  "ऑ": 58,
+  "ओ": 30,
+  "औ": 104,
+  "क": 101,
+  "ख": 92,
+  "ग": 26,
+  "घ": 109,
+  "च": 62,
+  "छ": 32,
+  "ज": 18,
+  "झ": 69,
+  "ञ": 68,
+  "ट": 3,
+  "ठ": 39,
+  "ड": 87,
+  "ढ": 35,
+  "ण": 106,
+  "त": 79,
+  "थ": 48,
+  "द": 27,
+  "ध": 10,
+  "न": 25,
+  "प": 70,
+  "फ": 108,
+  "ब": 16,
+  "भ": 38,
+  "म": 84,
+  "य": 28,
+  "र": 36,
+  "ल": 52,
+  "व": 74,
+  "श": 77,
+  "ष": 54,
+  "स": 100,
+  "ह": 7,
+  "़": 8,
+  "ा": 51,
+  "ि": 67,
+  "ी": 4,
+  "ु": 13,
+  "ू": 94,
+  "ृ": 86,
+  "े": 14,
+  "ै": 56,
+  "ॉ": 17,
+  "ो": 93,
+  "ौ": 102,
+  "्": 40,
+  "।": 59,
+  "​": 63,
+  "‍": 82,
+  "—": 66
+}
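The 114-entry vocab maps characters (Devanagari plus Latin letters, digits, and punctuation, including zero-width space and zero-width joiner) to CTC output IDs. Inverting it is a handy sanity check when inspecting raw model outputs; a small sketch over the file from this commit:

```python
import json

# Load the character-level vocab added in this commit.
with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)

# Invert the mapping: CTC output ID -> character.
id_to_char = {idx: ch for ch, idx in vocab.items()}

print(id_to_char[7])    # ह
print(id_to_char[107])  # | (word delimiter, rendered as a space on decode)
print(id_to_char[111])  # [PAD] (doubles as the CTC blank)
```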