rohitp1 committed
Commit df090da
1 Parent(s): 9428215

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +6 -0
  2. tokenizer_config.json +16 -0
  3. vocab.json +34 -0
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "<pad>",
+ "unk_token": "<unk>"
+ }
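
These four entries declare which literal strings fill the tokenizer's reserved special-token roles. A minimal sketch of inspecting the map, assuming the file has been downloaded locally as special_tokens_map.json:

```python
import json

# Read the special-token map uploaded in this commit (local path assumed).
with open("special_tokens_map.json") as f:
    special_tokens = json.load(f)

# Each key is a role the tokenizer understands; each value is the token string.
for role, token in special_tokens.items():
    print(f"{role}: {token}")
# bos_token: <s>, eos_token: </s>, pad_token: <pad>, unk_token: <unk>
```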
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": true,
+ "do_lower_case": false,
+ "do_normalize": true,
+ "eos_token": "</s>",
+ "model_max_length": 1000000000000000019884624838656,
+ "name_or_path": "rohitp1/dgx1_w2v2_large_finetune_teacher_babble_noise_libri_360_hours_50_epochs_batch_16",
+ "pad_token": "<pad>",
+ "replace_word_delimiter_char": " ",
+ "return_attention_mask": true,
+ "special_tokens_map_file": "/data/home/rohitprasad1/.cache/huggingface/hub/models--rohitp1--dgx1_w2v2_large_finetune_teacher_babble_noise_libri_360_hours_50_epochs_batch_16/snapshots/8aa902a47022aa3ddd8ff0d854e5753d5537a53e/special_tokens_map.json",
+ "tokenizer_class": "Wav2Vec2CTCTokenizer",
+ "unk_token": "<unk>",
+ "word_delimiter_token": "|"
+ }
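
Since tokenizer_class is Wav2Vec2CTCTokenizer, the three uploaded files are enough to reconstruct the tokenizer with transformers (the huge model_max_length is transformers' "no length limit" sentinel, not a real cap). A sketch, assuming the files sit together in a local directory; "./tokenizer_dir" below is a placeholder path, not this repo's id:

```python
from transformers import Wav2Vec2CTCTokenizer

# Load from a directory containing vocab.json, tokenizer_config.json,
# and special_tokens_map.json. "./tokenizer_dir" is a placeholder.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer_dir")

# "|" stands in for the space character inside the label alphabet;
# replace_word_delimiter_char=" " maps it back to a space on decode.
print(tokenizer.word_delimiter_token)  # |
print(tokenizer.pad_token_id)          # 0
```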
vocab.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "'": 27,
+ "</s>": 2,
+ "<pad>": 0,
+ "<s>": 1,
+ "<unk>": 3,
+ "A": 7,
+ "B": 24,
+ "C": 19,
+ "D": 14,
+ "E": 5,
+ "F": 20,
+ "G": 21,
+ "H": 11,
+ "I": 10,
+ "J": 29,
+ "K": 26,
+ "L": 15,
+ "M": 17,
+ "N": 9,
+ "O": 8,
+ "P": 23,
+ "Q": 30,
+ "R": 13,
+ "S": 12,
+ "T": 6,
+ "U": 16,
+ "V": 25,
+ "W": 18,
+ "X": 28,
+ "Y": 22,
+ "Z": 31,
+ "|": 4
+ }
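
The vocabulary is character-level: 26 uppercase letters plus the apostrophe, "|" (id 4) as the word delimiter, and four special tokens, with <pad> (id 0) doubling as the CTC blank in Wav2Vec2's convention. A toy sketch of CTC-style decoding against this map; the id_to_token subset is copied from the file, while the collapse logic is only illustrative (Wav2Vec2CTCTokenizer.decode does this internally):

```python
# Subset of vocab.json, inverted for decoding: id -> character.
vocab = {"<pad>": 0, "|": 4, "E": 5, "T": 6, "A": 7, "N": 9, "H": 11}
id_to_token = {i: t for t, i in vocab.items()}

def ctc_decode(ids, blank_id=0, delimiter_id=4):
    # Collapse consecutive repeats, drop blanks, map "|" back to a space.
    out, prev = [], None
    for i in ids:
        if i != prev and i != blank_id:
            out.append(" " if i == delimiter_id else id_to_token[i])
        prev = i
    return "".join(out)

# Frame-level ids with repeats and blanks, as a CTC head would emit them:
print(ctc_decode([6, 6, 0, 11, 5, 5, 4, 7, 0, 9]))  # THE AN
```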