Upload lm-boosted decoder

- alphabet.json +1 -0
- language_model/attrs.json +1 -0
- language_model/parl_3gram_correct.bin +3 -0
- language_model/unigrams.txt +0 -0
- preprocessor_config.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -1
alphabet.json
ADDED
@@ -0,0 +1 @@
+{"labels": ["", "<s>", "</s>", "\u2047", "E", "N", "A", "I", "T", "O", "D", "R", " ", "L", "S", "H", "G", "M", "K", "V", "J", "W", "Z", "U", "B", "C", "P", "F", "Y", "\u00c9", "'", "X", "\u00cb", "Q", "-", "\u00ca", "\u00c0", "\u00c4", "\u00c8", "\u00cf", "\u00c2", "\u00db", "\u00d6", "\u00d4", "\u00dc", "\u00ce", "\u00c7", "\u00c6", "\u00d9", "\u0152"], "is_bpe": false}
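
The label list mirrors vocab.json index for index, with the CTC-specific remapping pyctcdecode expects: `<pad>` (index 0) becomes the blank `""`, `<unk>` (index 3) becomes `"\u2047"` (⁇), and the word delimiter `|` (index 12) becomes a literal space. A minimal sketch to verify that alignment locally (paths are repo-relative assumptions):

```python
import json

# Hedged sanity check: pyctcdecode labels must line up index-for-index with
# the tokenizer vocabulary, modulo the CTC-specific remapping below.
with open("alphabet.json") as f:
    labels = json.load(f)["labels"]
with open("vocab.json") as f:
    vocab = json.load(f)

remap = {"<pad>": "", "<unk>": "\u2047", "|": " "}
for token, index in vocab.items():
    assert labels[index] == remap.get(token, token), (token, index)
print("alphabet.json and vocab.json are aligned")
```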
language_model/attrs.json
ADDED
@@ -0,0 +1 @@
+{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
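
These are the decoder attributes pyctcdecode persists next to the KenLM binary, and the values match its documented defaults: `alpha` weights the language-model score, `beta` is a word-insertion bonus, `unk_score_offset` penalizes out-of-vocabulary words, and `score_boundary` lets KenLM score sentence boundaries. A sketch of rebuilding the same decoder by hand, using the unigrams file uploaded below (repo-relative paths assumed; transformers normally does this automatically via Wav2Vec2ProcessorWithLM):

```python
import json
from pyctcdecode import build_ctcdecoder

with open("alphabet.json") as f:
    labels = json.load(f)["labels"]
with open("language_model/unigrams.txt") as f:
    unigrams = [line.strip() for line in f]

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/parl_3gram_correct.bin",
    unigrams=unigrams,
    alpha=0.5,               # language-model weight
    beta=1.5,                # word-insertion bonus
    unk_score_offset=-10.0,  # penalty for unknown words
    lm_score_boundary=True,  # let KenLM score sentence boundaries
)
```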
language_model/parl_3gram_correct.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:867272965bac6d65e8bb5d2a53081c1a5274ee4eb5c6a2ec9857eb8737bfe9d0
+size 466477546
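
This diff only shows the git-lfs pointer; the tracked object is the KenLM binary itself, a 3-gram model of roughly 466 MB. With the real file checked out, the kenlm Python bindings can sanity-check it directly (the test sentence is an arbitrary uppercase Dutch example, matching the all-uppercase vocabulary):

```python
import kenlm

# Load the binary 3-gram model (requires the LFS object, not the pointer file).
lm = kenlm.Model("language_model/parl_3gram_correct.bin")
print(lm.order)                     # expected: 3 for a 3-gram model
print(lm.score("DIT IS EEN TEST"))  # log10 probability of a sample sentence
```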
language_model/unigrams.txt
ADDED
The diff for this file is too large to render.
preprocessor_config.json
CHANGED
@@ -4,6 +4,7 @@
   "feature_size": 1,
   "padding_side": "right",
   "padding_value": 0.0,
+  "processor_class": "Wav2Vec2ProcessorWithLM",
   "return_attention_mask": true,
   "sampling_rate": 16000
 }
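
The only change here is the new `processor_class` entry, which is what lets AutoProcessor resolve this checkpoint to Wav2Vec2ProcessorWithLM instead of the plain Wav2Vec2Processor. A sketch (the repo id is an assumption taken from `name_or_path` in tokenizer_config.json below):

```python
from transformers import AutoProcessor

# "processor_class" in preprocessor_config.json steers AutoProcessor to the
# LM-boosted processor; the repo id below is illustrative.
processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-xlsr-53-dutch")
print(type(processor).__name__)  # Wav2Vec2ProcessorWithLM
```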
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": true, "word_delimiter_token": "|", "special_tokens_map_file": "/home/patrick/hugging_face/add_wav2vec/hf/wav2vec2-large-xlsr-53-dutch/special_tokens_map.json", "tokenizer_file": null, "name_or_path": "facebook/wav2vec2-large-xlsr-53-dutch", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2ProcessorWithLM"}
vocab.json
CHANGED
@@ -1 +1 @@
-{"
+{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "E": 4, "N": 5, "A": 6, "I": 7, "T": 8, "O": 9, "D": 10, "R": 11, "|": 12, "L": 13, "S": 14, "H": 15, "G": 16, "M": 17, "K": 18, "V": 19, "J": 20, "W": 21, "Z": 22, "U": 23, "B": 24, "C": 25, "P": 26, "F": 27, "Y": 28, "É": 29, "'": 30, "X": 31, "Ë": 32, "Q": 33, "-": 34, "Ê": 35, "À": 36, "Ä": 37, "È": 38, "Ï": 39, "Â": 40, "Û": 41, "Ö": 42, "Ô": 43, "Ü": 44, "Î": 45, "Ç": 46, "Æ": 47, "Ù": 48, "Œ": 49}
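
Taken together, the uploaded files enable LM-boosted beam-search decoding end to end. A sketch under stated assumptions: the repo id is illustrative and "sample.wav" is a placeholder 16 kHz mono recording:

```python
import soundfile as sf
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

repo = "facebook/wav2vec2-large-xlsr-53-dutch"  # illustrative repo id
processor = AutoProcessor.from_pretrained(repo)
model = Wav2Vec2ForCTC.from_pretrained(repo)

speech, rate = sf.read("sample.wav")  # must match "sampling_rate": 16000
inputs = processor(speech, sampling_rate=rate, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# batch_decode on Wav2Vec2ProcessorWithLM runs pyctcdecode beam search with
# the KenLM model instead of plain argmax CTC decoding.
print(processor.batch_decode(logits.numpy()).text[0])
```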