deberta-large_spell_5k_2_p3 / tokenizer_config.json
{
  "add_bos_token": false,
  "add_prefix_space": true,
  "add_special_tokens": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "[CLS]",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "__type": "AddedToken",
    "content": "[CLS]",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "do_lower_case": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "[SEP]",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "errors": "replace",
  "mask_token": {
    "__type": "AddedToken",
    "content": "[MASK]",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "model_max_length": 512,
  "name_or_path": "model_saves/deberta-large_spell_5k_2_p2",
  "pad_token": {
    "__type": "AddedToken",
    "content": "[PAD]",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "__type": "AddedToken",
    "content": "[SEP]",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "special_tokens_map_file": null,
  "tokenizer_class": "DebertaTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "[UNK]",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "vocab_type": "gpt2"
}
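
For reference, a minimal sketch of loading and using the tokenizer this config describes. The Hub repo id `stuartmesham/deberta-large_spell_5k_2_p3` is an assumption inferred from the page header; substitute the actual repo id or a local directory containing this file. Note that `name_or_path` records the local directory the tokenizer was last loaded from (the earlier `_p2` checkpoint), not where it is published.

```python
# Minimal sketch of consuming this tokenizer_config.json with transformers.
# The repo id is an assumption inferred from the page header; replace it with
# the real Hub repo id or a local path containing this config file.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("stuartmesham/deberta-large_spell_5k_2_p3")

# Values from this config surface as tokenizer attributes.
print(tokenizer.model_max_length)                                      # 512
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.mask_token)  # [CLS] [SEP] [MASK]

# "vocab_type": "gpt2" means DebertaTokenizer uses GPT-2-style byte-level BPE;
# "add_prefix_space": true prepends a space so the first word is tokenized the
# same way it would be mid-sentence.
ids = tokenizer("Helo wrld")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
```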