roberta-base-japanese / tokenizer_config.json
{
  "bos_token": "<s>",
  "eos_token": "</s>",
  "unk_token": "<unk>",
  "sep_token": "</s>",
  "cls_token": "<s>",
  "pad_token": "<pad>",
  "mask_token": {
    "content": "<mask>",
    "single_word": false,
    "lstrip": true,
    "rstrip": false,
    "normalized": true,
    "__type": "AddedToken"
  },
  "sp_model_kwargs": {},
  "do_lower_case": false,
  "do_zenkaku": true,
  "do_word_tokenize": true,
  "do_subword_tokenize": true,
  "word_tokenizer_type": "mecab",
  "subword_tokenizer_type": "bpe",
  "never_split": null,
  "mecab_kwargs": {
    "mecab_dic": "unidic_lite"
  },
  "special_tokens_map_file": null,
  "tokenizer_file": null,
  "tokenizer_class": "RobertaJapaneseTokenizer"
}
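
This config describes a two-stage tokenizer: MeCab word segmentation (with the unidic_lite dictionary) followed by BPE subword splitting, with zenkaku normalization on and lowercasing off. Below is a minimal sketch of loading it, assuming a hypothetical repository id and that the custom RobertaJapaneseTokenizer class is resolved via trust_remote_code; the MeCab backend also needs extra packages (pip install transformers fugashi unidic-lite).

# A minimal sketch, not the model's documented usage: the repo id is a
# placeholder, and trust_remote_code is assumed to be required because
# "tokenizer_class" names a custom class not built into transformers.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "owner/roberta-base-japanese",  # hypothetical repo id; replace with the real one
    trust_remote_code=True,         # load the custom RobertaJapaneseTokenizer
)

# MeCab splits the sentence into words, then BPE splits words into subwords.
text = "日本語の文章をトークン化する。"  # "Tokenize a Japanese sentence."
encoding = tokenizer(text)
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))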