huseinzol05 committed
Commit 196add0
1 parent: a8b8545

Upload tokenizer

Files changed (3)
  1. merges.txt +1 -1
  2. tokenizer.json +1 -0
  3. tokenizer_config.json +1 -2
merges.txt CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2 - Trained by `huggingface/tokenizers`
+#version: 0.2
 a n
 Ġ t
 i n
tokenizer.json CHANGED
@@ -948,6 +948,7 @@
 "continuing_subword_prefix": "",
 "end_of_word_suffix": "",
 "fuse_unk": false,
+"byte_fallback": false,
 "vocab": {
 "<s>": 0,
 "<pad>": 1,
tokenizer_config.json CHANGED
@@ -9,6 +9,7 @@
 "rstrip": false,
 "single_word": false
 },
+"clean_up_tokenization_spaces": true,
 "eos_token": {
 "__type": "AddedToken",
 "content": "<|endoftext|>",
@@ -19,9 +20,7 @@
 },
 "errors": "replace",
 "model_max_length": 1000000000000000019884624838656,
-"name_or_path": "./out-small-1.1",
 "pad_token": null,
-"special_tokens_map_file": "./out-small-1.1/special_tokens_map.json",
 "tokenizer_class": "GPT2Tokenizer",
 "unk_token": {
 "__type": "AddedToken",