Nehc committed
Commit
7ca7f18
1 Parent(s): 7edd20e

1 epoch, 1536-token sequence length

Files changed (5)
  1. added_tokens.json +1 -0
  2. merges.txt +1 -1
  3. tokenizer.json +0 -0
  4. tokenizer_config.json +1 -1
  5. vocab.json +0 -0
added_tokens.json ADDED
@@ -0,0 +1 @@
+{"<|endoftext|>": 50257}
merges.txt CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2
+#version: 0.2 - Trained by `huggingface/tokenizers`
 Ġ Ð
 Ð ¾
 Ð µ
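The merge entries are byte-level BPE symbols in GPT-2's byte-to-unicode alphabet: `Ġ` marks a leading space, while pairs like `Ð ¾` and `Ð µ` join the two UTF-8 bytes of the Cyrillic letters "о" and "е", which is why Cyrillic pairs top the retrained merge list. A quick check of that byte correspondence in plain Python:

```python
# Cyrillic letters take two UTF-8 bytes; in GPT-2's byte-to-unicode
# table 0xD0 renders as "Ð", 0xBE as "¾", and 0xB5 as "µ".
print("о".encode("utf-8").hex())  # d0be -> merge rule "Ð ¾"
print("е".encode("utf-8").hex())  # d0b5 -> merge rule "Ð µ"
```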
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": null, "name_or_path": "workflow", "tokenizer_file": "workflow\\tokenizer.json", "tokenizer_class": "GPT2Tokenizer"}
+{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": null, "name_or_path": "workflow", "errors": "replace", "tokenizer_class": "GPT2Tokenizer"}
vocab.json CHANGED
The diff for this file is too large to render. See raw diff