1 epoch, 1536-token sequence length
Files changed:
- added_tokens.json +1 -0
- merges.txt +1 -1
- tokenizer.json +0 -0
- tokenizer_config.json +1 -1
- vocab.json +0 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<|endoftext|>": 50257}
merges.txt
CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2
+#version: 0.2 - Trained by `huggingface/tokenizers`
 Ġ Ð
 Ð ¾
 Ð µ
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"
+{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": null, "name_or_path": "workflow", "errors": "replace", "tokenizer_class": "GPT2Tokenizer"}
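For reference, the sketch below shows how a local checkout of this repository could be loaded with the Hugging Face transformers library, given the tokenizer_class, special-token settings, and added_tokens.json in this commit. The local path "./workflow" is a placeholder chosen to mirror name_or_path above and is not part of the commit itself.

from transformers import GPT2Tokenizer

# Minimal sketch, assuming a local clone of this repository; "./workflow"
# is a placeholder path mirroring name_or_path in tokenizer_config.json.
tokenizer = GPT2Tokenizer.from_pretrained("./workflow")

# Per the config, <|endoftext|> serves as unk/bos/eos, and added_tokens.json
# registers it with id 50257 on top of the base vocabulary.
print(tokenizer.eos_token, tokenizer.eos_token_id)

# Truncate to the 1536-token sequence length mentioned in the commit title.
ids = tokenizer("some training text", truncation=True, max_length=1536)["input_ids"]
print(len(ids))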
vocab.json
CHANGED
The diff for this file is too large to render.