Text Generation
Transformers
PyTorch
rwkv
uncensored
Inference Endpoints
Ian Walton committed
Commit 29f4fd5
1 Parent(s): 541d020

Add tokenizer and model config.

config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "architectures": [
+     "RwkvForCausalLM"
+   ],
+   "attention_hidden_size": 2048,
+   "bos_token_id": 2,
+   "context_length": 1024,
+   "eos_token_id": 0,
+   "hidden_size": 2048,
+   "intermediate_size": 8192,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "rwkv",
+   "num_hidden_layers": 24,
+   "rescale_every": 6,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.29.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50277
+ }
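The config above describes a 24-layer RWKV model with hidden size 2048 and a 50,277-token vocabulary. A minimal sketch of rebuilding that architecture in code, assuming a transformers version with RWKV support (>= 4.29) is installed; this only recreates the structure, the weights still come from the repository checkpoint:

from transformers import RwkvConfig, RwkvForCausalLM

# Mirror the values from config.json above.
config = RwkvConfig(
    vocab_size=50277,
    context_length=1024,
    hidden_size=2048,
    num_hidden_layers=24,
    attention_hidden_size=2048,
    intermediate_size=8192,
    layer_norm_epsilon=1e-5,
    rescale_every=6,
    tie_word_embeddings=False,
    use_cache=True,
    bos_token_id=2,
    eos_token_id=0,
)
model = RwkvForCausalLM(config)  # randomly initialised; load the checkpoint for real weights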
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "transformers_version": "4.29.0.dev0"
+ }
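This file is what transformers' GenerationConfig reads at generation time. A short sketch of the equivalent in-memory object, assuming transformers >= 4.29:

from transformers import GenerationConfig

gen_config = GenerationConfig(bos_token_id=0, eos_token_id=2)
# "_from_model_config": true records that these defaults were copied from the
# model config when the checkpoint was saved.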
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1000000000000000019884624838656,
+   "tokenizer_class": "GPTNeoXTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
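With tokenizer.json, tokenizer_config.json, and special_tokens_map.json in place, the checkpoint can be loaded through the Auto classes. A minimal usage sketch, assuming transformers >= 4.29 and using "./rwkv-model" as a hypothetical local clone of this repository (not a real path from the commit):

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./rwkv-model")  # reads tokenizer.json and tokenizer_config.json
model = AutoModelForCausalLM.from_pretrained("./rwkv-model")

inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))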