RichardErkhov committed
Commit: 078ec1a
Parent(s): 0251e2a
uploaded model

Files changed: config.json (+100 -0)

config.json ADDED
@@ -0,0 +1,100 @@
{
  "_name_or_path": "GPT-Neo-2.7B-Picard",
  "activation_function": "gelu_new",
  "architectures": [
    "GPTNeoForCausalLM"
  ],
  "attention_dropout": 0,
  "attention_layers": [
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local"
  ],
  "attention_types": [
    [
      [
        "global",
        "local"
      ],
      16
    ]
  ],
  "bos_token_id": 50256,
  "classifier_dropout": 0.1,
  "embed_dropout": 0,
  "eos_token_id": 50256,
  "hidden_size": 2560,
  "initializer_range": 0.02,
  "intermediate_size": null,
  "layer_norm_epsilon": 1e-05,
  "max_position_embeddings": 2048,
  "model_type": "gpt_neo",
  "num_heads": 20,
  "num_layers": 32,
  "quantization_config": {
    "_load_in_4bit": true,
    "_load_in_8bit": false,
    "bnb_4bit_compute_dtype": "float32",
    "bnb_4bit_quant_storage": "uint8",
    "bnb_4bit_quant_type": "fp4",
    "bnb_4bit_use_double_quant": false,
    "llm_int8_enable_fp32_cpu_offload": false,
    "llm_int8_has_fp16_weight": false,
    "llm_int8_skip_modules": null,
    "llm_int8_threshold": 6.0,
    "load_in_4bit": true,
    "load_in_8bit": false,
    "quant_method": "bitsandbytes"
  },
  "rep_pen": 1.75,
  "resid_dropout": 0,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "task_specific_params": {
    "text-generation": {
      "do_sample": true,
      "max_length": 50,
      "temperature": 0.9
    }
  },
  "temp": 0.7,
  "tokenizer_class": "GPT2Tokenizer",
  "torch_dtype": "float16",
  "transformers_version": "4.39.3",
  "use_cache": false,
  "vocab_size": 50257,
  "welcome": "You are currently running novel-writing model `Picard, version 1.`\n\n This model is made by [Mr. Seeker](https://www.patreon.com/mrseeker)\n\n### How to use this model\n\nPicard is designed to generate stories and novels. Use the authors note to give it a certain genre to follow, use memory to give an overview of the story and use World Information to give it specific details about the characters. To start off, give the AI an idea of what you are writing about by setting the scene. Give the AI around 10 sentences that make your story really interesting to read. Introduce your character, describe the world, blow something up, or let the AI use its creative mind.",
  "window_size": 256
}
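For reference, a minimal sketch of loading and sampling from a checkpoint that carries the quantization_config and task_specific_params shown above. The repo id and prompt are placeholders (this commit does not name the Hub path), and it assumes transformers >= 4.39 with bitsandbytes installed on a CUDA machine:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Placeholder repo id -- substitute the actual Hub path of this upload.
model_id = "RichardErkhov/GPT-Neo-2.7B-Picard-4bit"

# Mirrors the quantization_config block above: 4-bit fp4 weights,
# float32 compute dtype, no double quantization.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_compute_dtype=torch.float32,
    bnb_4bit_use_double_quant=False,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,  # optional: config.json already embeds it
    device_map="auto",
)

# Sampling settings taken from task_specific_params["text-generation"];
# the prompt itself is only an illustrative example.
prompt = "The research vessel dropped out of warp above a silent, frozen moon."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    do_sample=True,
    max_length=50,
    temperature=0.9,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Because the quantization settings are already stored in config.json, from_pretrained would also pick them up without an explicit BitsAndBytesConfig; passing one simply makes the fp4/float32 choice visible in the calling code.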