AlexWortega committed
Commit 4a468c4
1 parent: dfa3bb3

Upload LlamaForCausalLM

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "Vikhrmodels/Vikhr-7b-0.2",
+  "_name_or_path": "AlexWortega/v4",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -10,20 +10,20 @@
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
-  "intermediate_size": 11008,
-  "max_position_embeddings": 2048,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
   "model_type": "llama",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
-  "num_key_value_heads": 32,
-  "pad_token_id": 0,
+  "num_key_value_heads": 8,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "rope_theta": 10000.0,
+  "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.39.1",
-  "use_cache": false,
-  "vocab_size": 60258
+  "transformers_version": "4.39.3",
+  "use_cache": true,
+  "vocab_size": 40002
 }
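
The new config switches to grouped-query attention (8 key/value heads against 32 query heads), widens the MLP to 14336, raises the context length to 32768 with a 4096-token sliding window, and shrinks the vocabulary to 40002. A back-of-the-envelope sketch of the parameter count these values imply, assuming the standard Llama parameterization (untied embeddings since tie_word_embeddings is false, no biases, RMSNorm weights only):

# Parameter count implied by the new config.json (values copied from the diff above).
hidden = 4096          # hidden_size
inter = 14336          # intermediate_size
layers = 32            # num_hidden_layers
q_heads = 32           # num_attention_heads
kv_heads = 8           # num_key_value_heads
vocab = 40002          # vocab_size

head_dim = hidden // q_heads                        # 128
kv_dim = kv_heads * head_dim                        # 1024 (grouped-query attention)

attn = 2 * hidden * hidden + 2 * hidden * kv_dim    # q/o_proj + k/v_proj
mlp = 3 * hidden * inter                            # gate/up/down_proj
norms = 2 * hidden                                  # input + post-attention RMSNorm
per_layer = attn + mlp + norms

embeddings = 2 * vocab * hidden                     # embed_tokens + lm_head (untied)
total_params = layers * per_layer + embeddings + hidden   # + final model.norm

print(total_params)        # 7307284480
print(total_params * 2)    # 14614568960 bytes in bfloat16

The bfloat16 byte count comes out to 14614568960, which is exactly the total_size recorded in the new model.safetensors.index.json further down, so the uploaded shards are consistent with this config.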
generation_config.json CHANGED
@@ -2,7 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "pad_token_id": 0,
-  "transformers_version": "4.39.1",
-  "use_cache": false
+  "transformers_version": "4.39.3"
 }
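
After this change the generation defaults carry only the bos/eos token ids and the transformers version; pad_token_id and use_cache were dropped, and use_cache now follows config.json, which sets it to true. A minimal sketch of the resulting defaults (values taken from the diff above):

# Minimal sketch of the updated generation defaults.
from transformers import GenerationConfig

gen_config = GenerationConfig(bos_token_id=1, eos_token_id=2)
print(gen_config.bos_token_id, gen_config.eos_token_id)  # 1 2
print(gen_config.pad_token_id)   # None: pad_token_id is no longer pinned here
print(gen_config.use_cache)      # True by default, matching the new config.json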
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88aa6434480d8cdd983dc47ae2520b527f51486dc6c9c33acd1ff7c1e38420a5
-size 4979633592
+oid sha256:1b1b4aaba96519930d4d94186ba3d11855149ca04779df9c74e63aa0cd362d1c
+size 4891274096
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d2a0aee196878f8a241d4aa98dd82abe72934c87423a73dc204cb608b23218f9
-size 4957876888
+oid sha256:41cf28dc730105259f0f14bb7f89e2adde6d2e6aabfe2968a5e64708586a1db5
+size 4915916176
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e038ddce280fccdad76371f29f847b2670948302b4a61f723c737a14a6df771
-size 4002333672
+oid sha256:7b7f220d3c530e549aa7be37d651aa1c8c9147b7f7ba0ee1fc8b02b69c91b439
+size 4807412520
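
Each .safetensors entry in the diff is a Git LFS pointer rather than the weights themselves: it records the spec version, the sha256 of the real file, and its size in bytes. A small sketch for checking a downloaded shard against its pointer; the expected digest and size are the new values for model-00001-of-00003.safetensors from the diff above, and the local path is an assumption about where the shard was saved:

# Verify a downloaded safetensors shard against its Git LFS pointer.
import hashlib
import os

shard_path = "model-00001-of-00003.safetensors"   # assumed local download path
expected_sha256 = "1b1b4aaba96519930d4d94186ba3d11855149ca04779df9c74e63aa0cd362d1c"
expected_size = 4891274096

sha = hashlib.sha256()
with open(shard_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(shard_path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_sha256, "sha256 mismatch"
print("shard matches its LFS pointer")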
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 13939810304
+    "total_size": 14614568960
   },
   "weight_map": {
     "lm_head.weight": "model-00003-of-00003.safetensors",
@@ -23,11 +23,11 @@
     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
@@ -39,7 +39,7 @@
     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
@@ -131,33 +131,33 @@
     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
-    "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",