PathFinderKR committed
Commit 14db81a
1 Parent(s): 824ff27

Upload LlamaForCausalLM
config.json CHANGED
@@ -6,12 +6,13 @@
   "attention_bias": false,
   "attention_dropout": 0.0,
   "bos_token_id": 128000,
-  "eos_token_id": 128001,
+  "eos_token_id": 128009,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
   "intermediate_size": 14336,
   "max_position_embeddings": 8192,
+  "mlp_bias": false,
   "model_type": "llama",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
@@ -22,7 +23,7 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.40.0",
+  "transformers_version": "4.41.0",
   "use_cache": true,
-  "vocab_size": 128256
+  "vocab_size": 145793
 }
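A note on what changed: eos_token_id moves from 128001 (Llama 3's <|end_of_text|>) to 128009, which is the chat turn terminator <|eot_id|>, and vocab_size grows from 128256 to 145793, i.e. 17,537 extra tokens. Consistently, the index diff below grows total_size by 16347848704 - 16060522496 = 287,326,208 bytes, exactly two float16 matrices of shape 17537 x 4096 (the new rows of the input embedding and lm_head). A minimal sketch of how such a change is typically produced with transformers; the repo id and token names are placeholder assumptions, not this commit's actual training code:

# Hypothetical sketch, not this commit's actual code: extending a Llama 3
# tokenizer and resizing the embeddings yields exactly this kind of config
# drift (vocab_size 128256 -> 145793). The repo id below is a placeholder.
from transformers import AutoModelForCausalLM, AutoTokenizer

base = "meta-llama/Meta-Llama-3-8B-Instruct"  # assumed base checkpoint
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

# Add 145793 - 128256 = 17537 placeholder tokens, then grow the embeddings.
new_tokens = [f"<extra_{i}>" for i in range(145793 - 128256)]
tokenizer.add_tokens(new_tokens)
model.resize_token_embeddings(len(tokenizer))
assert model.config.vocab_size == 145793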
generation_config.json CHANGED
@@ -8,5 +8,5 @@
   "max_length": 4096,
   "temperature": 0.6,
   "top_p": 0.9,
-  "transformers_version": "4.40.0"
+  "transformers_version": "4.41.0"
 }
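These are the repo-level generation defaults: transformers loads generation_config.json and uses it as the fallback GenerationConfig inside model.generate(). A sketch of the equivalent explicit call, reusing the model and tokenizer from the sketch above:

# Sketch: passing the same sampling settings explicitly. Note that
# temperature and top_p only take effect when do_sample=True; model and
# tokenizer are assumed loaded as in the previous sketch.
from transformers import GenerationConfig

gen = GenerationConfig(max_length=4096, do_sample=True, temperature=0.6, top_p=0.9)
inputs = tokenizer("Hello", return_tensors="pt")
out = model.generate(**inputs, generation_config=gen)
print(tokenizer.decode(out[0], skip_special_tokens=True))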
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6874219e25322d93b521837d439ad75545f5b941c4ab3a1237f0b44363d4c2cc
-size 4976698592
+oid sha256:44541e8056bd668b5582963714515bb10533a31c01b51384af759c1f123f22b1
+size 4885463832
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4122d4e3d1cba1b1a79f188cc3ad944528f747f155b5d4cb196e67787c02e577
-size 4999802616
+oid sha256:d717090d6e117e98d32cd104b05e0a87636e71e8dc966be0280e696690adbb88
+size 4915916056
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f397fe539eac0f6b4ccb9646b84f57451dec1f243f66c5efc54dd73b6c9453a7
-size 4915916080
+oid sha256:37a05ce080f5195ddb6cbd8f58da44eaf487615454d10e51de57b5a637bcf6fa
+size 4999819232
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:86dab6d3f24a91f09ec84c04931fb4f465e131135f372a75f44f2f388503e2a6
-size 1168138808
+oid sha256:60ac679e18a59c3eb3e3bd7f5d58ecf3d2b93d379e1fb2a78cc71feff86b0207
+size 1546683160
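The four shard diffs above change Git LFS pointer files, not the weights themselves: oid is the SHA-256 of the real shard and size is its byte count, so a downloaded shard can be checked against its pointer. A small sketch, assuming the shards sit in the current directory of a local clone:

# Sketch: verify a downloaded shard against its Git LFS pointer
# (oid = SHA-256 of the file contents, size = byte count).
import hashlib
import os

def verify_shard(path: str, expected_oid: str, expected_size: int) -> bool:
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

print(verify_shard(
    "model-00001-of-00004.safetensors",
    "44541e8056bd668b5582963714515bb10533a31c01b51384af759c1f123f22b1",
    4885463832,
))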
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 16060522496
+    "total_size": 16347848704
   },
   "weight_map": {
     "lm_head.weight": "model-00004-of-00004.safetensors",
@@ -104,11 +104,11 @@
     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
-    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
@@ -124,13 +124,13 @@
     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
-    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
-    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
@@ -232,8 +232,8 @@
     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
     "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
-    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
-    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
     "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
@@ -275,11 +275,11 @@
     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
-    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",