wilzh40 committed
Commit ffda1fb (1 parent: 156173b)

Upload MistralForCausalLM

config.json CHANGED
@@ -1,27 +1,25 @@
 {
-  "_name_or_path": "codellama/CodeLlama-7b-hf",
+  "_name_or_path": "mistralai/Mistral-7B-v0.1",
   "architectures": [
-    "LlamaForCausalLM"
+    "MistralForCausalLM"
   ],
-  "attention_bias": false,
   "bos_token_id": 1,
   "eos_token_id": 2,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
-  "intermediate_size": 11008,
-  "max_position_embeddings": 16384,
-  "model_type": "llama",
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
-  "num_key_value_heads": 32,
-  "pretraining_tp": 1,
+  "num_key_value_heads": 8,
   "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
-  "rope_theta": 1000000,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
   "transformers_version": "4.35.2",
   "use_cache": true,
-  "vocab_size": 32016
+  "vocab_size": 32000
 }
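The updated config matches the stock Mistral-7B-v0.1 architecture (grouped-query attention with 8 KV heads, 32k position embeddings, sliding-window attention). A minimal sketch of loading it with transformers; the repo id below is a placeholder assumption, not the actual repository name:

# Sketch only. The repo id is a placeholder assumption; substitute the real repository.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "<this-repo>"  # placeholder

config = AutoConfig.from_pretrained(repo_id)
assert config.model_type == "mistral"        # was "llama" before this commit
assert config.num_key_value_heads == 8       # grouped-query attention (was 32)
assert config.sliding_window == 4096         # new in this config

model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,               # matches "torch_dtype": "float16"
)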
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:430a5f0f26f51c534feb90ee77c7d09aca503a832b4d8489c6dda7d3aacc10ad
-size 4939116320
+oid sha256:77c79484ba363b5f28fa02884be1c0851bd55ef7bf8a7bf59b3ca595eda11be6
+size 4943162240
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:87329c01529394b334f8eabb5b4cf344359c2ed60ba07bd36788728f7bf09a91
-size 4947390768
+oid sha256:fe6e95bd8594296699686dcca0da98f0434321ef01d2b0b3911ad793b9741164
+size 4999819232
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f9a1d9f237267298a517d053f6d15cf2620e3d10ea2e7003492fee8e31b1db14
-size 3590619808
+oid sha256:7b453ec6227016e3989557a1927d152de8434c69ffac13cd04adb11a7bfa07dc
+size 4540516256
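The .safetensors entries are Git LFS pointers, so the shards themselves are addressed by SHA-256. A small sketch for verifying a downloaded shard against the pointer above (assumes the file sits in the working directory):

import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream the file and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# Expected value taken from the updated LFS pointer for shard 1 above.
expected = "77c79484ba363b5f28fa02884be1c0851bd55ef7bf8a7bf59b3ca595eda11be6"
assert sha256_of("model-00001-of-00003.safetensors") == expected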
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 13477093376
+    "total_size": 14483464192
   },
   "weight_map": {
     "lm_head.weight": "model-00003-of-00003.safetensors",
@@ -23,24 +23,24 @@
   "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
   "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
   "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-  "model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
-  "model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+  "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+  "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
   "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-  "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+  "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
   "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
   "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
   "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
   "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
   "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
   "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+  "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-  "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-  "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-  "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-  "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+  "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+  "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+  "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+  "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
   "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -140,24 +140,24 @@
   "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
-  "model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+  "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+  "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+  "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+  "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+  "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
   "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
   "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
   "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
-  "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+  "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+  "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
   "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
-  "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-  "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+  "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+  "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+  "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+  "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
   "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
   "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
   "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",