ifuseok committed on
Commit 28024cf
1 Parent(s): 9868aa3

Upload 10 files

config.json CHANGED
@@ -1,12 +1,11 @@
 {
-  "_name_or_path": "ifuseok/sft-solar-10.7b-v2.1",
+  "_name_or_path": "ifuseok/sft-solar-10.7b-v1.1",
   "architectures": [
     "LlamaForCausalLM"
   ],
   "attention_bias": false,
-  "attention_dropout": 0.0,
   "bos_token_id": 1,
-  "eos_token_id": 32000,
+  "eos_token_id": 2,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
@@ -16,6 +15,7 @@
   "num_attention_heads": 32,
   "num_hidden_layers": 48,
   "num_key_value_heads": 8,
+  "pad_token_id": 2,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
@@ -24,5 +24,5 @@
   "torch_dtype": "bfloat16",
   "transformers_version": "4.34.1",
   "use_cache": false,
-  "vocab_size": 48000
+  "vocab_size": 32000
 }
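Net effect of this change: the config now points at the v1.1 SFT base instead of v2.1, drops attention_dropout, moves the EOS token from id 32000 (apparently the ChatML end token) to the Llama default id 2, adds an explicit pad_token_id, and shrinks vocab_size from 48000 to 32000. A minimal sketch for sanity-checking a local clone against these values; the local path below is a placeholder, not part of the repository:

import json

# Minimal sketch (not part of the commit): verify a local copy of config.json
# against the values introduced here. "local_repo" is a placeholder path.
with open("local_repo/config.json") as f:
    cfg = json.load(f)

assert cfg["eos_token_id"] == 2          # was 32000 before this commit
assert cfg["pad_token_id"] == 2          # newly added
assert cfg["vocab_size"] == 32000        # was 48000 before this commit
assert "attention_dropout" not in cfg    # dropped in this commit
print("config.json matches the committed values")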
generation_config.json CHANGED
@@ -2,6 +2,7 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
+  "pad_token_id": 2,
   "transformers_version": "4.34.1",
   "use_cache": false
 }
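The only change here mirrors config.json: pad_token_id is now set explicitly to 2, the same id as the EOS token, so generate() no longer has to fall back to the EOS token for padding. A short sketch, assuming the placeholder repo id below is replaced with the actual one:

from transformers import GenerationConfig

# Sketch only: load the committed generation config and confirm the padding setup.
# "user/model-repo" is a placeholder id, not the actual repository name.
gen_cfg = GenerationConfig.from_pretrained("user/model-repo")
print(gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # expected after this commit: 2 2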
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3f830cc5110f0120e577070e1fff6987a7b7bde0c314b760f6306a484cb31c98
-size 9990167144
+oid sha256:e88527c86783c60a55a7e42cfb6250721ab64f7adf3ccd99f4cb4ecade66e1d6
+size 9942981696
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7386e746505b5712108074be279554d85e97f70642f8bd92afe4b1cf0c8295ac
-size 9915718912
+oid sha256:915d1745a194f6628b932d4ac8e08d8df46bdbb667ad1da0f1abff885c5ddba1
+size 9999621616
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6151cdcc19178a2549e71d42bd5db34fdade0a80893821ab9b0266368cb935ff
-size 1819356784
+oid sha256:f0a8309a88b412642c68849aed72ad9e938834edac0646777b5262ebc1349766
+size 1520495544
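The three *.safetensors entries are Git LFS pointer files, so only the SHA-256 oid and byte size change here, reflecting the resharded weights. A minimal sketch for checking a downloaded shard against its pointer, using the new values of model-00003-of-00003.safetensors; the local path is an assumption:

import hashlib
import os

# Sketch only: verify a downloaded shard against the Git LFS pointer above.
# Expected values come from the new model-00003-of-00003.safetensors pointer.
path = "model-00003-of-00003.safetensors"  # assumes the shard sits in the working directory
expected_oid = "f0a8309a88b412642c68849aed72ad9e938834edac0646777b5262ebc1349766"
expected_size = 1520495544

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_oid
print("shard matches its LFS pointer")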
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 21725192192
+    "total_size": 21463048192
   },
   "weight_map": {
     "lm_head.weight": "model-00003-of-00003.safetensors",
@@ -145,10 +145,10 @@
     "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.23.input_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.23.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -356,11 +356,11 @@
     "model.layers.43.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.43.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.43.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.44.input_layernorm.weight": "model-00003-of-00003.safetensors",
-    "model.layers.44.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.44.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.44.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.44.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.44.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.44.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.44.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.44.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.44.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.44.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
@@ -370,10 +370,10 @@
     "model.layers.45.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.45.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.45.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
-    "model.layers.45.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.45.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.45.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
-    "model.layers.45.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
-    "model.layers.45.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.45.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.45.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.46.input_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.46.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.46.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
special_tokens_map.json CHANGED
@@ -7,7 +7,7 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "<|im_end|>",
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff
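The tokenizer.json and tokenizer_config.json diffs are too large to render, but together with the special_tokens_map.json change above they move the EOS token from <|im_end|> to the Llama default </s>. A quick sketch to confirm that the uploaded tokenizer files agree with the new config; the repo id is a placeholder:

from transformers import AutoTokenizer

# Sketch only: check that the uploaded tokenizer agrees with config.json.
# "user/model-repo" is a placeholder id, not the actual repository name.
tok = AutoTokenizer.from_pretrained("user/model-repo")
print(tok.eos_token, tok.eos_token_id)  # expected after this commit: </s> 2
assert tok.eos_token_id == 2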