ByteBrew23 committed
Commit 5c1ef63
Parent: f91bdb6

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ tags:
+ - merge
+ - mergekit
+ - lazymergekit
+ - Qwen/CodeQwen1.5-7B-Chat
+ base_model:
+ - Qwen/CodeQwen1.5-7B-Chat
+ ---
+
+ # SuperCode
+
+ SuperCode is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
+ * [Qwen/CodeQwen1.5-7B-Chat](https://huggingface.co/Qwen/CodeQwen1.5-7B-Chat)
+
+ ## 🧩 Configuration
+
+ ```yaml
+ models:
+   - model: Qwen/CodeQwen1.5-7B-Chat
+     # no parameters necessary for base model
+   - model: Qwen/CodeQwen1.5-7B-Chat
+     parameters:
+       density: 0.5
+       weight: 0.5
+
+ merge_method: ties
+ base_model: Qwen/CodeQwen1.5-7B-Chat
+ parameters:
+   normalize: false
+   int8_mask: true
+ dtype: float16
+ ```
+
+ ## 💻 Usage
+
+ ```python
+ # pip install -qU transformers accelerate
+
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+
+ model = "ByteBrew23/SuperCode"
+ messages = [{"role": "user", "content": "What is a large language model?"}]
+
+ tokenizer = AutoTokenizer.from_pretrained(model)
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model,
+     torch_dtype=torch.float16,
+     device_map="auto",
+ )
+
+ outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+ print(outputs[0]["generated_text"])
+ ```
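The commit message says this folder was uploaded with huggingface_hub; the same library can pull the whole repo back down. A minimal sketch (the printed contents reflect the files added in this commit):

```python
# Sketch: fetch this repo with huggingface_hub, the library named in the
# commit message as having performed the upload.
from huggingface_hub import snapshot_download

# Returns the local cache path holding config.json, the tokenizer files,
# and the eight safetensors weight shards listed below.
local_dir = snapshot_download(repo_id="ByteBrew23/SuperCode")
print(local_dir)
```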
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "Qwen/CodeQwen1.5-7B-Chat",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 13440,
+   "max_position_embeddings": 65536,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 4,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000,
+   "rotary_emb_base": 1000000,
+   "seq_length": 65536,
+   "sliding_window": 65536,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.40.1",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 92416
+ }
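These values can be sanity-checked without downloading the ~14.5 GB of weights, since transformers can fetch the config alone; a quick sketch:

```python
# Sketch: inspect the merged model's config without loading any weights.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("ByteBrew23/SuperCode")
print(cfg.model_type)               # "qwen2"
print(cfg.num_attention_heads)      # 32 query heads...
print(cfg.num_key_value_heads)      # ...sharing 4 KV heads (grouped-query attention)
print(cfg.max_position_embeddings)  # 65536-token context window
```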
mergekit_config.yml ADDED
@@ -0,0 +1,15 @@
+
+ models:
+   - model: Qwen/CodeQwen1.5-7B-Chat
+     # no parameters necessary for base model
+   - model: Qwen/CodeQwen1.5-7B-Chat
+     parameters:
+       density: 0.5
+       weight: 0.5
+
+ merge_method: ties
+ base_model: Qwen/CodeQwen1.5-7B-Chat
+ parameters:
+   normalize: false
+   int8_mask: true
+ dtype: float16
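This is the same TIES config shown in the README, archived by mergekit next to the weights. A small sketch of parsing the archived file to confirm its settings (assumes PyYAML is installed and the file is local):

```python
# Sketch: parse the archived merge config and confirm the TIES settings.
import yaml

with open("mergekit_config.yml") as f:
    cfg = yaml.safe_load(f)

assert cfg["merge_method"] == "ties"
assert cfg["base_model"] == "Qwen/CodeQwen1.5-7B-Chat"
for entry in cfg["models"]:
    # the base model entry carries no parameters; the other has density/weight
    print(entry["model"], entry.get("parameters", "base (no parameters)"))
```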
model-00001-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a962c4b86d0793124150db53908faac2a4c46f8275f1252ca210b74d2da33f9
+ size 1919979168
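Each shard is stored as a Git LFS pointer: the spec version, the SHA-256 of the real blob, and its size in bytes. Once downloaded, a shard can be checked against its pointer; a sketch (the local filename is an assumption about where the file landed):

```python
# Sketch: verify a downloaded shard against its LFS pointer's sha256 and size.
import hashlib
import os

path = "model-00001-of-00008.safetensors"  # assumed local download location
expected = "4a962c4b86d0793124150db53908faac2a4c46f8275f1252ca210b74d2da33f9"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected
assert os.path.getsize(path) == 1919979168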
model-00002-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:079decb4776fa5c5809f7d377c2b4ca2997b5e0c282c71449dd519c35e9572af
+ size 1991376088
model-00003-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81177f85c3b85fa2722bdf0046575087d34799227e938898756201b3d513d31e
+ size 1995571640
model-00004-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd96987d4d5eab1ea5bd0ef5623fc125375de4fa07463580923606aeacd09580
+ size 1914830008
model-00005-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:708372603fe1e350280f7970c850a0597d2f0f44e3043b734e7d1b4ad4ff81db
+ size 1919033888
model-00006-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1247a4bfb4455823dce9ca896ce6217fa1151ca3c3f8982a3c0f37abbbf3a5f6
+ size 1919033896
model-00007-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9d41da33c67b2cb6e61f4ca92e3d28dbdb1e33be47f938744e3cb9e5370e3e5
+ size 1991376056
model-00008-of-00008.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef9ba538e753dbeee40d85506f0494a4adaf16fa1df084e417b98bc19f89bee2
+ size 849412136
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.4.2", "total_size": 14500569088}, "weight_map": {"lm_head.weight": "model-00001-of-00008.safetensors", "model.embed_tokens.weight": "model-00001-of-00008.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00008.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00008.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00008.safetensors", "model.layers.1.input_layernorm.weight": "model-00001-of-00008.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.1.self_attn.k_proj.bias": "model-00002-of-00008.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00002-of-00008.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.1.self_attn.q_proj.bias": "model-00002-of-00008.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00002-of-00008.safetensors", "model.layers.1.self_attn.v_proj.bias": "model-00002-of-00008.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00002-of-00008.safetensors", "model.layers.10.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00008.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00008.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00008.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00008.safetensors", "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00008.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00008.safetensors", "model.layers.11.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00008.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00008.safetensors", 
"model.layers.11.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00008.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00008.safetensors", "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00008.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00008.safetensors", "model.layers.12.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00008.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00008.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00008.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00008.safetensors", "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00008.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00008.safetensors", "model.layers.13.input_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00002-of-00008.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00008.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00002-of-00008.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00008.safetensors", "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00008.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00008.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00008.safetensors", "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00008.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.13.self_attn.v_proj.bias": "model-00003-of-00008.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.14.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.14.self_attn.k_proj.bias": "model-00003-of-00008.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.14.self_attn.q_proj.bias": "model-00003-of-00008.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.14.self_attn.v_proj.bias": "model-00003-of-00008.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.15.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00003-of-00008.safetensors", 
"model.layers.15.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.15.self_attn.k_proj.bias": "model-00003-of-00008.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.15.self_attn.q_proj.bias": "model-00003-of-00008.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.15.self_attn.v_proj.bias": "model-00003-of-00008.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.16.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.16.self_attn.k_proj.bias": "model-00003-of-00008.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.16.self_attn.q_proj.bias": "model-00003-of-00008.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.16.self_attn.v_proj.bias": "model-00003-of-00008.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.k_proj.bias": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.q_proj.bias": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.v_proj.bias": "model-00003-of-00008.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00008.safetensors", "model.layers.18.input_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00003-of-00008.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00008.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00003-of-00008.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00008.safetensors", "model.layers.18.self_attn.k_proj.bias": "model-00003-of-00008.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00008.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.18.self_attn.q_proj.bias": "model-00004-of-00008.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.18.self_attn.v_proj.bias": "model-00004-of-00008.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.19.input_layernorm.weight": "model-00004-of-00008.safetensors", 
"model.layers.19.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00004-of-00008.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.19.self_attn.k_proj.bias": "model-00004-of-00008.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00004-of-00008.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.19.self_attn.q_proj.bias": "model-00004-of-00008.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.19.self_attn.v_proj.bias": "model-00004-of-00008.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.2.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00004-of-00008.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.2.self_attn.k_proj.bias": "model-00004-of-00008.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00004-of-00008.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.2.self_attn.q_proj.bias": "model-00004-of-00008.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.2.self_attn.v_proj.bias": "model-00004-of-00008.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.20.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00004-of-00008.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.20.self_attn.k_proj.bias": "model-00004-of-00008.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00004-of-00008.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.20.self_attn.q_proj.bias": "model-00004-of-00008.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.20.self_attn.v_proj.bias": "model-00004-of-00008.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.21.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00004-of-00008.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.21.self_attn.k_proj.bias": "model-00004-of-00008.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00004-of-00008.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00004-of-00008.safetensors", "model.layers.21.self_attn.q_proj.bias": "model-00004-of-00008.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00004-of-00008.safetensors", "model.layers.21.self_attn.v_proj.bias": 
"model-00004-of-00008.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00004-of-00008.safetensors", "model.layers.22.input_layernorm.weight": "model-00004-of-00008.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00004-of-00008.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00004-of-00008.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.22.self_attn.k_proj.bias": "model-00005-of-00008.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.22.self_attn.q_proj.bias": "model-00005-of-00008.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.22.self_attn.v_proj.bias": "model-00005-of-00008.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.23.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00005-of-00008.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.23.self_attn.k_proj.bias": "model-00005-of-00008.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.23.self_attn.q_proj.bias": "model-00005-of-00008.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.23.self_attn.v_proj.bias": "model-00005-of-00008.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.24.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00005-of-00008.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.24.self_attn.k_proj.bias": "model-00005-of-00008.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.24.self_attn.q_proj.bias": "model-00005-of-00008.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.24.self_attn.v_proj.bias": "model-00005-of-00008.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.25.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00005-of-00008.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.25.self_attn.k_proj.bias": "model-00005-of-00008.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", 
"model.layers.25.self_attn.q_proj.bias": "model-00005-of-00008.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.25.self_attn.v_proj.bias": "model-00005-of-00008.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.k_proj.bias": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.q_proj.bias": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.v_proj.bias": "model-00005-of-00008.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00005-of-00008.safetensors", "model.layers.27.input_layernorm.weight": "model-00005-of-00008.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00005-of-00008.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.27.self_attn.k_proj.bias": "model-00006-of-00008.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.27.self_attn.q_proj.bias": "model-00006-of-00008.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.27.self_attn.v_proj.bias": "model-00006-of-00008.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.28.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.28.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.28.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.28.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.28.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.28.self_attn.k_proj.bias": "model-00006-of-00008.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", "model.layers.28.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.28.self_attn.q_proj.bias": "model-00006-of-00008.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.28.self_attn.v_proj.bias": "model-00006-of-00008.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.29.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.29.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.29.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.29.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.29.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", 
"model.layers.29.self_attn.k_proj.bias": "model-00006-of-00008.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", "model.layers.29.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.29.self_attn.q_proj.bias": "model-00006-of-00008.safetensors", "model.layers.29.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.29.self_attn.v_proj.bias": "model-00006-of-00008.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.3.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.3.self_attn.k_proj.bias": "model-00006-of-00008.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.3.self_attn.q_proj.bias": "model-00006-of-00008.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.3.self_attn.v_proj.bias": "model-00006-of-00008.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.30.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.30.mlp.down_proj.weight": "model-00006-of-00008.safetensors", "model.layers.30.mlp.gate_proj.weight": "model-00006-of-00008.safetensors", "model.layers.30.mlp.up_proj.weight": "model-00006-of-00008.safetensors", "model.layers.30.post_attention_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.30.self_attn.k_proj.bias": "model-00006-of-00008.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00006-of-00008.safetensors", "model.layers.30.self_attn.o_proj.weight": "model-00006-of-00008.safetensors", "model.layers.30.self_attn.q_proj.bias": "model-00006-of-00008.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00006-of-00008.safetensors", "model.layers.30.self_attn.v_proj.bias": "model-00006-of-00008.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00006-of-00008.safetensors", "model.layers.31.input_layernorm.weight": "model-00006-of-00008.safetensors", "model.layers.31.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.31.mlp.gate_proj.weight": "model-00007-of-00008.safetensors", "model.layers.31.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.31.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.31.self_attn.k_proj.bias": "model-00007-of-00008.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", "model.layers.31.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.31.self_attn.q_proj.bias": "model-00007-of-00008.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00007-of-00008.safetensors", "model.layers.31.self_attn.v_proj.bias": "model-00007-of-00008.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00007-of-00008.safetensors", "model.layers.4.input_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.4.mlp.gate_proj.weight": 
"model-00007-of-00008.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.4.self_attn.k_proj.bias": "model-00007-of-00008.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.4.self_attn.q_proj.bias": "model-00007-of-00008.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00007-of-00008.safetensors", "model.layers.4.self_attn.v_proj.bias": "model-00007-of-00008.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00007-of-00008.safetensors", "model.layers.5.input_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00007-of-00008.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.5.self_attn.k_proj.bias": "model-00007-of-00008.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.5.self_attn.q_proj.bias": "model-00007-of-00008.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00007-of-00008.safetensors", "model.layers.5.self_attn.v_proj.bias": "model-00007-of-00008.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00007-of-00008.safetensors", "model.layers.6.input_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00007-of-00008.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.6.self_attn.k_proj.bias": "model-00007-of-00008.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.6.self_attn.q_proj.bias": "model-00007-of-00008.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00007-of-00008.safetensors", "model.layers.6.self_attn.v_proj.bias": "model-00007-of-00008.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00007-of-00008.safetensors", "model.layers.7.input_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00007-of-00008.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00007-of-00008.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00007-of-00008.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00007-of-00008.safetensors", "model.layers.7.self_attn.k_proj.bias": "model-00007-of-00008.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00007-of-00008.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00007-of-00008.safetensors", "model.layers.7.self_attn.q_proj.bias": "model-00007-of-00008.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00008-of-00008.safetensors", "model.layers.7.self_attn.v_proj.bias": "model-00008-of-00008.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00008-of-00008.safetensors", "model.layers.8.input_layernorm.weight": 
"model-00008-of-00008.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00008-of-00008.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00008-of-00008.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00008-of-00008.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00008-of-00008.safetensors", "model.layers.8.self_attn.k_proj.bias": "model-00008-of-00008.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00008-of-00008.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00008-of-00008.safetensors", "model.layers.8.self_attn.q_proj.bias": "model-00008-of-00008.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00008-of-00008.safetensors", "model.layers.8.self_attn.v_proj.bias": "model-00008-of-00008.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00008-of-00008.safetensors", "model.layers.9.input_layernorm.weight": "model-00008-of-00008.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00008-of-00008.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00008-of-00008.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00008-of-00008.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00008-of-00008.safetensors", "model.layers.9.self_attn.k_proj.bias": "model-00008-of-00008.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00008-of-00008.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00008-of-00008.safetensors", "model.layers.9.self_attn.q_proj.bias": "model-00008-of-00008.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00008-of-00008.safetensors", "model.layers.9.self_attn.v_proj.bias": "model-00008-of-00008.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00008-of-00008.safetensors", "model.norm.weight": "model-00008-of-00008.safetensors"}}
special_tokens_map.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<fim_prefix>",
+     "<fim_middle>",
+     "<fim_suffix>",
+     "<fim_pad>"
+   ],
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<fim_pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
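The map keeps CodeQwen's ChatML-style markers (<|im_start|>/<|im_end|>) plus its fill-in-the-middle tokens, which is what apply_chat_template in the README usage snippet relies on. A quick check using only the tokenizer files from this repo:

```python
# Sketch: confirm the chat/FIM special tokens the merged tokenizer exposes.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ByteBrew23/SuperCode")
print(tok.special_tokens_map)         # bos/eos/pad/unk plus additional tokens
print(tok.eos_token)                  # "<|im_end|>" closes each chat turn
print(tok.additional_special_tokens)  # includes <fim_prefix>/<fim_middle>/<fim_suffix>
```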
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
The diff for this file is too large to render.