Severian committed on
Commit efaeb71
1 Parent(s): 20b180a

Upload 21 files

README.md CHANGED
@@ -1,3 +1,38 @@
  ---
- license: apache-2.0
+ library_name: transformers
+ tags: []
  ---
+
+ # Jamba-Small v1
+
+ This is a pruned version of AI21 Labs' Jamba-v0.1 model that is ~25% the size of Jamba-v0.1.
+
+
+
+ ## Model Details
+ Whereas Jamba-v0.1 contains 4 Jamba blocks, Jamba-Small contains only 1 Jamba block.
+ Jamba-Small's Jamba block follows the same structure seen in Jamba-v0.1, with a 1:7 ratio of attention-to-Mamba layers and MoE applied every 2 layers.
+
+ Jamba-Small's weights are initialized from various layers in the original Jamba-v0.1 model. For v1, the layer weights are mapped as follows (left is the Jamba-Small layer number, right is the Jamba-v0.1 layer number):
+ ```
+ 0: 0
+ 1: 1
+ 2: 2
+ 3: 3
+ 4: 4
+ 5: 5
+ 6: 30
+ 7: 31
+ ```
+
+ Note that no additional fine-tuning has been performed on this model. As such, its performance is exceptionally poor. This should not be used in production without additional training.
+
+ ### Model Description
+
+ - **Developed by:** Nathan Brown (OxxoCodes)
+ - **Compute provided by:** Clemson Palmetto Cluster
+ - **Model type:** Joint Attention and Mamba (Jamba)
+ - **Language(s) (NLP):** English
+ - **License:** Apache 2.0
+ - **Original model:** [Jamba-v0.1](https://huggingface.co/ai21labs/Jamba-v0.1)
+ - **Jamba paper:** [https://arxiv.org/pdf/2403.19887.pdf](https://arxiv.org/pdf/2403.19887.pdf)
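The layer mapping above can be realized by copying whole decoder layers into a shallower model. Below is a minimal sketch (not the script used for this commit) of how such a remapping might be done with `transformers`; the `LAYER_MAP` constant mirrors the table above, and the dtype and memory assumptions are illustrative:

```python
# Illustrative layer-remapping sketch; NOT the exact script behind this commit.
# Assumes enough CPU memory to hold Jamba-v0.1 in bf16.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

LAYER_MAP = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 30, 7: 31}  # small -> v0.1

source = AutoModelForCausalLM.from_pretrained(
    "ai21labs/Jamba-v0.1", torch_dtype=torch.bfloat16, trust_remote_code=True
)

# Same architecture, but a single 8-layer Jamba block instead of four.
config = AutoConfig.from_pretrained("ai21labs/Jamba-v0.1", trust_remote_code=True)
config.num_hidden_layers = len(LAYER_MAP)
small = AutoModelForCausalLM.from_config(config, trust_remote_code=True)

# Embeddings, final norm, and LM head carry over unchanged.
small.model.embed_tokens.load_state_dict(source.model.embed_tokens.state_dict())
small.model.final_layernorm.load_state_dict(source.model.final_layernorm.state_dict())
small.lm_head.load_state_dict(source.lm_head.state_dict())

# Copy each mapped decoder layer. The periodic attention/MoE pattern lines up:
# both endpoints of every pair share the same layer type (mamba/attention, MoE/MLP).
for dst, src in LAYER_MAP.items():
    small.model.layers[dst].load_state_dict(source.model.layers[src].state_dict())

small.save_pretrained("jamba-small-v1")
```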
config.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "_name_or_path": "ai21labs/Jamba-v0.1",
+   "architectures": [
+     "JambaForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "attn_layer_offset": 4,
+   "attn_layer_period": 8,
+   "auto_map": {
+     "AutoConfig": "ai21labs/Jamba-v0.1--configuration_jamba.JambaConfig",
+     "AutoModel": "ai21labs/Jamba-v0.1--modeling_jamba.JambaModel",
+     "AutoModelForCausalLM": "ai21labs/Jamba-v0.1--modeling_jamba.JambaForCausalLM",
+     "AutoModelForSequenceClassification": "ai21labs/Jamba-v0.1--model.JambaForSequenceClassification"
+   },
+   "bos_token_id": 1,
+   "calc_logits_for_entire_prompt": false,
+   "eos_token_id": 2,
+   "expert_layer_offset": 1,
+   "expert_layer_period": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "mamba_conv_bias": true,
+   "mamba_d_conv": 4,
+   "mamba_d_state": 16,
+   "mamba_dt_rank": 256,
+   "mamba_expand": 2,
+   "mamba_inner_layernorms": true,
+   "mamba_proj_bias": false,
+   "model_type": "jamba",
+   "n_ctx": 262144,
+   "num_attention_heads": 32,
+   "num_experts": 16,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "output_router_logits": false,
+   "pad_token_id": 0,
+   "quantization_config": {
+     "_load_in_4bit": true,
+     "_load_in_8bit": false,
+     "bnb_4bit_compute_dtype": "bfloat16",
+     "bnb_4bit_quant_storage": "bfloat16",
+     "bnb_4bit_quant_type": "nf4",
+     "bnb_4bit_use_double_quant": true,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
+   "rms_norm_eps": 1e-06,
+   "router_aux_loss_coef": 0.001,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.40.0.dev0",
+   "use_cache": false,
+   "use_mamba_kernels": true,
+   "vocab_size": 65536
+ }
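Because a bitsandbytes `quantization_config` is embedded in `config.json`, loading this checkpoint with `from_pretrained` will apply NF4 4-bit quantization by default (this requires `bitsandbytes` and a CUDA device). A hypothetical loading snippet, with the repo id taken from the `huggingface-metadata.txt` file below:

```python
# Hedged example: load the checkpoint with its embedded 4-bit config applied.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "OxxoCodes/jamba-small-v1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches bnb_4bit_compute_dtype above
    device_map="auto",
    trust_remote_code=True,      # pulls modeling code via the auto_map entries
)
```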
configuration_jamba.py ADDED
@@ -0,0 +1,213 @@
+ # coding=utf-8
+ # Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Jamba model configuration"""
+ import math
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class JambaConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`JambaModel`]. It is used to instantiate a
+     Jamba model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of the jamba-small architecture.
+
+     [ai21labs/jamba-small](https://huggingface.co/ai21labs/Jamba-v0.1)
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 65536):
+             Vocabulary size of the Jamba model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`JambaModel`]
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
+             model has an output word embedding layer.
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 14336):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 8):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         calc_logits_for_entire_prompt (`bool`, *optional*, defaults to `False`):
+             Whether or not to calculate logits for the entire prompt during generation. If `False`, only the logits of
+             the last prompt token will be calculated, which are the only logits needed for generation. For long
+             sequences, the logits for the entire sequence may use a lot of memory, so setting
+             `calc_logits_for_entire_prompt=False` will reduce the memory footprint significantly.
+             Note: some generation features may not be available if this is set to `False`.
+         output_router_logits (`bool`, *optional*, defaults to `False`):
+             Whether or not the router logits should be returned by the model. Enabling this will also
+             allow the model to output the auxiliary loss. See [here]() for more details
+         router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+             The aux loss factor for the total loss.
+         pad_token_id (`int`, *optional*, defaults to 0):
+             The id of the padding token.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             The id of the "beginning-of-sequence" token.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             The id of the "end-of-sequence" token.
+         sliding_window (`int`, *optional*):
+             Sliding window attention window size. If not specified, will default to `None`.
+         n_ctx (`int`, *optional*, defaults to 262144):
+             This value doesn't have any real effect. The maximum sequence length that this model is intended to be
+             used with. It can be used with longer sequences, but performance may degrade.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         num_experts_per_tok (`int`, *optional*, defaults to 2):
+             The number of experts to route per token; can also be interpreted as the `top-k` routing
+             parameter
+         num_experts (`int`, *optional*, defaults to 16):
+             Number of experts per Sparse MLP layer.
+         expert_layer_period (`int`, *optional*, defaults to 2):
+             Once in this many layers, we will have an expert layer
+         expert_layer_offset (`int`, *optional*, defaults to 1):
+             The first layer index that contains an expert mlp layer
+         attn_layer_period (`int`, *optional*, defaults to 8):
+             Once in this many layers, we will have a vanilla attention layer
+         attn_layer_offset (`int`, *optional*, defaults to 4):
+             The first layer index that contains a vanilla attention layer
+         use_mamba_kernels (`bool`, *optional*, defaults to `True`):
+             Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and
+             `causal-conv1d` are installed, and the mamba modules are running on a CUDA device. Raises ValueError if
+             `True` and kernels are not available
+         mamba_d_state (`int`, *optional*, defaults to 16):
+             The dimension of the mamba state space latents
+         mamba_d_conv (`int`, *optional*, defaults to 4):
+             The size of the mamba convolution kernel
+         mamba_expand (`int`, *optional*, defaults to 2):
+             Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
+         mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
+             Rank of the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
+         mamba_conv_bias (`bool`, *optional*, defaults to `True`):
+             Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
+         mamba_proj_bias (`bool`, *optional*, defaults to `False`):
+             Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
+         mamba_inner_layernorms (`bool`, *optional*, defaults to `True`):
+             Flag indicating whether or not to apply layernorms to internal mamba activations
+
+     """
+
+     model_type = "jamba"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=65536,
+         tie_word_embeddings=False,
+         hidden_size=4096,
+         intermediate_size=14336,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=8,
+         hidden_act="silu",
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         calc_logits_for_entire_prompt=False,
+         output_router_logits=False,
+         router_aux_loss_coef=0.001,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         sliding_window=None,
+         n_ctx=262144,
+         attention_dropout=0.0,
+         num_experts_per_tok=2,
+         num_experts=16,
+         expert_layer_period=2,
+         expert_layer_offset=1,
+         attn_layer_period=8,
+         attn_layer_offset=4,
+         use_mamba_kernels=True,
+         mamba_d_state=16,
+         mamba_d_conv=4,
+         mamba_expand=2,
+         mamba_dt_rank="auto",
+         mamba_conv_bias=True,
+         mamba_proj_bias=False,
+         mamba_inner_layernorms=True,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.tie_word_embeddings = tie_word_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.sliding_window = sliding_window
+         self.n_ctx = n_ctx
+         self.attention_dropout = attention_dropout
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+
+         self.use_cache = use_cache
+         self.calc_logits_for_entire_prompt = calc_logits_for_entire_prompt
+         self.output_router_logits = output_router_logits
+         self.router_aux_loss_coef = router_aux_loss_coef
+
+         self.num_experts_per_tok = num_experts_per_tok
+         self.num_experts = num_experts
+         self.expert_layer_period = expert_layer_period
+         self.expert_layer_offset = expert_layer_offset
+         self.attn_layer_period = attn_layer_period
+         self.attn_layer_offset = attn_layer_offset
+
+         self.use_mamba_kernels = use_mamba_kernels
+         self.mamba_d_state = mamba_d_state
+         self.mamba_d_conv = mamba_d_conv
+         self.mamba_expand = mamba_expand
+         self.mamba_dt_rank = math.ceil(self.hidden_size / 16) if mamba_dt_rank == "auto" else mamba_dt_rank
+         self.mamba_conv_bias = mamba_conv_bias
+         self.mamba_proj_bias = mamba_proj_bias
+         self.mamba_inner_layernorms = mamba_inner_layernorms
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
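As a quick illustration of the `"auto"` resolution in `__init__` above (not part of the uploaded file): with the default `hidden_size=4096`, `mamba_dt_rank` resolves to `math.ceil(4096 / 16) = 256`, matching the literal `256` stored in this repo's `config.json`:

```python
# Illustrative only; assumes this module is importable as configuration_jamba.
from configuration_jamba import JambaConfig

config = JambaConfig(num_hidden_layers=8)  # 8 layers = one Jamba block
print(config.mamba_dt_rank)                # 256 == math.ceil(4096 / 16)
```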
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.40.0.dev0"
+ }
huggingface-metadata.txt ADDED
@@ -0,0 +1,15 @@
+ url: https://huggingface.co/OxxoCodes/jamba-small-v1
+ branch: main
+ download date: 2024-04-07 03:58:19
+ sha256sum:
+     c1cb0813aa7b7947b100d5def870bc9007bed87e9b37609eb42e0577d9a2da68 model-00001-of-00011.safetensors
+     e42afe8aaf5e7b6730cad42dd25d8679ccab7175a0849be2c1fd6aae5be55a21 model-00002-of-00011.safetensors
+     dc0ca09411409a8250c587b6b0c7eda884e8c19797970ca3a7e1406752d9b3a6 model-00003-of-00011.safetensors
+     5fed085a6c116563d2fe756da7f258bc1f35082ea43a0eb4cb971365571edcf9 model-00004-of-00011.safetensors
+     0cbed5c551732bfa154eac609ad630729ba33bef55cf87e8ccd7680e5f39b9d7 model-00005-of-00011.safetensors
+     c6b02c596fcc7b1387d548cc7fc964c659cb4b13f62be3ad076ece8e1be8719c model-00006-of-00011.safetensors
+     25c52d109acab176ead45c946a80516ca90b90dc15b38d17c3a2c61e9ffc40e1 model-00007-of-00011.safetensors
+     1372be472ed40332bfa66a0149becdfd64b0a192cd36f1c90c2e9c7991e23d64 model-00008-of-00011.safetensors
+     fd60b87524590c051a49cc6081f59ceb1b001110afac03ffff18747d75191771 model-00009-of-00011.safetensors
+     0f87ae190ce25cbecfa77b33298cde814b2078bc5b925d4b43eb72306ffd5a96 model-00010-of-00011.safetensors
+     46703a22cc27bdcc08fbdbd4df84019d64927d16932ffb4f17b49a79706fc610 model-00011-of-00011.safetensors
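A small sketch (assuming the shards were downloaded to the current directory) for checking a shard against the `sha256sum` list above:

```python
# Verify a downloaded shard against the checksum list; illustrative helper.
import hashlib

def sha256sum(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
    return h.hexdigest()

expected = "c1cb0813aa7b7947b100d5def870bc9007bed87e9b37609eb42e0577d9a2da68"
assert sha256sum("model-00001-of-00011.safetensors") == expected
```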
model-00001-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1cb0813aa7b7947b100d5def870bc9007bed87e9b37609eb42e0577d9a2da68
+ size 4969962312
model-00002-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e42afe8aaf5e7b6730cad42dd25d8679ccab7175a0849be2c1fd6aae5be55a21
+ size 4932504120
model-00003-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc0ca09411409a8250c587b6b0c7eda884e8c19797970ca3a7e1406752d9b3a6
+ size 4884009920
model-00004-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fed085a6c116563d2fe756da7f258bc1f35082ea43a0eb4cb971365571edcf9
+ size 4884272128
model-00005-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cbed5c551732bfa154eac609ad630729ba33bef55cf87e8ccd7680e5f39b9d7
+ size 4932504128
model-00006-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6b02c596fcc7b1387d548cc7fc964c659cb4b13f62be3ad076ece8e1be8719c
+ size 4817196624
model-00007-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25c52d109acab176ead45c946a80516ca90b90dc15b38d17c3a2c61e9ffc40e1
+ size 4932504120
model-00008-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1372be472ed40332bfa66a0149becdfd64b0a192cd36f1c90c2e9c7991e23d64
+ size 4933258424
model-00009-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd60b87524590c051a49cc6081f59ceb1b001110afac03ffff18747d75191771
+ size 4835023592
model-00010-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f87ae190ce25cbecfa77b33298cde814b2078bc5b925d4b43eb72306ffd5a96
+ size 4932504128
model-00011-of-00011.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46703a22cc27bdcc08fbdbd4df84019d64927d16932ffb4f17b49a79706fc610
+ size 4127246352
model.safetensors.index.json ADDED
@@ -0,0 +1,322 @@
+ {
+   "metadata": {
+     "total_size": 53180948352
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00011-of-00011.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00011.safetensors",
+     "model.final_layernorm.weight": "model-00011-of-00011.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.A_log": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.B_layernorm.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.C_layernorm.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.D": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.conv1d.bias": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.conv1d.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.dt_layernorm.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.dt_proj.bias": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.dt_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.in_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.out_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.mamba.x_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.moe.experts.0.down_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.moe.experts.0.gate_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.moe.experts.0.up_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.0.pre_moe_layernorm.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.mamba.A_log": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.B_layernorm.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.C_layernorm.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.D": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.conv1d.bias": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.conv1d.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.dt_layernorm.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.dt_proj.bias": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.dt_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.in_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.out_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.mamba.x_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.0.down_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.0.gate_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.0.up_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.1.down_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.1.gate_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.1.up_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.10.down_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.10.gate_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.10.up_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.11.down_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.11.gate_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.11.up_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.12.down_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.12.gate_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.12.up_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.13.down_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.13.gate_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.13.up_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.14.down_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.14.gate_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.14.up_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.15.down_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.15.gate_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.15.up_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.1.moe.experts.2.down_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.2.gate_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.2.up_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.3.down_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.3.gate_proj.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.moe.experts.3.up_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.4.down_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.4.gate_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.4.up_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.5.down_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.5.gate_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.5.up_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.6.down_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.6.gate_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.6.up_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.7.down_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.7.gate_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.7.up_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.8.down_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.8.gate_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.8.up_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.9.down_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.9.gate_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.experts.9.up_proj.weight": "model-00002-of-00011.safetensors",
+     "model.layers.1.moe.router.weight": "model-00001-of-00011.safetensors",
+     "model.layers.1.pre_moe_layernorm.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00004-of-00011.safetensors",
+     "model.layers.2.mamba.A_log": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.B_layernorm.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.C_layernorm.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.D": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.conv1d.bias": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.conv1d.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.dt_layernorm.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.dt_proj.bias": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.dt_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.in_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.out_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.mamba.x_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.moe.experts.0.down_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.moe.experts.0.gate_proj.weight": "model-00003-of-00011.safetensors",
+     "model.layers.2.moe.experts.0.up_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.2.pre_moe_layernorm.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.mamba.A_log": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.B_layernorm.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.C_layernorm.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.D": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.conv1d.bias": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.conv1d.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.dt_layernorm.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.dt_proj.bias": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.dt_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.in_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.out_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.mamba.x_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.0.down_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.0.gate_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.0.up_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.1.down_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.1.gate_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.1.up_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.10.down_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.10.gate_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.10.up_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.11.down_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.11.gate_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.11.up_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.12.down_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.12.gate_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.12.up_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.13.down_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.13.gate_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.13.up_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.14.down_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.14.gate_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.14.up_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.15.down_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.15.gate_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.15.up_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.3.moe.experts.2.down_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.2.gate_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.2.up_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.3.down_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.3.gate_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.3.up_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.4.down_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.4.gate_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.4.up_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.5.down_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.5.gate_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.5.up_proj.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.moe.experts.6.down_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.6.gate_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.6.up_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.7.down_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.7.gate_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.7.up_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.8.down_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.8.gate_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.8.up_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.9.down_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.9.gate_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.experts.9.up_proj.weight": "model-00005-of-00011.safetensors",
+     "model.layers.3.moe.router.weight": "model-00004-of-00011.safetensors",
+     "model.layers.3.pre_moe_layernorm.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.moe.experts.0.down_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.moe.experts.0.gate_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.moe.experts.0.up_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.pre_moe_layernorm.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.mamba.A_log": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.B_layernorm.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.C_layernorm.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.D": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.conv1d.bias": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.conv1d.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.dt_layernorm.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.dt_proj.bias": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.dt_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.in_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.out_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.mamba.x_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.moe.experts.0.down_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.moe.experts.0.gate_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.moe.experts.0.up_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.moe.experts.1.down_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.moe.experts.1.gate_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.moe.experts.1.up_proj.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.moe.experts.10.down_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.10.gate_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.10.up_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.11.down_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.11.gate_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.11.up_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.12.down_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.12.gate_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.12.up_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.13.down_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.13.gate_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.13.up_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.14.down_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.14.gate_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.14.up_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.15.down_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.15.gate_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.15.up_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.2.down_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.2.gate_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.2.up_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.3.down_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.3.gate_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.3.up_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.4.down_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.4.gate_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.4.up_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.5.down_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.5.gate_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.5.up_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.6.down_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.6.gate_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.6.up_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.7.down_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.7.gate_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.7.up_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.8.down_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.8.gate_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.8.up_proj.weight": "model-00007-of-00011.safetensors",
+     "model.layers.5.moe.experts.9.down_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.9.gate_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.experts.9.up_proj.weight": "model-00008-of-00011.safetensors",
+     "model.layers.5.moe.router.weight": "model-00006-of-00011.safetensors",
+     "model.layers.5.pre_moe_layernorm.weight": "model-00008-of-00011.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.mamba.A_log": "model-00008-of-00011.safetensors",
+     "model.layers.6.mamba.B_layernorm.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.mamba.C_layernorm.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.mamba.D": "model-00008-of-00011.safetensors",
+     "model.layers.6.mamba.conv1d.bias": "model-00008-of-00011.safetensors",
+     "model.layers.6.mamba.conv1d.weight": "model-00008-of-00011.safetensors",
+     "model.layers.6.mamba.dt_layernorm.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.mamba.dt_proj.bias": "model-00009-of-00011.safetensors",
+     "model.layers.6.mamba.dt_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.mamba.in_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.mamba.out_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.mamba.x_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.moe.experts.0.down_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.moe.experts.0.gate_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.moe.experts.0.up_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.6.pre_moe_layernorm.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.mamba.A_log": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.B_layernorm.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.C_layernorm.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.D": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.conv1d.bias": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.conv1d.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.dt_layernorm.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.dt_proj.bias": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.dt_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.in_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.out_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.mamba.x_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.0.down_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.0.gate_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.0.up_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.1.down_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.1.gate_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.1.up_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.10.down_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.10.gate_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.10.up_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.11.down_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.11.gate_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.11.up_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.12.down_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.12.gate_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.12.up_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.13.down_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.13.gate_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.13.up_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.14.down_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.14.gate_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.14.up_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.15.down_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.15.gate_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.15.up_proj.weight": "model-00011-of-00011.safetensors",
+     "model.layers.7.moe.experts.2.down_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.2.gate_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.2.up_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.3.down_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.3.gate_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.3.up_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.4.down_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.4.gate_proj.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.moe.experts.4.up_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.5.down_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.5.gate_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.5.up_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.6.down_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.6.gate_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.6.up_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.7.down_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.7.gate_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.7.up_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.8.down_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.8.gate_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.8.up_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.9.down_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.9.gate_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.experts.9.up_proj.weight": "model-00010-of-00011.safetensors",
+     "model.layers.7.moe.router.weight": "model-00009-of-00011.safetensors",
+     "model.layers.7.pre_moe_layernorm.weight": "model-00011-of-00011.safetensors"
+   }
+ }
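The `weight_map` is what lets partial loads fetch only the shards they need; a minimal illustration (file path assumed local):

```python
# Look up which shard stores a given tensor; illustrative.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["weight_map"]["model.layers.4.self_attn.q_proj.weight"])
# -> model-00006-of-00011.safetensors
```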
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|pad|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|unk|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02fd6530b8ede0eedd8e509fcab32da7b1dd04c8119f8498c787100f13112713
+ size 1124742
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<|unk|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|pad|>",
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<|unk|>",
+   "use_default_system_prompt": false
+ }
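The `chat_template` above is the ChatML format; a hedged usage example follows (note the base model has had no instruction tuning, so the template is cosmetic here):

```python
# Render a conversation through the ChatML chat_template; illustrative.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("OxxoCodes/jamba-small-v1")
messages = [{"role": "user", "content": "Hello"}]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>user
# Hello
# <|im_end|>
# <|im_start|>assistant
```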