Half Epoch checkpoint:
Browse files11:27:54-941537 INFO Loading JSON datasets
Map: 100%|βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 1365/1365 [00:05<00:00, 262.82 examples/s]
11:28:00-429977 INFO Getting model ready
11:28:00-437996 INFO Preparing for training
11:28:00-439851 INFO Creating LoRA model
11:28:00-893558 INFO Starting training
Training 'mixtral' model using (q, v) projections
Trainable params: 50,331,648 (1.1246 %), All params: 4,475,538,432 (Model: 4,425,206,784)
11:28:01-555106 INFO Log file 'train_dataset_sample.json' created in the 'logs' directory.
Step: 4 {'loss': 0.0668, 'grad_norm': 0.1359836459159851, 'learning_rate': 9.963736263736264e-06, 'epoch': 0.003663003663003663}
Step: 9 {'loss': 0.0916, 'grad_norm': 0.2279471606016159, 'learning_rate': 9.927472527472529e-06, 'epoch': 0.007326007326007326}
Step: 14 {'loss': 0.0753, 'grad_norm': 0.4511280357837677, 'learning_rate': 9.891208791208792e-06, 'epoch': 0.01098901098901099}
Step: 19 {'loss': 0.084, 'grad_norm': 0.24976688623428345, 'learning_rate': 9.854945054945056e-06, 'epoch': 0.014652014652014652}
Step: 24 {'loss': 0.0741, 'grad_norm': 0.09527433663606644, 'learning_rate': 9.81868131868132e-06, 'epoch': 0.018315018315018316}
Step: 29 {'loss': 0.076, 'grad_norm': 0.06122274696826935, 'learning_rate': 9.782417582417582e-06, 'epoch': 0.02197802197802198}
Step: 34 {'loss': 0.082, 'grad_norm': 0.26327070593833923, 'learning_rate': 9.746153846153847e-06, 'epoch': 0.02564102564102564}
Step: 39 {'loss': 0.0612, 'grad_norm': 0.2465166449546814, 'learning_rate': 9.70989010989011e-06, 'epoch': 0.029304029304029304}
Step: 44 {'loss': 0.0733, 'grad_norm': 0.08420123159885406, 'learning_rate': 9.673626373626375e-06, 'epoch': 0.03296703296703297}
Step: 49 {'loss': 0.0565, 'grad_norm': 0.25095030665397644, 'learning_rate': 9.637362637362638e-06, 'epoch': 0.03663003663003663}
Step: 54 {'loss': 0.0902, 'grad_norm': 0.33763477206230164, 'learning_rate': 9.601098901098903e-06, 'epoch': 0.040293040293040296}
Step: 59 {'loss': 0.1316, 'grad_norm': 0.5101003050804138, 'learning_rate': 9.564835164835166e-06, 'epoch': 0.04395604395604396}
Step: 64 {'loss': 0.1362, 'grad_norm': 0.45701834559440613, 'learning_rate': 9.528571428571429e-06, 'epoch': 0.047619047619047616}
Step: 69 {'loss': 0.1631, 'grad_norm': 0.563866376876831, 'learning_rate': 9.492307692307693e-06, 'epoch': 0.05128205128205128}
Step: 74 {'loss': 0.1354, 'grad_norm': 0.10952164232730865, 'learning_rate': 9.456043956043956e-06, 'epoch': 0.054945054945054944}
Step: 79 {'loss': 0.1858, 'grad_norm': 0.22133922576904297, 'learning_rate': 9.419780219780221e-06, 'epoch': 0.05860805860805861}
Step: 84 {'loss': 0.1611, 'grad_norm': 0.521860659122467, 'learning_rate': 9.383516483516484e-06, 'epoch': 0.06227106227106227}
Step: 89 {'loss': 0.1508, 'grad_norm': 0.56801837682724, 'learning_rate': 9.347252747252749e-06, 'epoch': 0.06593406593406594}
Step: 94 {'loss': 0.1257, 'grad_norm': 0.5347073674201965, 'learning_rate': 9.310989010989012e-06, 'epoch': 0.0695970695970696}
Step: 99 {'loss': 0.187, 'grad_norm': 0.5496135950088501, 'learning_rate': 9.274725274725277e-06, 'epoch': 0.07326007326007326}
Step: 104 {'loss': 0.1954, 'grad_norm': 0.7482754588127136, 'learning_rate': 9.23846153846154e-06, 'epoch': 0.07692307692307693}
Step: 109 {'loss': 0.1157, 'grad_norm': 0.11614499986171722, 'learning_rate': 9.202197802197803e-06, 'epoch': 0.08058608058608059}
Step: 114 {'loss': 0.1099, 'grad_norm': 0.21353064477443695, 'learning_rate': 9.165934065934066e-06, 'epoch': 0.08424908424908426}
Step: 119 {'loss': 0.1752, 'grad_norm': 0.4894441068172455, 'learning_rate': 9.12967032967033e-06, 'epoch': 0.08791208791208792}
Step: 124 {'loss': 0.1616, 'grad_norm': 0.6079414486885071, 'learning_rate': 9.093406593406593e-06, 'epoch': 0.09157509157509157}
Step: 129 {'loss': 0.1349, 'grad_norm': 0.08628781884908676, 'learning_rate': 9.057142857142858e-06, 'epoch': 0.09523809523809523}
Step: 134 {'loss': 0.1627, 'grad_norm': 0.5504783987998962, 'learning_rate': 9.020879120879121e-06, 'epoch': 0.0989010989010989}
Step: 139 {'loss': 0.1627, 'grad_norm': 1.0104156732559204, 'learning_rate': 8.984615384615386e-06, 'epoch': 0.10256410256410256}
Step: 144 {'loss': 0.111, 'grad_norm': 0.2242872714996338, 'learning_rate': 8.948351648351649e-06, 'epoch': 0.10622710622710622}
Step: 149 {'loss': 0.1287, 'grad_norm': 0.20702646672725677, 'learning_rate': 8.912087912087912e-06, 'epoch': 0.10989010989010989}
Step: 154 {'loss': 0.1564, 'grad_norm': 0.42282870411872864, 'learning_rate': 8.875824175824177e-06, 'epoch': 0.11355311355311355}
Step: 159 {'loss': 0.1234, 'grad_norm': 0.29965367913246155, 'learning_rate': 8.83956043956044e-06, 'epoch': 0.11721611721611722}
Step: 164 {'loss': 0.1386, 'grad_norm': 0.25579577684402466, 'learning_rate': 8.803296703296704e-06, 'epoch': 0.12087912087912088}
Step: 169 {'loss': 0.1429, 'grad_norm': 0.9871158599853516, 'learning_rate': 8.767032967032967e-06, 'epoch': 0.12454212454212454}
Step: 174 {'loss': 0.1491, 'grad_norm': 0.4206677973270416, 'learning_rate': 8.730769230769232e-06, 'epoch': 0.1282051282051282}
Step: 179 {'loss': 0.1145, 'grad_norm': 0.5669340491294861, 'learning_rate': 8.694505494505495e-06, 'epoch': 0.13186813186813187}
Step: 184 {'loss': 0.123, 'grad_norm': 0.1574435532093048, 'learning_rate': 8.65824175824176e-06, 'epoch': 0.13553113553113552}
Step: 189 {'loss': 0.1693, 'grad_norm': 0.5592064261436462, 'learning_rate': 8.621978021978023e-06, 'epoch': 0.1391941391941392}
Step: 194 {'loss': 0.1243, 'grad_norm': 0.5779590606689453, 'learning_rate': 8.585714285714287e-06, 'epoch': 0.14285714285714285}
Step: 199 {'loss': 0.1396, 'grad_norm': 0.11793188750743866, 'learning_rate': 8.54945054945055e-06, 'epoch': 0.14652014652014653}
Step: 204 {'loss': 0.1117, 'grad_norm': 0.5391713976860046, 'learning_rate': 8.513186813186814e-06, 'epoch': 0.15018315018315018}
Step: 209 {'loss': 0.1466, 'grad_norm': 0.921928882598877, 'learning_rate': 8.476923076923078e-06, 'epoch': 0.15384615384615385}
Step: 214 {'loss': 0.1295, 'grad_norm': 0.48140949010849, 'learning_rate': 8.440659340659341e-06, 'epoch': 0.1575091575091575}
Step: 219 {'loss': 0.1887, 'grad_norm': 0.658561646938324, 'learning_rate': 8.404395604395606e-06, 'epoch': 0.16117216117216118}
Step: 224 {'loss': 0.1308, 'grad_norm': 0.6024790406227112, 'learning_rate': 8.368131868131869e-06, 'epoch': 0.16483516483516483}
Step: 229 {'loss': 0.067, 'grad_norm': 0.24153736233711243, 'learning_rate': 8.331868131868132e-06, 'epoch': 0.1684981684981685}
Step: 234 {'loss': 0.117, 'grad_norm': 0.17423799633979797, 'learning_rate': 8.295604395604395e-06, 'epoch': 0.17216117216117216}
Step: 239 {'loss': 0.1445, 'grad_norm': 0.6188604831695557, 'learning_rate': 8.25934065934066e-06, 'epoch': 0.17582417582417584}
Step: 244 {'loss': 0.1086, 'grad_norm': 0.47320955991744995, 'learning_rate': 8.223076923076923e-06, 'epoch': 0.1794871794871795}
Step: 249 {'loss': 0.1047, 'grad_norm': 0.22519785165786743, 'learning_rate': 8.186813186813188e-06, 'epoch': 0.18315018315018314}
Step: 254 {'loss': 0.1507, 'grad_norm': 0.8073124885559082, 'learning_rate': 8.15054945054945e-06, 'epoch': 0.18681318681318682}
Step: 259 {'loss': 0.1354, 'grad_norm': 0.5822990536689758, 'learning_rate': 8.114285714285715e-06, 'epoch': 0.19047619047619047}
Step: 264 {'loss': 0.1733, 'grad_norm': 0.7305815815925598, 'learning_rate': 8.078021978021978e-06, 'epoch': 0.19413919413919414}
Step: 269 {'loss': 0.1101, 'grad_norm': 0.4647897779941559, 'learning_rate': 8.041758241758243e-06, 'epoch': 0.1978021978021978}
Step: 274 {'loss': 0.1054, 'grad_norm': 0.6543989181518555, 'learning_rate': 8.005494505494506e-06, 'epoch': 0.20146520146520147}
Step: 279 {'loss': 0.1129, 'grad_norm': 0.8039067983627319, 'learning_rate': 7.96923076923077e-06, 'epoch': 0.20512820512820512}
Step: 284 {'loss': 0.1249, 'grad_norm': 0.35283106565475464, 'learning_rate': 7.932967032967034e-06, 'epoch': 0.2087912087912088}
Step: 289 {'loss': 0.1549, 'grad_norm': 0.6537496447563171, 'learning_rate': 7.896703296703297e-06, 'epoch': 0.21245421245421245}
Step: 294 {'loss': 0.0883, 'grad_norm': 0.23383976519107819, 'learning_rate': 7.860439560439561e-06, 'epoch': 0.21611721611721613}
Step: 299 {'loss': 0.1313, 'grad_norm': 0.1237742155790329, 'learning_rate': 7.824175824175825e-06, 'epoch': 0.21978021978021978}
Step: 304 {'loss': 0.1591, 'grad_norm': 0.7631969451904297, 'learning_rate': 7.78791208791209e-06, 'epoch': 0.22344322344322345}
Step: 309 {'loss': 0.0925, 'grad_norm': 0.48745134472846985, 'learning_rate': 7.751648351648352e-06, 'epoch': 0.2271062271062271}
Step: 314 {'loss': 0.1326, 'grad_norm': 0.6884176731109619, 'learning_rate': 7.715384615384615e-06, 'epoch': 0.23076923076923078}
Step: 319 {'loss': 0.125, 'grad_norm': 0.41068509221076965, 'learning_rate': 7.679120879120878e-06, 'epoch': 0.23443223443223443}
Step: 324 {'loss': 0.1141, 'grad_norm': 1.1253948211669922, 'learning_rate': 7.642857142857143e-06, 'epoch': 0.23809523809523808}
Step: 329 {'loss': 0.1702, 'grad_norm': 0.7410513162612915, 'learning_rate': 7.606593406593405e-06, 'epoch': 0.24175824175824176}
Step: 334 {'loss': 0.1521, 'grad_norm': 0.9737598896026611, 'learning_rate': 7.57032967032967e-06, 'epoch': 0.2454212454212454}
Step: 339 {'loss': 0.137, 'grad_norm': 1.0085108280181885, 'learning_rate': 7.534065934065934e-06, 'epoch': 0.2490842490842491}
Step: 344 {'loss': 0.0966, 'grad_norm': 0.486762672662735, 'learning_rate': 7.497802197802198e-06, 'epoch': 0.25274725274725274}
Step: 349 {'loss': 0.1231, 'grad_norm': 0.801877498626709, 'learning_rate': 7.4615384615384615e-06, 'epoch': 0.2564102564102564}
Step: 354 {'loss': 0.1453, 'grad_norm': 0.6585664749145508, 'learning_rate': 7.425274725274726e-06, 'epoch': 0.2600732600732601}
Step: 359 {'loss': 0.1296, 'grad_norm': 0.2636565566062927, 'learning_rate': 7.389010989010989e-06, 'epoch': 0.26373626373626374}
Step: 364 {'loss': 0.0983, 'grad_norm': 0.25512686371803284, 'learning_rat
- added_tokens.json +40 -0
- config.json +35 -0
- generation_config.json +6 -0
- mergekit_moe_config.yml +144 -0
- pytorch_model-00001-of-00004.bin +3 -0
- pytorch_model-00002-of-00004.bin +3 -0
- pytorch_model-00003-of-00004.bin +3 -0
- pytorch_model-00004-of-00004.bin +3 -0
- pytorch_model.bin.index.json +522 -0
- special_tokens_map.json +33 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +350 -0
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"<|/code|>": 32014,
|
3 |
+
"<|/data|>": 32033,
|
4 |
+
"<|/inst|>": 32037,
|
5 |
+
"<|/query|>": 32031,
|
6 |
+
"<|/sys|>": 32035,
|
7 |
+
"<|assistant_mask|>": 32017,
|
8 |
+
"<|assistant|>": 32001,
|
9 |
+
"<|calc|>": 32012,
|
10 |
+
"<|code|>": 32013,
|
11 |
+
"<|continue|>": 32009,
|
12 |
+
"<|data|>": 32032,
|
13 |
+
"<|diff_marker|>": 32025,
|
14 |
+
"<|disc_sep|>": 32029,
|
15 |
+
"<|disc_start|>": 32028,
|
16 |
+
"<|disc_thread|><|query|>": 32030,
|
17 |
+
"<|endoftext|>": 32000,
|
18 |
+
"<|end|>": 32007,
|
19 |
+
"<|fim_middle|>": 32021,
|
20 |
+
"<|fim_prefix|>": 32020,
|
21 |
+
"<|fim_suffix|>": 32022,
|
22 |
+
"<|function_call|>": 32005,
|
23 |
+
"<|function_list|>": 32011,
|
24 |
+
"<|function_output|>": 32003,
|
25 |
+
"<|ghissue|>": 32026,
|
26 |
+
"<|ghreview|>": 32027,
|
27 |
+
"<|inst|>": 32036,
|
28 |
+
"<|ipynb_marker|>": 32024,
|
29 |
+
"<|message|>": 32019,
|
30 |
+
"<|meta_start|>": 32023,
|
31 |
+
"<|raw|>": 32008,
|
32 |
+
"<|resource|>": 32016,
|
33 |
+
"<|start|>": 32018,
|
34 |
+
"<|step|>": 32002,
|
35 |
+
"<|summary|>": 32015,
|
36 |
+
"<|system|>": 32006,
|
37 |
+
"<|sys|>": 32034,
|
38 |
+
"<|tag|>": 32004,
|
39 |
+
"<|user|>": 32010
|
40 |
+
}
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": ".\\DigitalSoul",
|
3 |
+
"architectures": [
|
4 |
+
"MixtralForCausalLM"
|
5 |
+
],
|
6 |
+
"attention_bias": false,
|
7 |
+
"attention_dropout": 0.0,
|
8 |
+
"bos_token_id": 1,
|
9 |
+
"eos_token_id": 32000,
|
10 |
+
"hidden_act": "silu",
|
11 |
+
"hidden_size": 3072,
|
12 |
+
"initializer_range": 0.02,
|
13 |
+
"intermediate_size": 8192,
|
14 |
+
"max_position_embeddings": 16384,
|
15 |
+
"mlp_bias": false,
|
16 |
+
"model_type": "mixtral",
|
17 |
+
"num_attention_heads": 32,
|
18 |
+
"num_experts_per_tok": 3,
|
19 |
+
"num_hidden_layers": 32,
|
20 |
+
"num_key_value_heads": 32,
|
21 |
+
"num_local_experts": 3,
|
22 |
+
"output_router_logits": false,
|
23 |
+
"pretraining_tp": 1,
|
24 |
+
"rms_norm_eps": 1e-05,
|
25 |
+
"rope_scaling": null,
|
26 |
+
"rope_theta": 10000.0,
|
27 |
+
"router_aux_loss_coef": 0.001,
|
28 |
+
"router_jitter_noise": 0.0,
|
29 |
+
"sliding_window": null,
|
30 |
+
"tie_word_embeddings": false,
|
31 |
+
"torch_dtype": "bfloat16",
|
32 |
+
"transformers_version": "4.44.0",
|
33 |
+
"use_cache": true,
|
34 |
+
"vocab_size": 32064
|
35 |
+
}
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_from_model_config": true,
|
3 |
+
"bos_token_id": 1,
|
4 |
+
"eos_token_id": 32000,
|
5 |
+
"transformers_version": "4.44.0"
|
6 |
+
}
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
base_model: TroyDoesAI/BlackSheep-4B
|
2 |
+
gate_mode: random # one of "hidden", "cheap_embed", or "random"
|
3 |
+
|
4 |
+
# There are three methods for populating the MoE gates implemented.
|
5 |
+
|
6 |
+
# "hidden"
|
7 |
+
# Uses the hidden state representations of the positive/negative prompts for MoE gate parameters. Best quality and most effective option; the default. Requires evaluating each prompt using the base model so you might not be able to use this on constrained hardware (depending on the model). You can use --load-in-8bit or --load-in-4bit to reduce VRAM usage.
|
8 |
+
|
9 |
+
# "cheap_embed"
|
10 |
+
# Uses only the raw token embedding of the prompts, using the same gate parameters for every layer. Distinctly less effective than "hidden". Can be run on much, much lower end hardware.
|
11 |
+
|
12 |
+
# "random"
|
13 |
+
# Randomly initializes the MoE gates. Good for if you are going to fine tune the model afterwards, or maybe if you want something a little unhinged? I won't judge.
|
14 |
+
|
15 |
+
experts:
|
16 |
+
- source_model: TroyDoesAI/BlackSheep-4B
|
17 |
+
positive_prompts:
|
18 |
+
- "digital"
|
19 |
+
- "soul"
|
20 |
+
- "<|assistant|>"
|
21 |
+
- "instruction"
|
22 |
+
- "input"
|
23 |
+
- "output"
|
24 |
+
- "response"
|
25 |
+
- "story"
|
26 |
+
- "stories"
|
27 |
+
- "creative"
|
28 |
+
- "curious"
|
29 |
+
- "action"
|
30 |
+
- "sensitive"
|
31 |
+
- "pleasant"
|
32 |
+
- "thoughtful"
|
33 |
+
- "interesting"
|
34 |
+
- "BEGININPUT"
|
35 |
+
- "BEGINCONTEXT"
|
36 |
+
- "ENDCONTEXT"
|
37 |
+
- "ENDINPUT"
|
38 |
+
- "BEGININSTRUCTION"
|
39 |
+
- "ENDINSTRUCTION"
|
40 |
+
- "url"
|
41 |
+
- "date"
|
42 |
+
- "source"
|
43 |
+
- "context"
|
44 |
+
- "cite"
|
45 |
+
- "mermaid"
|
46 |
+
- "flow"
|
47 |
+
- "diagram"
|
48 |
+
- "sequence"
|
49 |
+
- "series"
|
50 |
+
- "code to flow"
|
51 |
+
- "code to diagram"
|
52 |
+
- "python to flow diagram"
|
53 |
+
- "program to flow"
|
54 |
+
- "program to flow diagram"
|
55 |
+
- "write"
|
56 |
+
- "create"
|
57 |
+
- "visualize"
|
58 |
+
- "show"
|
59 |
+
- "explain"
|
60 |
+
- "draw"
|
61 |
+
- "describe"
|
62 |
+
- "summarize"
|
63 |
+
- "concise"
|
64 |
+
- "do"
|
65 |
+
- "say"
|
66 |
+
- "code"
|
67 |
+
- "```"
|
68 |
+
- "```mermaid"
|
69 |
+
- "graphTB"
|
70 |
+
- "graphTD"
|
71 |
+
- "graphLR"
|
72 |
+
- "graphRL"
|
73 |
+
|
74 |
+
- source_model: TroyDoesAI/BlackSheep-4B
|
75 |
+
positive_prompts:
|
76 |
+
- "Digital Soul"
|
77 |
+
- "hello"
|
78 |
+
- "you"
|
79 |
+
- "assistant"
|
80 |
+
- "who"
|
81 |
+
- "what"
|
82 |
+
- "when"
|
83 |
+
- "where"
|
84 |
+
- "why"
|
85 |
+
- "how"
|
86 |
+
- "me"
|
87 |
+
- "politic"
|
88 |
+
- "conservative"
|
89 |
+
|
90 |
+
- source_model: TroyDoesAI/BlackSheep-4B
|
91 |
+
positive_prompts:
|
92 |
+
- "<|assistant|>"
|
93 |
+
- "instruction"
|
94 |
+
- "input"
|
95 |
+
- "output"
|
96 |
+
- "response"
|
97 |
+
- "story"
|
98 |
+
- "stories"
|
99 |
+
- "creative"
|
100 |
+
- "curious"
|
101 |
+
- "action"
|
102 |
+
- "sensitive"
|
103 |
+
- "pleasant"
|
104 |
+
- "thoughtful"
|
105 |
+
- "interesting"
|
106 |
+
- "BEGININPUT"
|
107 |
+
- "BEGINCONTEXT"
|
108 |
+
- "ENDCONTEXT"
|
109 |
+
- "ENDINPUT"
|
110 |
+
- "BEGININSTRUCTION"
|
111 |
+
- "ENDINSTRUCTION"
|
112 |
+
- "url"
|
113 |
+
- "date"
|
114 |
+
- "source"
|
115 |
+
- "context"
|
116 |
+
- "cite"
|
117 |
+
- "mermaid"
|
118 |
+
- "flow"
|
119 |
+
- "diagram"
|
120 |
+
- "sequence"
|
121 |
+
- "series"
|
122 |
+
- "code to flow"
|
123 |
+
- "code to diagram"
|
124 |
+
- "python to flow diagram"
|
125 |
+
- "program to flow"
|
126 |
+
- "program to flow diagram"
|
127 |
+
- "write"
|
128 |
+
- "create"
|
129 |
+
- "visualize"
|
130 |
+
- "show"
|
131 |
+
- "explain"
|
132 |
+
- "draw"
|
133 |
+
- "describe"
|
134 |
+
- "summarize"
|
135 |
+
- "concise"
|
136 |
+
- "do"
|
137 |
+
- "say"
|
138 |
+
- "code"
|
139 |
+
- "```"
|
140 |
+
- "```mermaid"
|
141 |
+
- "graphTB"
|
142 |
+
- "graphTD"
|
143 |
+
- "graphLR"
|
144 |
+
- "graphRL"
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:78a2c5bfcab4531a259160d621b362de1c05578ccda88e986b34be7c2ac40a56
|
3 |
+
size 4991420771
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f71573bdbe775a26e3ad9cbf9ecd51032f0307af9c8f96bdfc3f19d0624d73c4
|
3 |
+
size 4995766167
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:948697ccb8053a18f248fa57e188ea3b80e4fc34d03ed1814efb7024602f740b
|
3 |
+
size 4957997958
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d349868422cd99a07e86708db413472ce5932a6b0a83f2589b92f4d3905d64dd
|
3 |
+
size 2361428548
|
@@ -0,0 +1,522 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"metadata": {
|
3 |
+
"total_size": 17306425344
|
4 |
+
},
|
5 |
+
"weight_map": {
|
6 |
+
"lm_head.weight": "pytorch_model-00004-of-00004.bin",
|
7 |
+
"model.embed_tokens.weight": "pytorch_model-00001-of-00004.bin",
|
8 |
+
"model.layers.0.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
9 |
+
"model.layers.0.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
10 |
+
"model.layers.0.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
11 |
+
"model.layers.0.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
12 |
+
"model.layers.0.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
13 |
+
"model.layers.0.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
14 |
+
"model.layers.0.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
15 |
+
"model.layers.0.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
16 |
+
"model.layers.0.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
17 |
+
"model.layers.0.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
18 |
+
"model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
19 |
+
"model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
20 |
+
"model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
21 |
+
"model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
22 |
+
"model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
23 |
+
"model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
24 |
+
"model.layers.1.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
25 |
+
"model.layers.1.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
26 |
+
"model.layers.1.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
27 |
+
"model.layers.1.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
28 |
+
"model.layers.1.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
29 |
+
"model.layers.1.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
30 |
+
"model.layers.1.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
31 |
+
"model.layers.1.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
32 |
+
"model.layers.1.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
33 |
+
"model.layers.1.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
34 |
+
"model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
35 |
+
"model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
36 |
+
"model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
37 |
+
"model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
38 |
+
"model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
39 |
+
"model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
40 |
+
"model.layers.10.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
41 |
+
"model.layers.10.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
42 |
+
"model.layers.10.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
43 |
+
"model.layers.10.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
44 |
+
"model.layers.10.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
45 |
+
"model.layers.10.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
46 |
+
"model.layers.10.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
47 |
+
"model.layers.10.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
48 |
+
"model.layers.10.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
49 |
+
"model.layers.10.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
50 |
+
"model.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
51 |
+
"model.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
52 |
+
"model.layers.10.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
53 |
+
"model.layers.10.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
54 |
+
"model.layers.10.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
55 |
+
"model.layers.10.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
56 |
+
"model.layers.11.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
57 |
+
"model.layers.11.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
58 |
+
"model.layers.11.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
59 |
+
"model.layers.11.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
60 |
+
"model.layers.11.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
61 |
+
"model.layers.11.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
62 |
+
"model.layers.11.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
63 |
+
"model.layers.11.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
64 |
+
"model.layers.11.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
65 |
+
"model.layers.11.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
66 |
+
"model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
67 |
+
"model.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
68 |
+
"model.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
69 |
+
"model.layers.11.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
70 |
+
"model.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
71 |
+
"model.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
72 |
+
"model.layers.12.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
73 |
+
"model.layers.12.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
74 |
+
"model.layers.12.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
75 |
+
"model.layers.12.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
76 |
+
"model.layers.12.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
77 |
+
"model.layers.12.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
78 |
+
"model.layers.12.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
79 |
+
"model.layers.12.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
80 |
+
"model.layers.12.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
81 |
+
"model.layers.12.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
82 |
+
"model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
83 |
+
"model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
84 |
+
"model.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
85 |
+
"model.layers.12.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
86 |
+
"model.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
87 |
+
"model.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
88 |
+
"model.layers.13.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
89 |
+
"model.layers.13.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
90 |
+
"model.layers.13.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
91 |
+
"model.layers.13.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
92 |
+
"model.layers.13.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
93 |
+
"model.layers.13.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
94 |
+
"model.layers.13.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
95 |
+
"model.layers.13.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
96 |
+
"model.layers.13.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
97 |
+
"model.layers.13.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
98 |
+
"model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
99 |
+
"model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
100 |
+
"model.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
101 |
+
"model.layers.13.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
102 |
+
"model.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
103 |
+
"model.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
104 |
+
"model.layers.14.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
105 |
+
"model.layers.14.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
106 |
+
"model.layers.14.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
107 |
+
"model.layers.14.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
108 |
+
"model.layers.14.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
109 |
+
"model.layers.14.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
110 |
+
"model.layers.14.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
111 |
+
"model.layers.14.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
112 |
+
"model.layers.14.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
113 |
+
"model.layers.14.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
114 |
+
"model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
115 |
+
"model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
116 |
+
"model.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
117 |
+
"model.layers.14.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
118 |
+
"model.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
119 |
+
"model.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
120 |
+
"model.layers.15.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
121 |
+
"model.layers.15.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
122 |
+
"model.layers.15.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
123 |
+
"model.layers.15.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
124 |
+
"model.layers.15.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
125 |
+
"model.layers.15.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
126 |
+
"model.layers.15.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
127 |
+
"model.layers.15.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
128 |
+
"model.layers.15.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
129 |
+
"model.layers.15.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
130 |
+
"model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
131 |
+
"model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
132 |
+
"model.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
133 |
+
"model.layers.15.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
134 |
+
"model.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
135 |
+
"model.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
136 |
+
"model.layers.16.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
137 |
+
"model.layers.16.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
138 |
+
"model.layers.16.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
139 |
+
"model.layers.16.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
140 |
+
"model.layers.16.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
141 |
+
"model.layers.16.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
142 |
+
"model.layers.16.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
143 |
+
"model.layers.16.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
144 |
+
"model.layers.16.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
145 |
+
"model.layers.16.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
146 |
+
"model.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
147 |
+
"model.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
148 |
+
"model.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
149 |
+
"model.layers.16.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
150 |
+
"model.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
151 |
+
"model.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
152 |
+
"model.layers.17.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
153 |
+
"model.layers.17.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
154 |
+
"model.layers.17.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
155 |
+
"model.layers.17.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
156 |
+
"model.layers.17.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
157 |
+
"model.layers.17.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
158 |
+
"model.layers.17.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
159 |
+
"model.layers.17.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
160 |
+
"model.layers.17.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
161 |
+
"model.layers.17.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
162 |
+
"model.layers.17.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
163 |
+
"model.layers.17.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
164 |
+
"model.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
165 |
+
"model.layers.17.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
166 |
+
"model.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
167 |
+
"model.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
168 |
+
"model.layers.18.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
169 |
+
"model.layers.18.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
170 |
+
"model.layers.18.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
171 |
+
"model.layers.18.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
172 |
+
"model.layers.18.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
173 |
+
"model.layers.18.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
174 |
+
"model.layers.18.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
175 |
+
"model.layers.18.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
176 |
+
"model.layers.18.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
177 |
+
"model.layers.18.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
178 |
+
"model.layers.18.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
179 |
+
"model.layers.18.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
180 |
+
"model.layers.18.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
|
181 |
+
"model.layers.18.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
182 |
+
"model.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
|
183 |
+
"model.layers.18.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
184 |
+
"model.layers.19.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
185 |
+
"model.layers.19.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
186 |
+
"model.layers.19.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
187 |
+
"model.layers.19.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
188 |
+
"model.layers.19.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
189 |
+
"model.layers.19.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
190 |
+
"model.layers.19.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
191 |
+
"model.layers.19.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
192 |
+
"model.layers.19.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
193 |
+
"model.layers.19.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
194 |
+
"model.layers.19.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
195 |
+
"model.layers.19.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
196 |
+
"model.layers.19.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
197 |
+
"model.layers.19.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
198 |
+
"model.layers.19.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
199 |
+
"model.layers.19.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
200 |
+
"model.layers.2.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
201 |
+
"model.layers.2.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
202 |
+
"model.layers.2.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
203 |
+
"model.layers.2.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
204 |
+
"model.layers.2.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
205 |
+
"model.layers.2.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
206 |
+
"model.layers.2.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
207 |
+
"model.layers.2.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
208 |
+
"model.layers.2.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
209 |
+
"model.layers.2.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
210 |
+
"model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
211 |
+
"model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
212 |
+
"model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
213 |
+
"model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
214 |
+
"model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
215 |
+
"model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
216 |
+
"model.layers.20.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
217 |
+
"model.layers.20.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
218 |
+
"model.layers.20.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
219 |
+
"model.layers.20.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
220 |
+
"model.layers.20.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
221 |
+
"model.layers.20.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
222 |
+
"model.layers.20.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
223 |
+
"model.layers.20.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
224 |
+
"model.layers.20.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
225 |
+
"model.layers.20.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
226 |
+
"model.layers.20.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
227 |
+
"model.layers.20.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
228 |
+
"model.layers.20.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
229 |
+
"model.layers.20.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
230 |
+
"model.layers.20.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
231 |
+
"model.layers.20.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
232 |
+
"model.layers.21.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
233 |
+
"model.layers.21.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
234 |
+
"model.layers.21.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
235 |
+
"model.layers.21.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
236 |
+
"model.layers.21.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
237 |
+
"model.layers.21.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
238 |
+
"model.layers.21.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
239 |
+
"model.layers.21.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
240 |
+
"model.layers.21.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
241 |
+
"model.layers.21.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
242 |
+
"model.layers.21.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
243 |
+
"model.layers.21.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
244 |
+
"model.layers.21.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
245 |
+
"model.layers.21.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
246 |
+
"model.layers.21.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
247 |
+
"model.layers.21.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
248 |
+
"model.layers.22.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
249 |
+
"model.layers.22.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
250 |
+
"model.layers.22.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
251 |
+
"model.layers.22.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
252 |
+
"model.layers.22.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
253 |
+
"model.layers.22.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
254 |
+
"model.layers.22.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
255 |
+
"model.layers.22.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
256 |
+
"model.layers.22.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
257 |
+
"model.layers.22.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
258 |
+
"model.layers.22.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
259 |
+
"model.layers.22.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
260 |
+
"model.layers.22.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
261 |
+
"model.layers.22.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
262 |
+
"model.layers.22.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
263 |
+
"model.layers.22.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
264 |
+
"model.layers.23.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
265 |
+
"model.layers.23.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
266 |
+
"model.layers.23.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
267 |
+
"model.layers.23.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
268 |
+
"model.layers.23.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
269 |
+
"model.layers.23.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
270 |
+
"model.layers.23.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
271 |
+
"model.layers.23.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
272 |
+
"model.layers.23.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
273 |
+
"model.layers.23.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
274 |
+
"model.layers.23.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
275 |
+
"model.layers.23.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
276 |
+
"model.layers.23.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
277 |
+
"model.layers.23.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
278 |
+
"model.layers.23.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
279 |
+
"model.layers.23.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
280 |
+
"model.layers.24.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
281 |
+
"model.layers.24.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
282 |
+
"model.layers.24.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
283 |
+
"model.layers.24.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
284 |
+
"model.layers.24.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
285 |
+
"model.layers.24.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
286 |
+
"model.layers.24.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
287 |
+
"model.layers.24.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
288 |
+
"model.layers.24.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
289 |
+
"model.layers.24.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
290 |
+
"model.layers.24.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
291 |
+
"model.layers.24.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
292 |
+
"model.layers.24.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
293 |
+
"model.layers.24.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
294 |
+
"model.layers.24.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
295 |
+
"model.layers.24.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
296 |
+
"model.layers.25.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
297 |
+
"model.layers.25.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
298 |
+
"model.layers.25.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
299 |
+
"model.layers.25.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
300 |
+
"model.layers.25.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
301 |
+
"model.layers.25.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
302 |
+
"model.layers.25.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
303 |
+
"model.layers.25.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
304 |
+
"model.layers.25.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
305 |
+
"model.layers.25.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
306 |
+
"model.layers.25.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
307 |
+
"model.layers.25.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
308 |
+
"model.layers.25.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
309 |
+
"model.layers.25.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
310 |
+
"model.layers.25.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
311 |
+
"model.layers.25.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
312 |
+
"model.layers.26.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
313 |
+
"model.layers.26.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
314 |
+
"model.layers.26.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
315 |
+
"model.layers.26.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
316 |
+
"model.layers.26.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
317 |
+
"model.layers.26.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
318 |
+
"model.layers.26.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
319 |
+
"model.layers.26.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
320 |
+
"model.layers.26.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00003-of-00004.bin",
|
321 |
+
"model.layers.26.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
322 |
+
"model.layers.26.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
323 |
+
"model.layers.26.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
|
324 |
+
"model.layers.26.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
325 |
+
"model.layers.26.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
326 |
+
"model.layers.26.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
327 |
+
"model.layers.26.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
328 |
+
"model.layers.27.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00003-of-00004.bin",
|
329 |
+
"model.layers.27.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00003-of-00004.bin",
|
330 |
+
"model.layers.27.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00003-of-00004.bin",
|
331 |
+
"model.layers.27.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00003-of-00004.bin",
|
332 |
+
"model.layers.27.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00003-of-00004.bin",
|
333 |
+
"model.layers.27.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00003-of-00004.bin",
|
334 |
+
"model.layers.27.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00003-of-00004.bin",
|
335 |
+
"model.layers.27.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00003-of-00004.bin",
|
336 |
+
"model.layers.27.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00004-of-00004.bin",
|
337 |
+
"model.layers.27.block_sparse_moe.gate.weight": "pytorch_model-00003-of-00004.bin",
|
338 |
+
"model.layers.27.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
339 |
+
"model.layers.27.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
340 |
+
"model.layers.27.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
|
341 |
+
"model.layers.27.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
|
342 |
+
"model.layers.27.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
|
343 |
+
"model.layers.27.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
|
344 |
+
"model.layers.28.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00004-of-00004.bin",
|
345 |
+
"model.layers.28.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00004-of-00004.bin",
|
346 |
+
"model.layers.28.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00004-of-00004.bin",
|
347 |
+
"model.layers.28.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00004-of-00004.bin",
|
348 |
+
"model.layers.28.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00004-of-00004.bin",
|
349 |
+
"model.layers.28.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00004-of-00004.bin",
|
350 |
+
"model.layers.28.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00004-of-00004.bin",
|
351 |
+
"model.layers.28.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00004-of-00004.bin",
|
352 |
+
"model.layers.28.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00004-of-00004.bin",
|
353 |
+
"model.layers.28.block_sparse_moe.gate.weight": "pytorch_model-00004-of-00004.bin",
|
354 |
+
"model.layers.28.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
355 |
+
"model.layers.28.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
356 |
+
"model.layers.28.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
|
357 |
+
"model.layers.28.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
|
358 |
+
"model.layers.28.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
|
359 |
+
"model.layers.28.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
|
360 |
+
"model.layers.29.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00004-of-00004.bin",
|
361 |
+
"model.layers.29.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00004-of-00004.bin",
|
362 |
+
"model.layers.29.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00004-of-00004.bin",
|
363 |
+
"model.layers.29.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00004-of-00004.bin",
|
364 |
+
"model.layers.29.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00004-of-00004.bin",
|
365 |
+
"model.layers.29.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00004-of-00004.bin",
|
366 |
+
"model.layers.29.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00004-of-00004.bin",
|
367 |
+
"model.layers.29.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00004-of-00004.bin",
|
368 |
+
"model.layers.29.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00004-of-00004.bin",
|
369 |
+
"model.layers.29.block_sparse_moe.gate.weight": "pytorch_model-00004-of-00004.bin",
|
370 |
+
"model.layers.29.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
371 |
+
"model.layers.29.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
372 |
+
"model.layers.29.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
|
373 |
+
"model.layers.29.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
|
374 |
+
"model.layers.29.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
|
375 |
+
"model.layers.29.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
|
376 |
+
"model.layers.3.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
377 |
+
"model.layers.3.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
378 |
+
"model.layers.3.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
379 |
+
"model.layers.3.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
380 |
+
"model.layers.3.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
381 |
+
"model.layers.3.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
382 |
+
"model.layers.3.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
383 |
+
"model.layers.3.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
384 |
+
"model.layers.3.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
385 |
+
"model.layers.3.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
386 |
+
"model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
387 |
+
"model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
388 |
+
"model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
389 |
+
"model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
390 |
+
"model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
391 |
+
"model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
392 |
+
"model.layers.30.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00004-of-00004.bin",
|
393 |
+
"model.layers.30.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00004-of-00004.bin",
|
394 |
+
"model.layers.30.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00004-of-00004.bin",
|
395 |
+
"model.layers.30.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00004-of-00004.bin",
|
396 |
+
"model.layers.30.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00004-of-00004.bin",
|
397 |
+
"model.layers.30.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00004-of-00004.bin",
|
398 |
+
"model.layers.30.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00004-of-00004.bin",
|
399 |
+
"model.layers.30.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00004-of-00004.bin",
|
400 |
+
"model.layers.30.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00004-of-00004.bin",
|
401 |
+
"model.layers.30.block_sparse_moe.gate.weight": "pytorch_model-00004-of-00004.bin",
|
402 |
+
"model.layers.30.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
403 |
+
"model.layers.30.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
404 |
+
"model.layers.30.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
|
405 |
+
"model.layers.30.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
|
406 |
+
"model.layers.30.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
|
407 |
+
"model.layers.30.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
|
408 |
+
"model.layers.31.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00004-of-00004.bin",
|
409 |
+
"model.layers.31.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00004-of-00004.bin",
|
410 |
+
"model.layers.31.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00004-of-00004.bin",
|
411 |
+
"model.layers.31.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00004-of-00004.bin",
|
412 |
+
"model.layers.31.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00004-of-00004.bin",
|
413 |
+
"model.layers.31.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00004-of-00004.bin",
|
414 |
+
"model.layers.31.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00004-of-00004.bin",
|
415 |
+
"model.layers.31.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00004-of-00004.bin",
|
416 |
+
"model.layers.31.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00004-of-00004.bin",
|
417 |
+
"model.layers.31.block_sparse_moe.gate.weight": "pytorch_model-00004-of-00004.bin",
|
418 |
+
"model.layers.31.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
419 |
+
"model.layers.31.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
|
420 |
+
"model.layers.31.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
|
421 |
+
"model.layers.31.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
|
422 |
+
"model.layers.31.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
|
423 |
+
"model.layers.31.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
|
424 |
+
"model.layers.4.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
425 |
+
"model.layers.4.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
426 |
+
"model.layers.4.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
427 |
+
"model.layers.4.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
428 |
+
"model.layers.4.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
429 |
+
"model.layers.4.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
430 |
+
"model.layers.4.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
431 |
+
"model.layers.4.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
432 |
+
"model.layers.4.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
433 |
+
"model.layers.4.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
434 |
+
"model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
435 |
+
"model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
436 |
+
"model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
437 |
+
"model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
438 |
+
"model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
439 |
+
"model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
440 |
+
"model.layers.5.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
441 |
+
"model.layers.5.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
442 |
+
"model.layers.5.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
443 |
+
"model.layers.5.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
444 |
+
"model.layers.5.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
445 |
+
"model.layers.5.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
446 |
+
"model.layers.5.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
447 |
+
"model.layers.5.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
448 |
+
"model.layers.5.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
449 |
+
"model.layers.5.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
450 |
+
"model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
451 |
+
"model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
452 |
+
"model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
453 |
+
"model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
454 |
+
"model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
455 |
+
"model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
456 |
+
"model.layers.6.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
457 |
+
"model.layers.6.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
458 |
+
"model.layers.6.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
459 |
+
"model.layers.6.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
460 |
+
"model.layers.6.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
461 |
+
"model.layers.6.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
462 |
+
"model.layers.6.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
463 |
+
"model.layers.6.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
464 |
+
"model.layers.6.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
465 |
+
"model.layers.6.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
466 |
+
"model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
467 |
+
"model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
468 |
+
"model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
469 |
+
"model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
470 |
+
"model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
471 |
+
"model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
472 |
+
"model.layers.7.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
473 |
+
"model.layers.7.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
474 |
+
"model.layers.7.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
475 |
+
"model.layers.7.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
476 |
+
"model.layers.7.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
477 |
+
"model.layers.7.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
478 |
+
"model.layers.7.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
479 |
+
"model.layers.7.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
480 |
+
"model.layers.7.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
481 |
+
"model.layers.7.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
482 |
+
"model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
483 |
+
"model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
484 |
+
"model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
485 |
+
"model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
486 |
+
"model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
487 |
+
"model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
488 |
+
"model.layers.8.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00001-of-00004.bin",
|
489 |
+
"model.layers.8.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00001-of-00004.bin",
|
490 |
+
"model.layers.8.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00001-of-00004.bin",
|
491 |
+
"model.layers.8.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00001-of-00004.bin",
|
492 |
+
"model.layers.8.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00001-of-00004.bin",
|
493 |
+
"model.layers.8.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00001-of-00004.bin",
|
494 |
+
"model.layers.8.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00001-of-00004.bin",
|
495 |
+
"model.layers.8.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00001-of-00004.bin",
|
496 |
+
"model.layers.8.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00001-of-00004.bin",
|
497 |
+
"model.layers.8.block_sparse_moe.gate.weight": "pytorch_model-00001-of-00004.bin",
|
498 |
+
"model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
499 |
+
"model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
|
500 |
+
"model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
501 |
+
"model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
|
502 |
+
"model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
503 |
+
"model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
|
504 |
+
"model.layers.9.block_sparse_moe.experts.0.w1.weight": "pytorch_model-00002-of-00004.bin",
|
505 |
+
"model.layers.9.block_sparse_moe.experts.0.w2.weight": "pytorch_model-00002-of-00004.bin",
|
506 |
+
"model.layers.9.block_sparse_moe.experts.0.w3.weight": "pytorch_model-00002-of-00004.bin",
|
507 |
+
"model.layers.9.block_sparse_moe.experts.1.w1.weight": "pytorch_model-00002-of-00004.bin",
|
508 |
+
"model.layers.9.block_sparse_moe.experts.1.w2.weight": "pytorch_model-00002-of-00004.bin",
|
509 |
+
"model.layers.9.block_sparse_moe.experts.1.w3.weight": "pytorch_model-00002-of-00004.bin",
|
510 |
+
"model.layers.9.block_sparse_moe.experts.2.w1.weight": "pytorch_model-00002-of-00004.bin",
|
511 |
+
"model.layers.9.block_sparse_moe.experts.2.w2.weight": "pytorch_model-00002-of-00004.bin",
|
512 |
+
"model.layers.9.block_sparse_moe.experts.2.w3.weight": "pytorch_model-00002-of-00004.bin",
|
513 |
+
"model.layers.9.block_sparse_moe.gate.weight": "pytorch_model-00002-of-00004.bin",
|
514 |
+
"model.layers.9.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
515 |
+
"model.layers.9.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
|
516 |
+
"model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
|
517 |
+
"model.layers.9.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
|
518 |
+
"model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
|
519 |
+
"model.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
|
520 |
+
"model.norm.weight": "pytorch_model-00004-of-00004.bin"
|
521 |
+
}
|
522 |
+
}
|
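For reference, the shard index above maps every parameter name to one of the four checkpoint files. A minimal sketch, assuming the index is saved as pytorch_model.bin.index.json next to the shards (the usual layout for a sharded checkpoint), of how the mapping can be inspected:

import json
from collections import Counter

# Load the shard index (filename assumed per the standard sharded-checkpoint layout).
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]

# Count how many tensors each shard holds.
for shard, n_tensors in sorted(Counter(weight_map.values()).items()):
    print(f"{shard}: {n_tensors} tensors")

# A single layer's tensors can straddle shards, e.g. layer 9 above:
print(weight_map["model.layers.9.self_attn.q_proj.weight"])  # pytorch_model-00001-of-00004.bin
print(weight_map["model.layers.9.self_attn.v_proj.weight"])  # pytorch_model-00002-of-00004.bin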
@@ -0,0 +1,33 @@
{
  "additional_special_tokens": [
    "<|/inst|>"
  ],
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": true,
    "single_word": false
  },
  "pad_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
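The special-tokens map above pins down the control tokens this checkpoint expects: <s> doubles as BOS and PAD, <|end|> is the EOS, and <|/inst|> is registered as an additional special token. A minimal sketch, assuming the file is saved as special_tokens_map.json in the checkpoint directory:

import json

# Read the special tokens map (filename assumed; transformers picks it up
# automatically when the tokenizer is loaded from this directory).
with open("special_tokens_map.json") as f:
    specials = json.load(f)

print(specials["bos_token"]["content"])        # <s>
print(specials["pad_token"]["content"])        # <s>  (same token reused for padding)
print(specials["eos_token"]["content"])        # <|end|>
print(specials["additional_special_tokens"])   # ['<|/inst|>']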
The diff for this file is too large to render. See raw diff.
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
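The three lines above are a Git LFS pointer rather than the file itself; the actual binary (this hunk does not show its name) is fetched by git lfs pull and can be checked against the recorded hash and size. A minimal sketch, with the local filename chosen only for illustration:

import hashlib

expected_oid = "9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347"
expected_size = 499723

# Hypothetical filename; substitute whichever file this pointer tracks.
path = "tokenizer.model"
with open(path, "rb") as f:
    data = f.read()

assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("LFS object matches the pointer")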
@@ -0,0 +1,350 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": null,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "32000": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32001": {
      "content": "<|assistant|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32002": {
      "content": "<|step|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32003": {
      "content": "<|function_output|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32004": {
      "content": "<|tag|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32005": {
      "content": "<|function_call|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32006": {
      "content": "<|system|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32007": {
      "content": "<|end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32008": {
      "content": "<|raw|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32009": {
      "content": "<|continue|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32010": {
      "content": "<|user|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32011": {
      "content": "<|function_list|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32012": {
      "content": "<|calc|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32013": {
      "content": "<|code|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32014": {
      "content": "<|/code|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32015": {
      "content": "<|summary|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32016": {
      "content": "<|resource|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32017": {
      "content": "<|assistant_mask|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32018": {
      "content": "<|start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32019": {
      "content": "<|message|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32020": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32021": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32022": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32023": {
      "content": "<|meta_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32024": {
      "content": "<|ipynb_marker|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32025": {
      "content": "<|diff_marker|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32026": {
      "content": "<|ghissue|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32027": {
      "content": "<|ghreview|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32028": {
      "content": "<|disc_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32029": {
      "content": "<|disc_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32030": {
      "content": "<|disc_thread|><|query|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32031": {
      "content": "<|/query|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32032": {
      "content": "<|data|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32033": {
      "content": "<|/data|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32034": {
      "content": "<|sys|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32035": {
      "content": "<|/sys|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32036": {
      "content": "<|inst|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "32037": {
      "content": "<|/inst|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|/inst|>"
  ],
  "bos_token": "<s>",
  "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|end|>",
  "legacy": false,
  "model_max_length": 4096,
  "pad_token": "<s>",
  "padding_side": "left",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
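The chat_template field above is a Jinja2 template: each message is wrapped as <|role|>, a newline, the message content, and <|end|>, with <|assistant|> appended when a generation prompt is requested. A minimal sketch that renders the template directly with jinja2 (tokenizer.apply_chat_template in transformers does the equivalent once the tokenizer is loaded from this config); the example messages are illustrative only:

from jinja2 import Template

# The template string copied from tokenizer_config.json above.
chat_template = (
    "{{ bos_token }}{% for message in messages %}"
    "{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}"
    "{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}"
    "{% else %}{{ eos_token }}{% endif %}"
)

rendered = Template(chat_template).render(
    bos_token="<s>",
    eos_token="<|end|>",
    add_generation_prompt=True,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
)
print(rendered)
# <s><|system|>
# You are a helpful assistant.<|end|>
# <|user|>
# Hello!<|end|>
# <|assistant|>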