{
  "metadata": {
    "total_size": 7429930432
  },
  "weight_map": {
    "text_model.lm_head.linear.bias": "model-00002-of-00002.safetensors",
    "text_model.lm_head.linear.weight": "model-00002-of-00002.safetensors",
    "text_model.lm_head.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.lm_head.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.embd.wte.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.0.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.1.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.10.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.11.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.12.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.13.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.14.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.14.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.15.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.16.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.17.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.18.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.19.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.2.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.2.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.20.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.20.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.21.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.22.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.ln.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.ln.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.23.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "text_model.transformer.h.3.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.3.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.4.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.5.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.6.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.7.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.8.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.ln.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.ln.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "text_model.transformer.h.9.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.0.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.1.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.10.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.11.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.12.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.13.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.14.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.15.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.16.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.17.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.18.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.19.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.2.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.20.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.21.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.22.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.23.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.24.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.25.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.26.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.3.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.4.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.5.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.6.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.7.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.8.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.attn.proj.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.attn.proj.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.attn.qkv.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.attn.qkv.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.mlp.fc2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.norm1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.norm1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.norm2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.blocks.9.norm2.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.norm.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.norm.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.patch_embed.linear.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.patch_embed.linear.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.encoder.model.visual.pos_embed": "model-00001-of-00002.safetensors",
    "vision_encoder.projection.mlp.fc1.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.projection.mlp.fc1.weight": "model-00001-of-00002.safetensors",
    "vision_encoder.projection.mlp.fc2.bias": "model-00001-of-00002.safetensors",
    "vision_encoder.projection.mlp.fc2.weight": "model-00001-of-00002.safetensors"
  }
}