{
  "_name_or_path": "/data/vicuna/vicuna-13b-v1.5",
  "architectures": [
    "MoDLLaVALlamaForCausalLM"
  ],
  "bos_token_id": 1,
  "delay_load": false,
  "eos_token_id": 2,
  "freeze_mm_mlp_adapter": false,
  "freeze_vision": false,
  "hidden_act": "silu",
  "hidden_size": 5120,
  "image_aspect_ratio": "pad",
  "image_grid_pinpoints": null,
  "initializer_range": 0.02,
  "input_image_size": 1024,
  "intermediate_size": 13824,
  "is_multipath_encoder": true,
  "max_length": 4096,
  "max_position_embeddings": 4096,
  "mm_hidden_size": 1024,
  "mm_projector_type": "mlp2x_gelu",
  "mm_use_im_patch_token": false,
  "mm_use_im_start_end": false,
  "mm_vision_select_feature": "patch",
  "mm_vision_select_layer": -2,
  "mm_vision_tower": "openai/clip-vit-large-patch14-336",
  "mod": {
    "capacity_factor": 0.5,
    "mod_enable": true,
    "mod_layers_idx": [
      3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
      19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
      33, 34, 35, 36, 37, 38
    ],
    "mod_mode": "arank_mod",
    "router_aux_loss_coef": 0.01
  },
  "model_type": "mod_llava_llama",
  "num_attention_heads": 40,
  "num_hidden_layers": 40,
  "num_key_value_heads": 40,
  "pad_token_id": 0,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.31.0",
  "tune_mm_mlp_adapter": false,
  "use_cache": true,
  "use_mm_proj": true,
  "vision_tower_slow": "convnext_large_mlp.clip_laion2b_ft_320",
  "vocab_size": 32000
}
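
For reference, a minimal sketch in plain Python showing how one might load this config and inspect the Mixture-of-Depths (MoD) routing settings; the file path and the printed interpretations are assumptions based only on the keys above, not on the model's actual implementation:

import json

# Hypothetical path; adjust to wherever the config above is saved.
with open("config.json") as f:
    cfg = json.load(f)

mod = cfg["mod"]
print(mod["mod_enable"])            # True: MoD routing is switched on
print(mod["capacity_factor"])       # 0.5: presumably each MoD layer keeps ~half the tokens
print(len(mod["mod_layers_idx"]))   # 36: layers 3..38 of the 40 hidden layers use MoD
print(mod["router_aux_loss_coef"])  # 0.01: weight of the auxiliary router loss

# Derived sanity check from the base LLaMA fields:
# hidden_size / num_attention_heads = 5120 / 40 = 128 (per-head dimension)
print(cfg["hidden_size"] // cfg["num_attention_heads"])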
|
|