dfdf committed
Commit 2e6545c
1 Parent(s): 8ffc53e

Upload config.json

Files changed (1)
config.json (+34, -35)
config.json CHANGED
@@ -1,40 +1,39 @@
 {
-  "_name_or_path": "llava-v1.5-7b",
-  "architectures": ["LlavaLlamaForCausalLM"],
-  "bos_token_id": 1,
-  "eos_token_id": 2,
-  "freeze_mm_mlp_adapter": false,
-  "freeze_mm_vision_resampler": false,
-  "hidden_act": "silu",
-  "hidden_size": 4096,
-  "image_aspect_ratio": "pad",
-  "initializer_range": 0.02,
-  "intermediate_size": 11008,
-  "max_length": 4096,
-  "max_position_embeddings": 4096,
-  "mm_hidden_size": 1024,
-  "mm_projector_type": "mlp2x_gelu",
-  "mm_resampler_type": null,
-  "mm_use_im_patch_token": false,
-  "mm_use_im_start_end": false,
-  "mm_vision_select_feature": "patch",
-  "mm_vision_select_layer": -2,
-  "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+  "architectures": [
+    "LlavaForConditionalGeneration"
+  ],
+  "ignore_index": -100,
+  "image_token_index": 32000,
   "model_type": "llava",
-  "num_attention_heads": 32,
-  "num_hidden_layers": 32,
-  "num_key_value_heads": 32,
-  "pad_token_id": 0,
-  "pretraining_tp": 1,
-  "rms_norm_eps": 1e-5,
-  "rope_scaling": null,
+  "pad_token_id": 32001,
+  "projector_hidden_act": "gelu",
+  "text_config": {
+    "_name_or_path": "lmsys/vicuna-7b-v1.5",
+    "architectures": [
+      "LlamaForCausalLM"
+    ],
+    "max_position_embeddings": 4096,
+    "model_type": "llama",
+    "rms_norm_eps": 1e-05,
+    "torch_dtype": "float16",
+    "vocab_size": 32064
+  },
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.31.0",
-  "tune_mm_mlp_adapter": false,
-  "tune_mm_vision_resampler": false,
-  "unfreeze_mm_vision_tower": false,
-  "use_cache": true,
-  "use_mm_proj": true,
-  "vocab_size": 32000
+  "transformers_version": "4.36.0.dev0",
+  "vision_config": {
+    "hidden_size": 1024,
+    "image_size": 336,
+    "intermediate_size": 4096,
+    "model_type": "clip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 24,
+    "patch_size": 14,
+    "projection_dim": 768,
+    "vocab_size": 32000
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "default",
+  "vocab_size": 32064
 }
+
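
The uploaded config replaces the original LLaVA-repo layout (a flat "LlavaLlamaForCausalLM" config with mm_* keys) with the Transformers-native LLaVA format, where the language model and vision tower are described by nested "text_config" and "vision_config" blocks. A minimal loading sketch under that assumption follows; the repo id is a placeholder for wherever this converted checkpoint is hosted, not the actual repository name.

import torch
from transformers import AutoProcessor, LlavaForConditionalGeneration

# Placeholder repo id -- substitute the repository this config.json belongs to.
model_id = "your-namespace/llava-1.5-7b-converted"

# The config declares "LlavaForConditionalGeneration" and torch_dtype "float16",
# so the checkpoint loads through the Transformers-native LLaVA class.
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
)
processor = AutoProcessor.from_pretrained(model_id)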