{
  "_name_or_path": "openai/clip-vit-large-patch14-336",
  "architectures": [
    "CLIPForImageClassification"
  ],
  "id2label": {
    "0": "0",
    "1": "1",
    "2": "2",
    "3": "3",
    "4": "4",
    "5": "5",
    "6": "6",
    "7": "7",
    "8": "8",
    "9": "9"
  },
  "initializer_factor": 1.0,
  "label2id": {
    "0": 0,
    "1": 1,
    "2": 2,
    "3": 3,
    "4": 4,
    "5": 5,
    "6": 6,
    "7": 7,
    "8": 8,
    "9": 9
  },
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "problem_type": "single_label_classification",
  "projection_dim": 768,
  "text_config": {
    "dropout": 0.0,
    "hidden_size": 768,
    "intermediate_size": 3072,
    "model_type": "clip_text_model",
    "num_attention_heads": 12,
    "projection_dim": 768
  },
  "torch_dtype": "float32",
  "transformers_version": "4.44.2",
  "vision_config": {
    "dropout": 0.0,
    "hidden_size": 1024,
    "image_size": 336,
    "intermediate_size": 4096,
    "model_type": "clip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768
  }
}
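This config describes `openai/clip-vit-large-patch14-336` fine-tuned as a 10-class single-label image classifier (`CLIPForImageClassification`, labels "0" through "9"). A minimal sketch of loading and running inference with it follows; the checkpoint directory `./clip-vit-large-patch14-336-finetuned` and the input file `example.png` are placeholders, not paths from this repo, and the image processor is taken from the base model since this config alone does not carry a preprocessor config.

```python
# Minimal sketch: inference with a checkpoint built on this config, using
# Hugging Face transformers (>= 4.44 to match "transformers_version" above).
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_dir = "./clip-vit-large-patch14-336-finetuned"  # placeholder path

# Preprocessing comes from the base CLIP checkpoint; it resizes inputs to
# the 336x336 resolution expected by vision_config.image_size.
processor = AutoImageProcessor.from_pretrained("openai/clip-vit-large-patch14-336")

# model_type "clip" + architectures ["CLIPForImageClassification"] resolve
# to a CLIP vision tower with a 10-way linear classification head.
model = AutoModelForImageClassification.from_pretrained(model_dir)
model.eval()

image = Image.open("example.png").convert("RGB")  # placeholder input
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 10): one logit per class

pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # id2label maps class index -> "0".."9"
```

Note that because `problem_type` is `"single_label_classification"`, training with this config computes a cross-entropy loss over the 10 logits whenever integer `labels` are passed to `forward`; only the vision side is used by the classification head, so `text_config` here is carried over from the base CLIP model rather than exercised at inference time.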