{ "architectures": [ "Blip2ForImageTextRetrieval" ], "image_text_hidden_size": 256, "initializer_factor": 1.0, "initializer_range": 0.02, "model_type": "blip-2", "num_query_tokens": 32, "qformer_config": { "model_type": "blip_2_qformer", "use_qformer_text_input": true, "vocab_size": 30523 }, "text_config": { "model_type": "opt" }, "torch_dtype": "float32", "transformers_version": "4.41.0.dev0", "use_decoder_only_language_model": true, "vision_config": { "model_type": "blip_2_vision_model" } }