{ "_name_or_path": "./", "architectures": [ "QWenLMHeadModel" ], "auto_map": { "AutoConfig": "configuration_qwen.QWenConfig", "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel" }, "attn_dropout_prob": 0.0, "bf16": false, "emb_dropout_prob": 0.0, "fp16": true, "fp32": false, "hidden_size": 8, "initializer_range": 0.02, "intermediate_size": 16, "kv_channels": 4, "layer_norm_epsilon": 1e-06, "max_position_embeddings": 8192, "model_type": "qwen", "no_bias": true, "num_attention_heads": 2, "num_hidden_layers": 2, "rotary_emb_base": 10000, "rotary_pct": 1.0, "scale_attn_weights": true, "seq_length": 2048, "tie_word_embeddings": false, "torch_dtype": "float16", "transformers_version": "4.35.2", "use_cache": true, "use_dynamic_ntk": true, "use_flash_attn": "auto", "use_logn_attn": true, "visual": { "heads": 2, "image_size": 448, "image_start_id": 151857, "layers": 2, "mlp_ratio": 1.0, "output_dim": 8, "patch_size": 14, "width": 8 }, "vocab_size": 151936 }