{ "_name_or_path": "/workspace/Qwen_Qwen-7B", "activation": "swiglu", "apply_residual_connection_post_layernorm": false, "architectures": [ "QWenLMHeadModel" ], "attn_pdrop": 0.0, "auto_map": { "AutoConfig": "configuration_qwen.QWenConfig", "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel" }, "bf16": true, "bias_dropout_fusion": true, "bos_token_id": 151643, "embd_pdrop": 0.0, "eos_token_id": 151643, "ffn_hidden_size": 22016, "fp16": false, "fp32": false, "initializer_range": 0.02, "kv_channels": 128, "layer_norm_epsilon": 1e-06, "model_type": "qwen", "n_embd": 4096, "n_head": 32, "n_inner": null, "n_layer": 32, "n_positions": 6144, "no_bias": true, "onnx_safe": null, "padded_vocab_size": 151936, "params_dtype": "torch.bfloat16", "pos_emb": "rotary", "resid_pdrop": 0.1, "rotary_emb_base": 10000, "rotary_pct": 1.0, "scale_attn_weights": true, "seq_length": 2048, "tie_word_embeddings": false, "tokenizer_type": "QWenTokenizer", "torch_dtype": "float16", "transformers_version": "4.31.0", "use_cache": true, "use_dynamic_ntk": true, "use_flash_attn": true, "use_logn_attn": true, "vocab_size": 151936 }