llama2_swa / config.json
{
  "architectures": [
    "HF_GPTForCausalLM"
  ],
  "bos_token_id": 1,
  "conv_size": 4,
  "eos_token_id": 2,
  "expand_k": 1.0,
  "expand_v": 1.0,
  "hidden_size": null,
  "mode": "fused_chunk",
  "model_type": "hf_gpt",
  "name": "LLaMA_1.3B_SWA",
  "norm_eps": 1e-05,
  "norm_first": false,
  "num_heads": 4,
  "pad_token_id": 0,
  "qk_activation": "silu",
  "qk_norm": "l2",
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.43.2",
  "use_beta": true,
  "use_cache": true,
  "use_gate": false,
  "use_output_norm": true,
  "use_short_conv": true,
  "vocab_size": 32000
}
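
For reference, the sketch below shows one way this config could be fetched and inspected programmatically. The repo id "ahatamiz/llama2_swa" is an assumption inferred from the uploader and folder name, not a confirmed Hub path, and instantiating HF_GPTForCausalLM itself would additionally require the custom modeling code that defines the "hf_gpt" model type (for example via trust_remote_code=True or an installed package that registers it).

import json

from huggingface_hub import hf_hub_download

# Hypothetical repo id, inferred from the uploader and folder name.
REPO_ID = "ahatamiz/llama2_swa"

# Download and parse the raw config.json shown above.
config_path = hf_hub_download(repo_id=REPO_ID, filename="config.json")
with open(config_path) as f:
    cfg = json.load(f)

print(cfg["name"])        # LLaMA_1.3B_SWA
print(cfg["num_heads"])   # 4
print(cfg["vocab_size"])  # 32000

# Loading the model itself needs the custom HF_GPTForCausalLM implementation,
# e.g. (assuming the repo ships it or the defining package is installed):
# from transformers import AutoModelForCausalLM
# model = AutoModelForCausalLM.from_pretrained(REPO_ID, trust_remote_code=True)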