evo-1-8k-base / config.json
{
"_commit_hash": "1cc23830f62c268082475776fb449af8428eb703",
"_name_or_path": "togethercomputer/evo-1-131k-base",
"architectures": [
"StripedHyenaModelForCausalLM"
],
"attn_layer_idxs": [
8,
16,
24
],
"auto_map": {
"AutoConfig": "togethercomputer/evo-1-131k-base--configuration_hyena.StripedHyenaConfig",
"AutoModelForCausalLM": "togethercomputer/evo-1-131k-base--modeling_hyena.StripedHyenaModelForCausalLM",
"AutoTokenizer": [
"togethercomputer/evo-1-131k-base--tokenizer.ByteTokenizer",
null
]
},
"column_split": false,
"column_split_hyena": true,
"eps": 1e-06,
"final_norm": true,
"hidden_size": 4096,
"hyena_filter_groups": 1,
"hyena_layer_idxs": [
0,
1,
2,
3,
4,
5,
6,
7,
9,
10,
11,
12,
13,
14,
15,
17,
18,
19,
20,
21,
22,
23,
25,
26,
27,
28,
29,
30,
31
],
"inference_mode": false,
"inner_mlp_size": 10928,
"log_intermediate_values": false,
"make_vocab_size_divisible_by": 8,
"max_seqlen": 8192,
"mha_out_proj_bias": true,
"mlp_activation": "gelu",
"model_parallel_size": 1,
"model_type": "stripedhyena",
"num_attention_heads": 32,
"num_filters": 4096,
"num_layers": 32,
"pipe_parallel_size": 1,
"prefill_style": "fft",
"proj_groups": 1,
"qkv_proj_bias": true,
"rotary_emb_base": 10000,
"rotary_emb_scaling_factor": 1,
"short_filter_bias": true,
"short_filter_length": 3,
"smeared_gqa": false,
"split_k0": true,
"state_size": 8,
"tie_embeddings": true,
"torch_dtype": "bfloat16",
"transformers_version": null,
"use_cache": true,
"use_flash_attention_2": true,
"use_flash_depthwise": false,
"use_flash_rmsnorm": false,
"use_flashfft": false,
"use_interpolated_rotary_pos_emb": false,
"vocab_size": 512
}
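
A minimal loading sketch for context, not a definitive recipe: the repo id togethercomputer/evo-1-8k-base is assumed from the page title (the config's _name_or_path points at the 131k variant), and trust_remote_code=True is needed because the auto_map entries above resolve to custom StripedHyena classes shipped with the checkpoint. Only the slow tokenizer is registered (the fast entry in auto_map is null).

# A minimal sketch, assuming the repo id from the page title.
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo = "togethercomputer/evo-1-8k-base"  # assumed; _name_or_path above names the 131k variant

# Custom config/model/tokenizer classes come from the repo's remote code,
# per the "auto_map" block in this file.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
print(config.max_seqlen, config.num_layers)  # 8192, 32 per this config

tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)  # ByteTokenizer (slow only)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in this config
    trust_remote_code=True,
)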