Upload folder using huggingface_hub
- config.json +1 -0
- model.layers.24/cfg.json +1 -0
- model.layers.24/sae.safetensors +3 -0
config.json ADDED
@@ -0,0 +1 @@
+{"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 131072, "k": 256, "multi_topk": false}, "batch_size": 1, "grad_acc_steps": 4, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["model.layers.24"], "layers": [24], "layer_stride": 1, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "llava-hf_llama3-llava-next-8b-hf-lmms-lab_LLaVA-NeXT-Data-sae", "wandb_log_frequency": 1, "mm_data": true, "model": "llava-hf/llama3-llava-next-8b-hf", "dataset": "lmms-lab/LLaVA-NeXT-Data", "split": "train", "ctx_len": 2048, "hf_token": null, "load_in_8bit": false, "max_examples": null, "resume": false, "seed": 42, "data_preprocessing_num_proc": 32}
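This top-level config.json records the training configuration for the run: a top-k sparse autoencoder (k=256, 131072 latents) trained on activations from hookpoint model.layers.24 of llava-hf/llama3-llava-next-8b-hf, using lmms-lab/LLaVA-NeXT-Data. A minimal sketch for reading it, using only the Python standard library; the derived quantities in the comments are inferred from the fields above, not outputs of the training code:

# Minimal sketch (standard library only): load the run config and print
# a few quantities implied by the fields above.
import json

with open("config.json") as f:
    run_cfg = json.load(f)

print(run_cfg["sae"]["num_latents"])                      # 131072 latents
print(run_cfg["batch_size"] * run_cfg["grad_acc_steps"])  # effective batch size: 1 * 4 = 4
print(run_cfg["hookpoints"])                              # ['model.layers.24']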
model.layers.24/cfg.json ADDED
@@ -0,0 +1 @@
+{"expansion_factor": 32, "normalize_decoder": true, "num_latents": 131072, "k": 256, "multi_topk": false, "d_in": 4096}
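The per-hookpoint cfg.json adds d_in, the activation width at layer 24. The usual width relation num_latents = expansion_factor * d_in holds here: 32 * 4096 = 131072. A quick sanity check, assuming the file layout of this repo:

# Sanity check: SAE width should equal expansion_factor * d_in.
import json

with open("model.layers.24/cfg.json") as f:
    sae_cfg = json.load(f)

assert sae_cfg["num_latents"] == sae_cfg["expansion_factor"] * sae_cfg["d_in"]  # 32 * 4096 == 131072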
model.layers.24/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fb9a5215dc8df502575fde5eb5e903c08e97683610525acb590f2a70fd4af31
+size 4295508312
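Note that what is committed above is only a Git LFS pointer; the actual tensors (about 4.30 GB) live in LFS storage. The size is consistent with fp32 encoder and decoder matrices of shape 4096 x 131072 plus bias vectors: 2 * 4096 * 131072 * 4 bytes is roughly 4.29 GB. A hedged sketch for fetching and inspecting the weights; "<user>/<repo>" is a placeholder for the Hub repo this commit belongs to (not named in this diff), and the tensor names depend on the training library:

# Sketch: download the real weights through the Hub (this resolves the LFS
# pointer) and list tensor names and shapes.
from huggingface_hub import hf_hub_download
from safetensors import safe_open

path = hf_hub_download(repo_id="<user>/<repo>", filename="model.layers.24/sae.safetensors")
with safe_open(path, framework="pt") as f:
    for name in f.keys():
        print(name, f.get_slice(name).get_shape())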