ghidav committed
Commit 49befce
1 parent: fbff050

Delete backwards_TL

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. backwards_TL/22ninmut/100003840/cfg.json +0 -1
  2. backwards_TL/22ninmut/100003840/sae_weights.safetensors +0 -3
  3. backwards_TL/22ninmut/100003840/sparsity.safetensors +0 -3
  4. backwards_TL/22ninmut/200003584/cfg.json +0 -1
  5. backwards_TL/22ninmut/200003584/sae_weights.safetensors +0 -3
  6. backwards_TL/22ninmut/200003584/sparsity.safetensors +0 -3
  7. backwards_TL/22ninmut/300003328/cfg.json +0 -1
  8. backwards_TL/22ninmut/300003328/sae_weights.safetensors +0 -3
  9. backwards_TL/22ninmut/300003328/sparsity.safetensors +0 -3
  10. backwards_TL/22ninmut/400003072/cfg.json +0 -1
  11. backwards_TL/22ninmut/400003072/sae_weights.safetensors +0 -3
  12. backwards_TL/22ninmut/400003072/sparsity.safetensors +0 -3
  13. backwards_TL/22ninmut/final_500002816/.cfg.json.swp +0 -0
  14. backwards_TL/22ninmut/final_500002816/cfg.json +0 -1
  15. backwards_TL/22ninmut/final_500002816/sae_weights.safetensors +0 -3
  16. backwards_TL/22ninmut/final_500002816/sparsity.safetensors +0 -3
  17. backwards_TL/7oj8g6c4/100003840/cfg.json +0 -1
  18. backwards_TL/7oj8g6c4/100003840/sae_weights.safetensors +0 -3
  19. backwards_TL/7oj8g6c4/100003840/sparsity.safetensors +0 -3
  20. backwards_TL/7oj8g6c4/200003584/cfg.json +0 -1
  21. backwards_TL/7oj8g6c4/200003584/sae_weights.safetensors +0 -3
  22. backwards_TL/7oj8g6c4/200003584/sparsity.safetensors +0 -3
  23. backwards_TL/7oj8g6c4/300003328/cfg.json +0 -1
  24. backwards_TL/7oj8g6c4/300003328/sae_weights.safetensors +0 -3
  25. backwards_TL/7oj8g6c4/300003328/sparsity.safetensors +0 -3
  26. backwards_TL/7oj8g6c4/400003072/cfg.json +0 -1
  27. backwards_TL/7oj8g6c4/400003072/sae_weights.safetensors +0 -3
  28. backwards_TL/7oj8g6c4/400003072/sparsity.safetensors +0 -3
  29. backwards_TL/7oj8g6c4/final_500002816/cfg.json +0 -1
  30. backwards_TL/7oj8g6c4/final_500002816/sae_weights.safetensors +0 -3
  31. backwards_TL/7oj8g6c4/final_500002816/sparsity.safetensors +0 -3
  32. backwards_TL/djw9vsut/100003840/cfg.json +0 -1
  33. backwards_TL/djw9vsut/100003840/sae_weights.safetensors +0 -3
  34. backwards_TL/djw9vsut/100003840/sparsity.safetensors +0 -3
  35. backwards_TL/djw9vsut/200003584/cfg.json +0 -1
  36. backwards_TL/djw9vsut/200003584/sae_weights.safetensors +0 -3
  37. backwards_TL/djw9vsut/200003584/sparsity.safetensors +0 -3
  38. backwards_TL/djw9vsut/300003328/cfg.json +0 -1
  39. backwards_TL/djw9vsut/300003328/sae_weights.safetensors +0 -3
  40. backwards_TL/djw9vsut/300003328/sparsity.safetensors +0 -3
  41. backwards_TL/djw9vsut/400003072/cfg.json +0 -1
  42. backwards_TL/djw9vsut/400003072/sae_weights.safetensors +0 -3
  43. backwards_TL/djw9vsut/400003072/sparsity.safetensors +0 -3
  44. backwards_TL/djw9vsut/final_500002816/cfg.json +0 -1
  45. backwards_TL/djw9vsut/final_500002816/sae_weights.safetensors +0 -3
  46. backwards_TL/djw9vsut/final_500002816/sparsity.safetensors +0 -3
  47. backwards_TL/drbi3oow/100003840/cfg.json +0 -1
  48. backwards_TL/drbi3oow/100003840/sae_weights.safetensors +0 -3
  49. backwards_TL/drbi3oow/100003840/sparsity.safetensors +0 -3
  50. backwards_TL/drbi3oow/200003584/cfg.json +0 -1
backwards_TL/22ninmut/100003840/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/22ninmut/100003840/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:eaf688bedd969130bc40dabef5ecd4d20d1650527d890da1486a3009cc52703e
- size 37801344
 
 
backwards_TL/22ninmut/100003840/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3eccde1a921dbea545c2838d42de3e05112412e5ef0ff625013f1152fc54f41d
- size 24656
 
 
backwards_TL/22ninmut/200003584/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/22ninmut/200003584/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b7e2e760369b076b49810ebb2e9856dbfb0cba2a555021dc7b1f4b181056d125
- size 37801344
 
 
backwards_TL/22ninmut/200003584/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:174f1e22cce1b01e437592772e23c4c46d594bdd06361197c1f1d13c2de0adbe
- size 24656
 
 
backwards_TL/22ninmut/300003328/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/22ninmut/300003328/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:90c6a2bc0fb1221f1a297e78f914dcd4431d0594d665b9e6971ad53ad1e3a4f9
- size 37801344
 
 
backwards_TL/22ninmut/300003328/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:82ab0163c40693019a4f9971942ad6b2bc7864077175efae7d2b48bc2772af5d
- size 24656
 
 
backwards_TL/22ninmut/400003072/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/22ninmut/400003072/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3523d121fcf5fba450df3a43ec1d64c3175d40673953b7525d740e02c61d1470
- size 37801344
 
 
backwards_TL/22ninmut/400003072/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ed8bdac9956cf39cd59d7407ca55c418010f28ef8e30c02221bcc3d2bac90e88
- size 24656
 
 
backwards_TL/22ninmut/final_500002816/.cfg.json.swp DELETED
Binary file (12.3 kB)
 
backwards_TL/22ninmut/final_500002816/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/22ninmut/final_500002816/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3f1f5172f0847c6a15fb993ea9c10fea00470bdfdb436eb9f5f3cd60c746e11d
- size 37801344
 
 
backwards_TL/22ninmut/final_500002816/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:53bb6ce0541567e2888b72807252ecd39b2d7763ce2dd9041f362a8913659437
- size 24656
 
 
backwards_TL/7oj8g6c4/100003840/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/7oj8g6c4/100003840/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:06c7f1afee5174577ebf48c4ba2b2623c6702f5efca1f78a234ad93fc2b430f9
- size 37801344
 
 
backwards_TL/7oj8g6c4/100003840/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0df1c1f57067e205693519d17ea3ef8475d08e59d6fba88702ccaba5eba7e9de
- size 24656
 
 
backwards_TL/7oj8g6c4/200003584/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/7oj8g6c4/200003584/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:2469b8f471cef975f3bb34af4032fe84028d291fe2e21caa5eb413c9decab866
- size 37801344
 
 
backwards_TL/7oj8g6c4/200003584/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9cf0a539cc13b22142a55193ddd9cf21e650016917f95265017723d483907c46
- size 24656
 
 
backwards_TL/7oj8g6c4/300003328/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/7oj8g6c4/300003328/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5108e31bc436352cee16984fd02e9694fee65469604e79827fd67c96e5577cbb
- size 37801344
 
 
backwards_TL/7oj8g6c4/300003328/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:564c97f8a8520504b79659fb02ee5ce0a8171ad865184fe7c61e17aedf6593dd
- size 24656
 
 
backwards_TL/7oj8g6c4/400003072/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/7oj8g6c4/400003072/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4f1faf430cb13561449cdd2c921a6f3f1c3208e786954644403c6c502629d630
- size 37801344
 
 
backwards_TL/7oj8g6c4/400003072/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1746ddb7c85e37d01196f381bf4c52cd3f4438e7814aa985205fadfc8bac887d
- size 24656
 
 
backwards_TL/7oj8g6c4/final_500002816/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/7oj8g6c4/final_500002816/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:46b26bfa7fd1c40589596c2635fb7b8f9f66733d37106eceef2ad086cee13357
- size 37801344
 
 
backwards_TL/7oj8g6c4/final_500002816/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:664f11b2ae8eb9e56008562107a2e88435b2985b96f83e0934c16005cf7dc7e8
- size 24656
 
 
backwards_TL/djw9vsut/100003840/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.8.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 8, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L9", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L8_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/djw9vsut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/djw9vsut/100003840/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c88a94f94fa76d2443334202b792bcb2b58cc09fba0d518254abc0d6150ec262
- size 37801344
 
 
backwards_TL/djw9vsut/100003840/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8b992501a5f990ac63c38f3d1056c26dbd76feb5be2965d9a48981d77940215b
- size 24656
 
 
backwards_TL/djw9vsut/200003584/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.8.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 8, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L9", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L8_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/djw9vsut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/djw9vsut/200003584/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3bfdab5cd4df36ad506e336e3fe8b54514a3870a4ce5c2425994e5bb50207b4d
- size 37801344
 
 
backwards_TL/djw9vsut/200003584/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4c187b98557ddbc032ad09c447ddb0f77c2fe6d542e843386277bc82cca99118
- size 24656
 
 
backwards_TL/djw9vsut/300003328/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.8.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 8, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L9", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L8_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/djw9vsut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/djw9vsut/300003328/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3ac0d2b9f0886da1b393d1b148c06643d75206bc106c77a5201a98e22e5c2e06
- size 37801344
 
 
backwards_TL/djw9vsut/300003328/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:057e839a6f93fa3905a99dc4dd65357de53b5ad3884c032e8d3723cd89c47ff8
- size 24656
 
 
backwards_TL/djw9vsut/400003072/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.8.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 8, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L9", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L8_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/djw9vsut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/djw9vsut/400003072/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c8d17604d5fb6be2b153dc52f6ad34fac7ab51eced26c5c059d395b88d582365
- size 37801344
 
 
backwards_TL/djw9vsut/400003072/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b9baca68defa7b370c8f44378abc7cfc1c800cc75ca3649cb30a703346ced688
- size 24656
 
 
backwards_TL/djw9vsut/final_500002816/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.8.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 8, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L9", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L8_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/djw9vsut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/djw9vsut/final_500002816/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:57d935477552535a76aa373274260a92af693dacb7b41e8fd7c7b74b6c69fbd1
- size 37801344
 
 
backwards_TL/djw9vsut/final_500002816/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c6c2c2577594fdda2908b675580ad41cd6f4672a7dfbcf94f7da6159553bfb1e
- size 24656
 
 
backwards_TL/drbi3oow/100003840/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.9.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 9, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L10", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L9_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/drbi3oow", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
 
 
backwards_TL/drbi3oow/100003840/sae_weights.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:240a5ea1acd913b121e20e76ed4713fe80fa907c8d69178ca37ea9a2f9e21329
- size 37801344
 
 
backwards_TL/drbi3oow/100003840/sparsity.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c8bd076cb5293df80bf5138602ddb0e898ed59500252f5346460a37a0f234813
- size 24656
 
 
backwards_TL/drbi3oow/200003584/cfg.json DELETED
@@ -1 +0,0 @@
- {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.9.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 9, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L10", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L9_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/drbi3oow", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}