ghidav committed
Commit 5ae749d
Parent: fad35a5

Big Rename

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. backward/L0/100M/cfg.json +1 -0
  2. backward/L0/100M/sae_weights.safetensors +3 -0
  3. backward/L0/100M/sparsity.safetensors +3 -0
  4. backward/L0/200M/cfg.json +1 -0
  5. backward/L0/200M/sae_weights.safetensors +3 -0
  6. backward/L0/200M/sparsity.safetensors +3 -0
  7. backward/L0/300M/cfg.json +1 -0
  8. backward/L0/300M/sae_weights.safetensors +3 -0
  9. backward/L0/300M/sparsity.safetensors +3 -0
  10. backward/L0/400M/cfg.json +1 -0
  11. backward/L0/400M/sae_weights.safetensors +3 -0
  12. backward/L0/400M/sparsity.safetensors +3 -0
  13. backward/L0/500M/.cfg.json.swp +0 -0
  14. backward/L0/500M/cfg.json +1 -0
  15. backward/L0/500M/sae_weights.safetensors +3 -0
  16. backward/L0/500M/sparsity.safetensors +3 -0
  17. backward/L1/100M/cfg.json +1 -0
  18. backward/L1/100M/sae_weights.safetensors +3 -0
  19. backward/L1/100M/sparsity.safetensors +3 -0
  20. backward/L1/200M/cfg.json +1 -0
  21. backward/L1/200M/sae_weights.safetensors +3 -0
  22. backward/L1/200M/sparsity.safetensors +3 -0
  23. backward/L1/300M/cfg.json +1 -0
  24. backward/L1/300M/sae_weights.safetensors +3 -0
  25. backward/L1/300M/sparsity.safetensors +3 -0
  26. backward/L1/400M/cfg.json +1 -0
  27. backward/L1/400M/sae_weights.safetensors +3 -0
  28. backward/L1/400M/sparsity.safetensors +3 -0
  29. backward/L1/500M/cfg.json +1 -0
  30. backward/L1/500M/sae_weights.safetensors +3 -0
  31. backward/L1/500M/sparsity.safetensors +3 -0
  32. backward/L10/100M/cfg.json +1 -0
  33. backward/L10/100M/sae_weights.safetensors +3 -0
  34. backward/L10/100M/sparsity.safetensors +3 -0
  35. backward/L10/200M/cfg.json +1 -0
  36. backward/L10/200M/sae_weights.safetensors +3 -0
  37. backward/L10/200M/sparsity.safetensors +3 -0
  38. backward/L10/300M/cfg.json +1 -0
  39. backward/L10/300M/sae_weights.safetensors +3 -0
  40. backward/L10/300M/sparsity.safetensors +3 -0
  41. backward/L10/400M/cfg.json +1 -0
  42. backward/L10/400M/sae_weights.safetensors +3 -0
  43. backward/L10/400M/sparsity.safetensors +3 -0
  44. backward/L10/500M/cfg.json +1 -0
  45. backward/L10/500M/sae_weights.safetensors +3 -0
  46. backward/L10/500M/sparsity.safetensors +3 -0
  47. backward/L2/100M/cfg.json +1 -0
  48. backward/L2/100M/sae_weights.safetensors +3 -0
  49. backward/L2/100M/sparsity.safetensors +3 -0
  50. backward/L2/200M/cfg.json +1 -0
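Each checkpoint directory backward/L{layer}/{tokens}M holds three files: cfg.json (the SAE Lens training configuration), sae_weights.safetensors (the SAE parameters), and sparsity.safetensors (per-feature sparsity statistics). The from_pretrained_path fields in the configs below suggest the "backward" naming: each layer-N SAE is fine-tuned from the pretrained layer-(N+1) SAE (L0 from L1, L1 from L2, L10 from L11). A minimal loading sketch, assuming a hypothetical repo id and that sae_lens's SAE.load_from_pretrained accepts a local checkpoint directory (both are assumptions, not stated in this diff):

    from huggingface_hub import snapshot_download
    from sae_lens import SAE

    # Assumption: repo id is illustrative -- substitute the actual Hub repository.
    local_dir = snapshot_download(
        repo_id="your-org/sae-transfer-learning",
        allow_patterns=["backward/L0/100M/*"],  # one checkpoint: layer 0, 100M tokens
    )

    # cfg.json and sae_weights.safetensors sit side by side in the checkpoint dir.
    sae = SAE.load_from_pretrained(f"{local_dir}/backward/L0/100M", device="cpu")
    print(sae.cfg.d_in, sae.cfg.d_sae)  # expect 768 and 6144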
backward/L0/100M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L0/100M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eaf688bedd969130bc40dabef5ecd4d20d1650527d890da1486a3009cc52703e
+ size 37801344
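These .safetensors entries are Git LFS pointers: the repository tracks only a version line, a sha256 object id, and the byte size, while the tensors themselves live in LFS storage. The 37,801,344-byte size is consistent with float32 768×6144 encoder and decoder matrices (2 × 768 × 6144 × 4 = 37,748,736 bytes) plus bias and threshold vectors and a small safetensors header. A sketch that verifies a downloaded object against its pointer (path illustrative; hash and size copied from the pointer above):

    import hashlib

    def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
        """Check a downloaded LFS object against its pointer's oid and size."""
        digest = hashlib.sha256()
        size = 0
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
                digest.update(chunk)
                size += len(chunk)
        return digest.hexdigest() == expected_sha256 and size == expected_size

    print(verify_lfs_object(
        "backward/L0/100M/sae_weights.safetensors",
        "eaf688bedd969130bc40dabef5ecd4d20d1650527d890da1486a3009cc52703e",
        37801344,
    ))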
backward/L0/100M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3eccde1a921dbea545c2838d42de3e05112412e5ef0ff625013f1152fc54f41d
+ size 24656
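At 24,656 bytes, each sparsity file is consistent with one float32 value per feature: 6144 × 4 = 24,576 bytes plus a small safetensors header. A sketch that inspects it without assuming the stored tensor name (path illustrative):

    from safetensors import safe_open

    with safe_open("backward/L0/100M/sparsity.safetensors", framework="pt") as f:
        for name in f.keys():            # discover the stored tensor name(s)
            t = f.get_tensor(name)
            print(name, tuple(t.shape))  # expect a (6144,)-shaped tensor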
backward/L0/200M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L0/200M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7e2e760369b076b49810ebb2e9856dbfb0cba2a555021dc7b1f4b181056d125
+ size 37801344
backward/L0/200M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:174f1e22cce1b01e437592772e23c4c46d594bdd06361197c1f1d13c2de0adbe
+ size 24656
backward/L0/300M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L0/300M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90c6a2bc0fb1221f1a297e78f914dcd4431d0594d665b9e6971ad53ad1e3a4f9
+ size 37801344
backward/L0/300M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82ab0163c40693019a4f9971942ad6b2bc7864077175efae7d2b48bc2772af5d
+ size 24656
backward/L0/400M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L0/400M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3523d121fcf5fba450df3a43ec1d64c3175d40673953b7525d740e02c61d1470
+ size 37801344
backward/L0/400M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed8bdac9956cf39cd59d7407ca55c418010f28ef8e30c02221bcc3d2bac90e88
+ size 24656
backward/L0/500M/.cfg.json.swp ADDED
Binary file (12.3 kB).
backward/L0/500M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L1", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L0_hook_resid_post_L1_1_0", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/22ninmut", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L0/500M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f1f5172f0847c6a15fb993ea9c10fea00470bdfdb436eb9f5f3cd60c746e11d
+ size 37801344
backward/L0/500M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53bb6ce0541567e2888b72807252ecd39b2d7763ce2dd9041f362a8913659437
+ size 24656
backward/L1/100M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.1.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 1, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L2", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L1_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/e902fx68", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L1/100M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40654495475ccdfa37cba476e5365566e00c0b44cd848978c3a73c679570633b
+ size 37801344
backward/L1/100M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a86592e4854a528529eab9693d810a1a5f2fea1e2a4dfebeffe23b1933caba07
+ size 24656
backward/L1/200M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.1.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 1, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L2", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L1_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/e902fx68", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L1/200M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:faeff4d4e2cb069296723e02137a1f7c2262f85b9207a97ff0c44591a669b995
+ size 37801344
backward/L1/200M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbe8a880c3cd75eab5e4a2cff2565620fe824fb546a769b99cfac144975a7a80
+ size 24656
backward/L1/300M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.1.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 1, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L2", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L1_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/e902fx68", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L1/300M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2512ab962c015caf7df3065b6f9029923b84a6605a9013d77b928741c4d88c2b
+ size 37801344
backward/L1/300M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0bcbdff899ed867b0f7ad7541cc91b42157a2f4899454a7008ca22939ad7a94
+ size 24656
backward/L1/400M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.1.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 1, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L2", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L1_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/e902fx68", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L1/400M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b83684c51c5d75b650c6d0c45216a426f53f1d8f29d11b612f12ba4221ac5f5b
+ size 37801344
backward/L1/400M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f505ab8c0d2a825afb14b676b4acef5c3c99827b93de3cba9990018cd6febc3
+ size 24656
backward/L1/500M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.1.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 1, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L2", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L1_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/e902fx68", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L1/500M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eede70447401ff9ab88d6e2a2d2bd52fae4035b2edbaca337bc67a9cd8ac2d03
+ size 37801344
backward/L1/500M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4b05feb5c4cf7cbb3ce4648e8fd36be3924fc33a9d942232ad189d1c460a267
+ size 24656
backward/L10/100M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L10/100M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06c7f1afee5174577ebf48c4ba2b2623c6702f5efca1f78a234ad93fc2b430f9
+ size 37801344
backward/L10/100M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0df1c1f57067e205693519d17ea3ef8475d08e59d6fba88702ccaba5eba7e9de
+ size 24656
backward/L10/200M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L10/200M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2469b8f471cef975f3bb34af4032fe84028d291fe2e21caa5eb413c9decab866
+ size 37801344
backward/L10/200M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cf0a539cc13b22142a55193ddd9cf21e650016917f95265017723d483907c46
+ size 24656
backward/L10/300M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L10/300M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5108e31bc436352cee16984fd02e9694fee65469604e79827fd67c96e5577cbb
+ size 37801344
backward/L10/300M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:564c97f8a8520504b79659fb02ee5ce0a8171ad865184fe7c61e17aedf6593dd
+ size 24656
backward/L10/400M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L10/400M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f1faf430cb13561449cdd2c921a6f3f1c3208e786954644403c6c502629d630
+ size 37801344
backward/L10/400M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1746ddb7c85e37d01196f381bf4c52cd3f4438e7814aa985205fadfc8bac887d
+ size 24656
backward/L10/500M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.10.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 10, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L11", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L10_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/7oj8g6c4", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L10/500M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46b26bfa7fd1c40589596c2635fb7b8f9f66733d37106eceef2ad086cee13357
+ size 37801344
backward/L10/500M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:664f11b2ae8eb9e56008562107a2e88435b2985b96f83e0934c16005cf7dc7e8
+ size 24656
backward/L2/100M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.2.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 2, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L3", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L2_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/is8g1rbr", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}
backward/L2/100M/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8088d59e465ebf95e62b76e2fc5ac260da1a0baf3eb145ee7426bfc13ce609bb
+ size 37801344
backward/L2/100M/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b284c60af440f7662815ea431c073ae5c6e9c76685a4a447aed3e74b51ec1dd4
+ size 24656
backward/L2/200M/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "pythia-160m-deduped", "model_class_name": "HookedTransformer", "hook_name": "blocks.2.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 2, "hook_head_index": null, "dataset_path": "NeelNanda/pile-small-tokenized-2b", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 768, "d_sae": 6144, "b_dec_init_method": "zeros", "expansion_factor": 8, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": "./hub/models--mech-interp--pythia-160m-deduped-rs-post/snapshots/ad21dc2dd4070805dbeb842dcbfa14e6ad74e2b9/L3", "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 500000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 4096, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 6103, "lr": 1e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 1.0000000000000002e-06, "lr_decay_steps": 24414, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-transfer-learning", "wandb_id": null, "run_name": "FT_L2_hook_resid_post_L1_1", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 5, "checkpoint_path": "checkpoints/is8g1rbr", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {}, "sae_lens_version": "3.14.0", "sae_lens_training_version": "3.14.0", "tokens_per_buffer": 536870912}