Davide Ghilardi committed

Commit 977dfba · 1 Parent(s): 0dda12c

Add reverse l1_1
l1_1/reversed/attn_out_layer0/cfg.json ADDED
@@ -0,0 +1,86 @@
+ {
+ "model_name": "1l-gelu",
+ "model_class_name": "HookedTransformer",
+ "hook_name": "blocks.0.attn.hook_z",
+ "hook_eval": "NOT_IN_USE",
+ "hook_layer": 0,
+ "hook_head_index": null,
+ "dataset_path": "ghidav/arithmetics_reversed",
+ "dataset_trust_remote_code": true,
+ "streaming": true,
+ "is_dataset_tokenized": true,
+ "context_size": 18,
+ "use_cached_activations": false,
+ "cached_activations_path": null,
+ "architecture": "jumprelu",
+ "d_in": 512,
+ "d_sae": 8192,
+ "b_dec_init_method": "zeros",
+ "expansion_factor": 16,
+ "activation_fn": "relu",
+ "activation_fn_kwargs": {},
+ "normalize_sae_decoder": true,
+ "noise_scale": 0.0,
+ "from_pretrained_path": null,
+ "apply_b_dec_to_input": false,
+ "decoder_orthogonal_init": false,
+ "decoder_heuristic_init": false,
+ "init_encoder_as_decoder_transpose": false,
+ "n_batches_in_buffer": 128,
+ "training_tokens": 50000000,
+ "finetuning_tokens": 0,
+ "store_batch_size_prompts": 8,
+ "train_batch_size_tokens": 1024,
+ "normalize_activations": "none",
+ "device": "cuda",
+ "act_store_device": "cuda",
+ "seed": 42,
+ "dtype": "float32",
+ "prepend_bos": false,
+ "autocast": false,
+ "autocast_lm": false,
+ "compile_llm": false,
+ "llm_compilation_mode": null,
+ "compile_sae": false,
+ "sae_compilation_mode": null,
+ "adam_beta1": 0,
+ "adam_beta2": 0.999,
+ "mse_loss_normalization": null,
+ "l1_coefficient": 1.0,
+ "lp_norm": 1,
+ "scale_sparsity_penalty_by_decoder_norm": false,
+ "l1_warm_up_steps": 2441,
+ "lr": 0.0005,
+ "lr_scheduler_name": "constant",
+ "lr_warm_up_steps": 0,
+ "lr_end": 5e-05,
+ "lr_decay_steps": 9765,
+ "n_restart_cycles": 1,
+ "finetuning_method": null,
+ "use_ghost_grads": false,
+ "feature_sampling_window": 2000,
+ "dead_feature_window": 1000,
+ "dead_feature_threshold": 1e-06,
+ "n_eval_batches": 10,
+ "eval_batch_size_prompts": null,
+ "log_to_wandb": true,
+ "log_activations_store_to_wandb": false,
+ "log_optimizer_state_to_wandb": false,
+ "wandb_project": "sae-feature-circuits",
+ "wandb_id": null,
+ "run_name": "L0_attn.hook_z_L1_1_0_rev",
+ "wandb_entity": null,
+ "wandb_log_frequency": 30,
+ "eval_every_n_wandb_logs": 100,
+ "resume": false,
+ "n_checkpoints": 0,
+ "checkpoint_path": "checkpoints/ksq6xya7",
+ "verbose": false,
+ "model_kwargs": {},
+ "model_from_pretrained_kwargs": {
+ "center_writing_weights": false
+ },
+ "sae_lens_version": "3.20.5",
+ "sae_lens_training_version": "3.20.5",
+ "tokens_per_buffer": 2359296
+ }
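
Once the three files in this directory are available locally (with real LFS payloads, not the pointer stubs shown below), the SAE can be loaded with SAE Lens. A minimal sketch, assuming sae_lens ~3.20 exposes SAE.load_from_pretrained for local folders containing cfg.json and sae_weights.safetensors:

from sae_lens import SAE

# Load from a local copy of l1_1/reversed/attn_out_layer0 (path is illustrative).
sae = SAE.load_from_pretrained("l1_1/reversed/attn_out_layer0", device="cpu")

print(sae.cfg.hook_name)            # expected: "blocks.0.attn.hook_z"
print(sae.cfg.d_in, sae.cfg.d_sae)  # expected: 512 8192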
l1_1/reversed/attn_out_layer0/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53f05bef3182877f0ca82ea045fc120240b98d0a23313ff7040c5d414fc71464
+ size 33622400
l1_1/reversed/attn_out_layer0/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:060a8e2c43f1d1c6254169289a7d732150319f08f747b9b141b39f70557fbea7
+ size 32848
l1_1/reversed/mlp_out_layer0/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "1l-gelu", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_mlp_out", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "ghidav/arithmetics_reversed", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 18, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 512, "d_sae": 8192, "b_dec_init_method": "zeros", "expansion_factor": 16, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": null, "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 50000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 1024, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 2441, "lr": 0.0005, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 5e-05, "lr_decay_steps": 9765, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-feature-circuits", "wandb_id": null, "run_name": "L0_hook_mlp_out_L1_1_0_rev", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 0, "checkpoint_path": "checkpoints/b901vcmz", "verbose": false, "model_kwargs": {}, "model_from_pretrained_kwargs": {"center_writing_weights": false}, "sae_lens_version": "3.20.5", "sae_lens_training_version": "3.20.5", "tokens_per_buffer": 2359296}
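
This config appears to differ from the attn_out_layer0 one only in hook_name, run_name, and checkpoint_path; the rest of the training setup is shared. A quick stdlib check (a sketch, assuming local copies of both files):

import json

with open("l1_1/reversed/attn_out_layer0/cfg.json") as f:
    attn_cfg = json.load(f)
with open("l1_1/reversed/mlp_out_layer0/cfg.json") as f:
    mlp_cfg = json.load(f)

# Show every key whose value differs between the two configs.
print({k: (attn_cfg[k], mlp_cfg[k]) for k in attn_cfg if attn_cfg[k] != mlp_cfg[k]})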
l1_1/reversed/mlp_out_layer0/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a1b85fef7ce03ef1222cc7f3a1861176b4566997237cbbdee96651bb5075239
+ size 33622400
l1_1/reversed/mlp_out_layer0/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c58e029f81b4a19381d4e6ce6efd8524d8dea5d89826983b0b63ab39d3c7c8a1
+ size 32848
l1_1/reversed/resid_out_layer0/cfg.json ADDED
@@ -0,0 +1 @@
+ {"model_name": "1l-gelu", "model_class_name": "HookedTransformer", "hook_name": "blocks.0.hook_resid_post", "hook_eval": "NOT_IN_USE", "hook_layer": 0, "hook_head_index": null, "dataset_path": "ghidav/arithmetics_reversed", "dataset_trust_remote_code": true, "streaming": true, "is_dataset_tokenized": true, "context_size": 18, "use_cached_activations": false, "cached_activations_path": null, "architecture": "jumprelu", "d_in": 512, "d_sae": 8192, "b_dec_init_method": "zeros", "expansion_factor": 16, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": true, "noise_scale": 0.0, "from_pretrained_path": null, "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": false, "init_encoder_as_decoder_transpose": false, "n_batches_in_buffer": 128, "training_tokens": 50000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "train_batch_size_tokens": 1024, "normalize_activations": "none", "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "adam_beta1": 0, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 1.0, "lp_norm": 1, "scale_sparsity_penalty_by_decoder_norm": false, "l1_warm_up_steps": 2441, "lr": 0.0005, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 5e-05, "lr_decay_steps": 9765, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 2000, "dead_feature_window": 1000, "dead_feature_threshold": 1e-06, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-feature-circuits", "wandb_id": null, "run_name": "L0_hook_resid_post_L1_1_0_rev", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 100, "resume": false, "n_checkpoints": 0, "checkpoint_path": "checkpoints/fk64hm4k", "verbose": false, "model_kwargs": {}, "model_from_pretrained_kwargs": {"center_writing_weights": false}, "sae_lens_version": "3.20.5", "sae_lens_training_version": "3.20.5", "tokens_per_buffer": 2359296}
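
Some of the config fields are internally consistent in a checkable way: d_sae equals d_in * expansion_factor, and tokens_per_buffer happens to equal n_batches_in_buffer * train_batch_size_tokens * context_size (an observation about these particular numbers, not a formula stated in this diff):

# Derived-value sanity checks against the cfg.json values above.
d_in, expansion_factor = 512, 16
assert d_in * expansion_factor == 8192  # matches "d_sae"

n_batches_in_buffer, train_batch_size_tokens, context_size = 128, 1024, 18
assert n_batches_in_buffer * train_batch_size_tokens * context_size == 2359296  # matches "tokens_per_buffer"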
l1_1/reversed/resid_out_layer0/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c6fb090b0ed1bd35f51112a050b86b479917aff5a047402ecdcf493070b37b9
+ size 33622400
l1_1/reversed/resid_out_layer0/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48024286c1aa07a8e7f2c9d1d1e35abebd6f81754a0ae8e0cc0400de00b33199
+ size 32848