MrGonao committed (verified)
Commit 3977f04 · 1 Parent(s): 4178b22

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. k32-sae-32k-seed2/config.json +1 -0
  2. k32-sae-32k-seed2/layers.6/cfg.json +1 -0
  3. k32-sae-32k-seed2/layers.6/sae.safetensors +3 -0
  4. k32-sae-32k-seed2/lr_scheduler.pt +3 -0
  5. k32-sae-32k-seed2/optimizer.pt +3 -0
  6. k32-sae-32k-seed2/state.pt +3 -0
  7. k32-sae-32k-seed3/config.json +1 -0
  8. k32-sae-32k-seed3/layers.6/cfg.json +1 -0
  9. k32-sae-32k-seed3/layers.6/sae.safetensors +3 -0
  10. k32-sae-32k-seed3/lr_scheduler.pt +3 -0
  11. k32-sae-32k-seed3/optimizer.pt +3 -0
  12. k32-sae-32k-seed3/state.pt +3 -0
  13. k32-sae-4k-seed2/config.json +1 -0
  14. k32-sae-4k-seed2/layers.6/cfg.json +1 -0
  15. k32-sae-4k-seed2/layers.6/sae.safetensors +3 -0
  16. k32-sae-4k-seed2/lr_scheduler.pt +3 -0
  17. k32-sae-4k-seed2/optimizer.pt +3 -0
  18. k32-sae-4k-seed2/state.pt +3 -0
  19. k32-sae-4k/config.json +1 -0
  20. k32-sae-4k/layers.6/cfg.json +1 -0
  21. k32-sae-4k/layers.6/sae.safetensors +3 -0
  22. k32-sae-4k/lr_scheduler.pt +3 -0
  23. k32-sae-4k/optimizer.pt +3 -0
  24. k32-sae-4k/state.pt +3 -0
  25. k32-sae-mlp-131k-seed2/config.json +1 -0
  26. k32-sae-mlp-131k-seed2/layers.6.mlp/cfg.json +1 -0
  27. k32-sae-mlp-131k-seed2/layers.6.mlp/sae.safetensors +3 -0
  28. k32-sae-mlp-131k-seed2/lr_scheduler.pt +3 -0
  29. k32-sae-mlp-131k-seed2/optimizer.pt +3 -0
  30. k32-sae-mlp-131k-seed2/state.pt +3 -0
  31. k32-sae-mlp-131k/config.json +1 -0
  32. k32-sae-mlp-131k/layers.6.mlp/cfg.json +1 -0
  33. k32-sae-mlp-131k/layers.6.mlp/sae.safetensors +3 -0
  34. k32-sae-mlp-131k/lr_scheduler.pt +3 -0
  35. k32-sae-mlp-131k/optimizer.pt +3 -0
  36. k32-sae-mlp-131k/state.pt +3 -0
  37. k32-sae-mlp-32-seed2/config.json +1 -0
  38. k32-sae-mlp-32-seed2/layers.0.mlp/cfg.json +1 -0
  39. k32-sae-mlp-32-seed2/layers.0.mlp/sae.safetensors +3 -0
  40. k32-sae-mlp-32-seed2/layers.1.mlp/cfg.json +1 -0
  41. k32-sae-mlp-32-seed2/layers.1.mlp/sae.safetensors +3 -0
  42. k32-sae-mlp-32-seed2/layers.10.mlp/cfg.json +1 -0
  43. k32-sae-mlp-32-seed2/layers.10.mlp/sae.safetensors +3 -0
  44. k32-sae-mlp-32-seed2/layers.11.mlp/cfg.json +1 -0
  45. k32-sae-mlp-32-seed2/layers.11.mlp/sae.safetensors +3 -0
  46. k32-sae-mlp-32-seed2/layers.2.mlp/cfg.json +1 -0
  47. k32-sae-mlp-32-seed2/layers.2.mlp/sae.safetensors +3 -0
  48. k32-sae-mlp-32-seed2/layers.3.mlp/cfg.json +1 -0
  49. k32-sae-mlp-32-seed2/layers.3.mlp/sae.safetensors +3 -0
  50. k32-sae-mlp-32-seed2/layers.4.mlp/cfg.json +1 -0
k32-sae-32k-seed2/config.json ADDED
@@ -0,0 +1 @@
+ {"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32767, "k": 32, "multi_topk": false}, "batch_size": 8, "grad_acc_steps": 1, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["layers.6"], "layers": [], "layer_stride": 1, "transcode": false, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "k32-sae-32k-seed-2", "wandb_log_frequency": 1, "model": "EleutherAI/pythia-160m", "dataset": "/mnt/ssd-1/pile_preshuffled/standard/document.bin", "split": "train", "ctx_len": 2049, "hf_token": null, "revision": null, "load_in_8bit": false, "max_examples": 4000000, "resume": false, "finetune": null, "seed": 42, "data_preprocessing_num_proc": 48}
k32-sae-32k-seed2/layers.6/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32767, "k": 32, "multi_topk": false, "d_in": 768}
k32-sae-32k-seed2/layers.6/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16b40cd627dd5d4b46033cd36f30707bb0abb2f16b2dcee5c9d1ffda7c256220
+ size 201454924
k32-sae-32k-seed2/lr_scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f0a1e452458dcff0a7d7c508004ac9dd6172452eab713e5cf8e037254b55ff1
+ size 1012
k32-sae-32k-seed2/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a8d536df4f49670ebb57f98551e2c773c9261ddd17b84fb3377db87cd17ae41
+ size 102313038
k32-sae-32k-seed2/state.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01f44192946218fe46687d074f00646485b9caaafcf4278c59eed86882fba20a
+ size 263314
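Each run directory in this commit follows the same layout: a top-level config.json with the full training configuration, one subdirectory per hookpoint (here layers.6) containing cfg.json and sae.safetensors, and optimizer.pt, lr_scheduler.pt and state.pt for resuming training. A minimal sketch of inspecting one of these SAEs follows, assuming a local clone of this repo and using only the generic json/safetensors APIs rather than the training library's own loader:

```python
import json
from pathlib import Path

from safetensors.torch import load_file

# Hypothetical local path to one hookpoint directory from this commit.
ckpt_dir = Path("k32-sae-32k-seed2/layers.6")

# cfg.json holds the SAE hyperparameters (expansion_factor, num_latents, k, d_in, ...).
cfg = json.loads((ckpt_dir / "cfg.json").read_text())

# sae.safetensors holds the SAE parameters as named tensors.
state_dict = load_file(str(ckpt_dir / "sae.safetensors"))

print(cfg["num_latents"], cfg["k"], cfg["d_in"])
for name, tensor in state_dict.items():
    print(name, tuple(tensor.shape))
```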
k32-sae-32k-seed3/config.json ADDED
@@ -0,0 +1 @@
+ {"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32768, "k": 32, "multi_topk": false, "skip_connection": false}, "batch_size": 8, "grad_acc_steps": 1, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["layers.6"], "layers": [], "layer_stride": 1, "transcode": false, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "k32-sae-mlp-32k-seed3", "wandb_log_frequency": 1, "model": "EleutherAI/pythia-160m", "dataset": "/mnt/ssd-1/pile_preshuffled/standard/document.bin", "split": "train", "ctx_len": 2049, "hf_token": null, "revision": null, "load_in_8bit": false, "max_examples": 4000000, "resume": false, "finetune": null, "seed": 52, "data_preprocessing_num_proc": 48}
k32-sae-32k-seed3/layers.6/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32768, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-32k-seed3/layers.6/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1eaca564411d77eb066645d7d70c90a9ab8cf2af3daf1a580d8db1508578f42c
+ size 201461072
k32-sae-32k-seed3/lr_scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a645210450394a692a612289cb5fe097161dfa420f3c634f6516bc67841ac2b4
+ size 1012
k32-sae-32k-seed3/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c76608cb52470444eff72adfb2856858648c97fca54c139888d5cd90f23ca92
+ size 102316366
k32-sae-32k-seed3/state.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:030d953fc60b9fade27a7f202235308c1b5910d15c53c99ff0153648d3f7efd2
+ size 263314
k32-sae-4k-seed2/config.json ADDED
@@ -0,0 +1 @@
+ {"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 4084, "k": 32, "multi_topk": false}, "batch_size": 8, "grad_acc_steps": 1, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["layers.6"], "layers": [], "layer_stride": 1, "transcode": false, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "k32-sae-4k-seed-2", "wandb_log_frequency": 1, "model": "EleutherAI/pythia-160m", "dataset": "/mnt/ssd-1/pile_preshuffled/standard/document.bin", "split": "train", "ctx_len": 2049, "hf_token": null, "revision": null, "load_in_8bit": false, "max_examples": 4000000, "resume": false, "finetune": null, "seed": 42, "data_preprocessing_num_proc": 48}
k32-sae-4k-seed2/layers.6/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 4084, "k": 32, "multi_topk": false, "d_in": 768}
k32-sae-4k-seed2/layers.6/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10f3b782c57111d9b70bddf462b9f3b2ddce9f2acf8e5f1903b6ec8ae3003aa7
+ size 25111832
k32-sae-4k-seed2/lr_scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee7d6621abae43ef404e05f222fac18c5b0579dd18d6dfa06ce767eb4da83c84
+ size 1012
k32-sae-4k-seed2/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1da6314ae5fe35c78c7c2a9f8c5d90a3e2fcb826b867ad49b15a3df848c79ada
+ size 12787664
k32-sae-4k-seed2/state.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8efed45dd478ea04f30ecad126e8e39188dc200a94c8fe8e4f9ac5cc3f119e0
+ size 33874
k32-sae-4k/config.json ADDED
@@ -0,0 +1 @@
+ {"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 4084, "k": 32, "multi_topk": false}, "batch_size": 8, "grad_acc_steps": 1, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["layers.6"], "layers": [], "layer_stride": 1, "transcode": false, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "k32-sae-4k", "wandb_log_frequency": 1, "model": "EleutherAI/pythia-160m", "dataset": "/mnt/ssd-1/pile_preshuffled/standard/document.bin", "split": "train", "ctx_len": 2049, "hf_token": null, "revision": null, "load_in_8bit": false, "max_examples": 4000000, "resume": false, "finetune": null, "seed": 42, "data_preprocessing_num_proc": 48}
k32-sae-4k/layers.6/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 4084, "k": 32, "multi_topk": false, "d_in": 768}
k32-sae-4k/layers.6/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff14a1d338e11b6ca41a490ac9342b21ac28e597e49348cfad88a335efd3bdee
+ size 25111832
k32-sae-4k/lr_scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee7d6621abae43ef404e05f222fac18c5b0579dd18d6dfa06ce767eb4da83c84
+ size 1012
k32-sae-4k/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e859e10167a20db5e4a7b30d086194a0f07e6fb384b41992ba04118f57d3aec9
+ size 12787664
k32-sae-4k/state.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0322e5e343f9de89481204249014b0cf21e1574df473ebb1101fd826e10ec30a
+ size 33874
k32-sae-mlp-131k-seed2/config.json ADDED
@@ -0,0 +1 @@
+ {"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 131072, "k": 32, "multi_topk": false, "skip_connection": false}, "batch_size": 8, "grad_acc_steps": 1, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["layers.6.mlp"], "layers": [], "layer_stride": 1, "transcode": false, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "k32-sae-mlp-131k-seed2", "wandb_log_frequency": 1, "model": "EleutherAI/pythia-160m", "dataset": "/mnt/ssd-1/pile_preshuffled/standard/document.bin", "split": "train", "ctx_len": 2049, "hf_token": null, "revision": null, "load_in_8bit": false, "max_examples": 4000000, "resume": false, "finetune": null, "seed": 22, "data_preprocessing_num_proc": 48}
k32-sae-mlp-131k-seed2/layers.6.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 131072, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-mlp-131k-seed2/layers.6.mlp/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4221255f0e0ab6d83d5a0ca2a82057cc136e548b7ece89a3f9fc10ff16917b8
+ size 805834064
k32-sae-mlp-131k-seed2/lr_scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:362ac3a1db985156bc8176ba7ca7e961052de2b14537e00920c9d0ced3332f83
+ size 1012
k32-sae-mlp-131k-seed2/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:399683a7d9f55b264a13ce603ba3bc208056d2b42445fbf30379b2d4604f958a
+ size 409224590
k32-sae-mlp-131k-seed2/state.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:702086fe6e3933ee528e639b59c34fa6050e7aa481538fdf531ef0625d5d23b6
+ size 1049746
k32-sae-mlp-131k/config.json ADDED
@@ -0,0 +1 @@
+ {"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 131072, "k": 32, "multi_topk": false, "skip_connection": false}, "batch_size": 8, "grad_acc_steps": 1, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["layers.6.mlp"], "layers": [], "layer_stride": 1, "transcode": false, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "k32-sae-mlp-131k", "wandb_log_frequency": 1, "model": "EleutherAI/pythia-160m", "dataset": "/mnt/ssd-1/pile_preshuffled/standard/document.bin", "split": "train", "ctx_len": 2049, "hf_token": null, "revision": null, "load_in_8bit": false, "max_examples": 4000000, "resume": false, "finetune": null, "seed": 42, "data_preprocessing_num_proc": 48}
k32-sae-mlp-131k/layers.6.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 131072, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-mlp-131k/layers.6.mlp/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f227709d7296ee81b47df96e823ae05cad6896cb200296a98f15c7e6e5112570
+ size 805834064
k32-sae-mlp-131k/lr_scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:362ac3a1db985156bc8176ba7ca7e961052de2b14537e00920c9d0ced3332f83
+ size 1012
k32-sae-mlp-131k/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af46175f5379285fbd13a4202c53bdf9058453445d1e014b2807c2c93c0f8af1
+ size 409224590
k32-sae-mlp-131k/state.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:315e73d5973c4fe4362f3e7ab6db5e111279736148ff3a66b30665db9d4d33ce
+ size 1049746
k32-sae-mlp-32-seed2/config.json ADDED
@@ -0,0 +1 @@
+ {"sae": {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32, "k": 32, "multi_topk": false, "skip_connection": false}, "batch_size": 8, "grad_acc_steps": 1, "micro_acc_steps": 1, "lr": null, "lr_warmup_steps": 1000, "auxk_alpha": 0.0, "dead_feature_threshold": 10000000, "hookpoints": ["layers.0.mlp", "layers.1.mlp", "layers.2.mlp", "layers.3.mlp", "layers.4.mlp", "layers.5.mlp", "layers.6.mlp", "layers.7.mlp", "layers.8.mlp", "layers.9.mlp", "layers.10.mlp", "layers.11.mlp"], "layers": [], "layer_stride": 1, "transcode": false, "distribute_modules": false, "save_every": 1000, "log_to_wandb": true, "run_name": "k32-sae-mlp-32-seed2", "wandb_log_frequency": 1, "model": "EleutherAI/pythia-160m", "dataset": "/mnt/ssd-1/pile_preshuffled/standard/document.bin", "split": "train", "ctx_len": 2049, "hf_token": null, "revision": null, "load_in_8bit": false, "max_examples": 4000000, "resume": false, "finetune": null, "seed": 22, "data_preprocessing_num_proc": 48}
k32-sae-mlp-32-seed2/layers.0.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-mlp-32-seed2/layers.0.mlp/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e38ba482665bf6cf883ef2ae7eb4c03fe58eeed7c70512cde2145e1a09aa9338
+ size 200112
k32-sae-mlp-32-seed2/layers.1.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-mlp-32-seed2/layers.1.mlp/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3a60c4f4fe47161150cd5a9eec22c0af508a60628d657e532fecfa9a3317d16
+ size 200112
k32-sae-mlp-32-seed2/layers.10.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-mlp-32-seed2/layers.10.mlp/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:630bd111b8ba1156a167ddbc1362f07ef69c9254e0b68ba4f859fb1692a2261f
+ size 200112
k32-sae-mlp-32-seed2/layers.11.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-mlp-32-seed2/layers.11.mlp/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fc5f01c1902724ba4df2f5f4e29e4798ae102af6c1c70972ef7adfc402e37ab
+ size 200112
k32-sae-mlp-32-seed2/layers.2.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-mlp-32-seed2/layers.2.mlp/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd808d179120860cef241ec92c0f2598f7926dcc88263749f42ce3b27494b9ea
+ size 200112
k32-sae-mlp-32-seed2/layers.3.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}
k32-sae-mlp-32-seed2/layers.3.mlp/sae.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10f6e149b0ba5f2ec371e722eb6379dad50a3d89d985b7d6621edaa7e6f42599
+ size 200112
k32-sae-mlp-32-seed2/layers.4.mlp/cfg.json ADDED
@@ -0,0 +1 @@
+ {"expansion_factor": 32, "normalize_decoder": true, "num_latents": 32, "k": 32, "multi_topk": false, "skip_connection": false, "d_in": 768}