sharpenb committed · verified
Commit 964ebe3 · 1 Parent(s): 8b31ce9

72cc8f19f6f8d2751017cecb7d53e9ce7bbd8d1016ab28b079b1e577eec7c728

Files changed (2)
  1. config.json +21 -1
  2. smash_config.json +1 -1
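
This commit records an HQQ (Half-Quadratic Quantization) 4-bit build of google/gemma-2-2b-it: config.json gains a quantization_config block declaring 4-bit weights, group size 64, channel-wise quantization along axis 1, and lm_head excluded from quantization, while _name_or_path and the smash_config cache_dir now point at temporary build directories. As a rough sketch only, and not part of this commit, comparable settings map onto the transformers HQQ integration as shown below; the exact set of HqqConfig arguments can vary between transformers versions, and the model id simply reuses the base model from the old _name_or_path.

# Illustrative sketch only (not from this commit): quantize the base model on the fly
# with HQQ settings matching the new quantization_config (4-bit weights, group size 64).
# Requires the `hqq` package alongside transformers.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, HqqConfig

quant_config = HqqConfig(nbits=4, group_size=64)

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b-it",            # base model from the old _name_or_path
    torch_dtype=torch.float16,
    device_map="cuda",                 # matches "device": "cuda" in smash_config.json
    quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it")

Note that smash_config.json keeps "save_load_fn": "hqq", which suggests the pushed weights are serialized through HQQ's own save/load path, so the packaged checkpoint is normally reloaded through the packaging tool's loader rather than by re-quantizing as in the sketch above.
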
config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "google/gemma-2-2b-it",
+  "_name_or_path": "/tmp/models/tmplvpqq264/tmp7lo1_uef",
   "architectures": [
     "Gemma2ForCausalLM"
   ],
@@ -26,6 +26,26 @@
   "num_hidden_layers": 26,
   "num_key_value_heads": 4,
   "pad_token_id": 0,
+  "quantization_config": {
+    "quant_config": {
+      "offload_meta": false,
+      "scale_quant_params": null,
+      "weight_quant_params": {
+        "axis": 1,
+        "channel_wise": true,
+        "group_size": 64,
+        "nbits": 4,
+        "optimize": true,
+        "round_zero": true,
+        "view_as_float": false
+      },
+      "zero_quant_params": null
+    },
+    "quant_method": "hqq",
+    "skip_modules": [
+      "lm_head"
+    ]
+  },
   "query_pre_attn_scalar": 256,
   "rms_norm_eps": 1e-06,
   "rope_theta": 10000.0,
smash_config.json CHANGED
@@ -11,7 +11,7 @@
   "quant_hqq_weight_bits": 4,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/tmp/models/tmpv_g8jhwn",
+  "cache_dir": "/tmp/models/tmplvpqq264",
   "task": "",
   "save_load_fn": "hqq",
   "save_load_fn_args": {},