Holy-fox committed
Commit 7023191 · verified · 1 parent: eafc80e

Upload 34 files

README.md CHANGED
@@ -1,3 +1,41 @@
- ---
- license: apache-2.0
- ---
+ ---
+ base_model: []
+ library_name: transformers
+ tags:
+ - mergekit
+ - merge
+
+ ---
+ # test_model_14B
+
+ This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
+
+ ## Merge Details
+ ### Merge Method
+
+ This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method, with /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-14B/snapshots/97e1e76335b7017d8f67c08a19d103c0504298c9 as the base.
+
+ ### Models Merged
+
+ The following models were included in the merge:
+ * /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-14B-Instruct/snapshots/cf98f3b3bbb457ad9e2bb7baf9a0125b6b88caa8
+
+ ### Configuration
+
+ The following YAML configuration was used to produce this model:
+
+ ```yaml
+ models:
+   - model: /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-14B-Instruct/snapshots/cf98f3b3bbb457ad9e2bb7baf9a0125b6b88caa8
+     parameters:
+       weight: 1
+       density: 1
+ merge_method: ties
+ base_model: /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-14B/snapshots/97e1e76335b7017d8f67c08a19d103c0504298c9
+ parameters:
+   weight: 1
+   density: 1
+ normalize: true
+ int8_mask: true
+ dtype: float16
+ ```
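
The recipe above can be reproduced with the mergekit CLI. A minimal sketch, assuming `mergekit` is installed (`pip install mergekit`) and substituting the Hub IDs `Qwen/Qwen2.5-14B` and `Qwen/Qwen2.5-14B-Instruct` for the local snapshot paths; the output directory name is illustrative.

```python
# Write the merge recipe from the card to disk, then run it through mergekit.
import subprocess

CONFIG = """\
models:
  - model: Qwen/Qwen2.5-14B-Instruct  # stand-in for the local snapshot path above
    parameters:
      weight: 1
      density: 1
merge_method: ties
base_model: Qwen/Qwen2.5-14B          # stand-in for the local snapshot path above
parameters:
  weight: 1
  density: 1
normalize: true
int8_mask: true
dtype: float16
"""

with open("ties_config.yml", "w", encoding="utf-8") as f:
    f.write(CONFIG)

# mergekit-yaml <config> <output-dir> is mergekit's standard CLI entry point.
subprocess.run(["mergekit-yaml", "ties_config.yml", "./test_model_14B"], check=True)
```

Note that with a single merged model at `weight: 1` and `density: 1`, TIES performs no pruning or sign election, so the result effectively applies the full Instruct task vector on top of the base model.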
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-14B/snapshots/97e1e76335b7017d8f67c08a19d103c0504298c9",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 13824,
+   "max_position_embeddings": 131072,
+   "max_window_layers": 48,
+   "model_type": "qwen2",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 48,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.48.1",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
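
Since the config declares the stock `Qwen2ForCausalLM` architecture, the merged checkpoint loads with plain `transformers`. A minimal loading sketch; `./test_model_14B` is an assumed local path to these files.

```python
# Load the merged model in float16 and run a short generation smoke test.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "./test_model_14B"  # assumed local path to the uploaded files
model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16" in config.json
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_dir)

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```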
mergekit_config.yml ADDED
@@ -0,0 +1,13 @@
+ models:
+   - model: /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-14B-Instruct/snapshots/cf98f3b3bbb457ad9e2bb7baf9a0125b6b88caa8
+     parameters:
+       weight: 1
+       density: 1
+ merge_method: ties
+ base_model: /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-14B/snapshots/97e1e76335b7017d8f67c08a19d103c0504298c9
+ parameters:
+   weight: 1
+   density: 1
+ normalize: true
+ int8_mask: true
+ dtype: float16
model-00001-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a848cbfde35ac4bdaeb346df3bc2e4725c7aa792de73ad86394466f1d989087
+ size 1557135488
model-00002-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d22076be13b92f6f6cb305d40c41a95e0a1ffe68d2dbc0bae08825c10750441a
+ size 1557135496
model-00003-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0b0d0205e95a2e75ffed19fb7811d88aec5cf039f2f063fe45bae715fa90ea1
+ size 985720928
model-00004-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:837fbf5ae072c486aeeedd1b8668f02249a3b0321fee4d5a04df9cf3fa4f6b14
+ size 949020896
model-00005-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8901af8efb990e82876c254f85414c1888c1891f484835a9f495a4e4de395cbb
+ size 959519288
model-00006-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a3882db619eac8bcc6e77321e861b6226f2d06b8946d957fcdf64b0a6a6875c
+ size 959519288
model-00007-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:face3f012b28934e73063872aae2f44ff047474335f13bfcdd8a7cfc8226cc97
+ size 985710608
model-00008-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a87d4d75d7b511f2a582ad5252d7900bf76b53fb8e7779045c896cf49a5d098e
+ size 949020896
model-00009-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e6716900210ba75a45f79e0f51e7605ea40126a118a388fe91116e169739589
+ size 959519288
model-00010-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad389e2f8acdb487777c3b142870ee9e7b7c6a87955387bf6698eb8caf8bb85a
+ size 959519280
model-00011-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7287af8e3fd49725021d04bbaffbfe968159cc2cbb3dabc52fc6d27cbf613468
+ size 985710608
model-00012-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:927c9d441366dbb330f838e87f001dedb288a51351d3eced6c99835da79d65d6
+ size 949020896
model-00013-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38dbe661fccd52f46f5c5d8b4504ad6402d042269ef0754e868000b4719ccb55
+ size 959519288
model-00014-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1faf4519bc74bc8bf4ff9af045f8f7d20e12e93c915ed84b2e2d6f9da7bf95d7
+ size 959519288
model-00015-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62a23dae4f78155c34cf961adbafffc89a63859ef29148e0097fc4feb5b8e12d
+ size 985710608
model-00016-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea57391c328e8837d7a6f5b2f1f5893be6e1c2760639b1333a3968282c51ca4f
+ size 949020888
model-00017-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41944e2696e23cbaec0155fbb9391a28732482fb0859140815d2ceedab388993
+ size 959519288
model-00018-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13523c1e8867c8082fc92bc72ed84eb324b4d4e8403ec4236a242781048be24c
+ size 959519288
model-00019-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fa273f76ef7d326836c8b8413dad84ac15a8d819fffbf308c971b4c70fe1bf2
+ size 985710608
model-00020-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5ec6f5f7a70f462a9d9297f35d0486061b0102ca92d77102114a8ef5274997c
+ size 949020896
model-00021-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f3a8c58baaca3b180eb8018d5f7b6282d6b91e3060d2346db511e8634d4624a
+ size 959519288
model-00022-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53dff6eeed0e0dcda18b41f20719219cbacd8bdbd3964fb7a3674781893c7711
+ size 959519280
model-00023-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd84d6bc4eb62bef9ca3aa7b938033af9e1ebc48c69914ad3a6d5393799bcb60
+ size 985710608
model-00024-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee0f7417d1fb4d69f8ba78281c69c9f9610ba75237576795e606477a87586fac
+ size 949020896
model-00025-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62767e0b74ae659405eb5d922750bee5338a58e7ae25d86c289ad463c126de31
+ size 959519288
model-00026-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f52119fe58493cdaa05c0a90e081c6550a9dcafdbe41afc8fa3ae1c631778ee
+ size 959519288
model-00027-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e60e5b0d127463386b397e439e5c76d58647d0aa6982ac02652b88f6a276c594
+ size 985710600
model-00028-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:130e7b80034de724c75135ccd545d4b747ba11c3c29800dcc076787c94fb80fe
+ size 949020880
model-00029-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9504c20991eebe57affe77b3959a2966f2fd5e42e5051224ba4ae7a2172cf84
+ size 959519264
model-00030-of-00030.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:964b781142c07a51585a02243209c3667fca48d72e967811077fcda7eb05ceff
+ size 408980696
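
Each `*.safetensors` entry above is a Git LFS pointer rather than the weights themselves: `oid sha256:<hex>` is the SHA-256 digest of the actual file and `size` its byte length. A small verification sketch for a downloaded shard, using the values from the first pointer above:

```python
# Verify a downloaded shard against its Git LFS pointer (spec v1).
import hashlib

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

print(verify_lfs_pointer(
    "model-00001-of-00030.safetensors",
    "5a848cbfde35ac4bdaeb346df3bc2e4725c7aa792de73ad86394466f1d989087",
    1557135488,
))
```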
model.safetensors.index.json ADDED
@@ -0,0 +1 @@
+ {"metadata": {"mergekit_version": "0.0.6", "total_size": 29540067328}, "weight_map": {"lm_head.weight": "model-00001-of-00030.safetensors", "model.embed_tokens.weight": "model-00002-of-00030.safetensors", "model.layers.0.input_layernorm.weight": "model-00003-of-00030.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00003-of-00030.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00003-of-00030.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00003-of-00030.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00003-of-00030.safetensors", "model.layers.0.self_attn.k_proj.bias": "model-00003-of-00030.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00003-of-00030.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00003-of-00030.safetensors", "model.layers.0.self_attn.q_proj.bias": "model-00003-of-00030.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00003-of-00030.safetensors", "model.layers.0.self_attn.v_proj.bias": "model-00003-of-00030.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00003-of-00030.safetensors", "model.layers.1.input_layernorm.weight": "model-00003-of-00030.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00003-of-00030.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00003-of-00030.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00003-of-00030.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00003-of-00030.safetensors", "model.layers.1.self_attn.k_proj.bias": "model-00003-of-00030.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00003-of-00030.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00004-of-00030.safetensors", "model.layers.1.self_attn.q_proj.bias": "model-00004-of-00030.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00004-of-00030.safetensors", "model.layers.1.self_attn.v_proj.bias": "model-00004-of-00030.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00004-of-00030.safetensors", "model.layers.10.input_layernorm.weight": "model-00004-of-00030.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00004-of-00030.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00004-of-00030.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00004-of-00030.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00004-of-00030.safetensors", "model.layers.10.self_attn.k_proj.bias": "model-00004-of-00030.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00004-of-00030.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00004-of-00030.safetensors", "model.layers.10.self_attn.q_proj.bias": "model-00004-of-00030.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00004-of-00030.safetensors", "model.layers.10.self_attn.v_proj.bias": "model-00004-of-00030.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00004-of-00030.safetensors", "model.layers.11.input_layernorm.weight": "model-00004-of-00030.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00004-of-00030.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00004-of-00030.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00005-of-00030.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00005-of-00030.safetensors", "model.layers.11.self_attn.k_proj.bias": "model-00005-of-00030.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00005-of-00030.safetensors", 
"model.layers.11.self_attn.o_proj.weight": "model-00005-of-00030.safetensors", "model.layers.11.self_attn.q_proj.bias": "model-00005-of-00030.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00005-of-00030.safetensors", "model.layers.11.self_attn.v_proj.bias": "model-00005-of-00030.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00005-of-00030.safetensors", "model.layers.12.input_layernorm.weight": "model-00005-of-00030.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00005-of-00030.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00005-of-00030.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00005-of-00030.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00005-of-00030.safetensors", "model.layers.12.self_attn.k_proj.bias": "model-00005-of-00030.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00005-of-00030.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00005-of-00030.safetensors", "model.layers.12.self_attn.q_proj.bias": "model-00005-of-00030.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00005-of-00030.safetensors", "model.layers.12.self_attn.v_proj.bias": "model-00005-of-00030.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00005-of-00030.safetensors", "model.layers.13.input_layernorm.weight": "model-00005-of-00030.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00005-of-00030.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00006-of-00030.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00006-of-00030.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00006-of-00030.safetensors", "model.layers.13.self_attn.k_proj.bias": "model-00006-of-00030.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00006-of-00030.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00006-of-00030.safetensors", "model.layers.13.self_attn.q_proj.bias": "model-00006-of-00030.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00006-of-00030.safetensors", "model.layers.13.self_attn.v_proj.bias": "model-00006-of-00030.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00006-of-00030.safetensors", "model.layers.14.input_layernorm.weight": "model-00006-of-00030.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00006-of-00030.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00006-of-00030.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00006-of-00030.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00006-of-00030.safetensors", "model.layers.14.self_attn.k_proj.bias": "model-00006-of-00030.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00006-of-00030.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00006-of-00030.safetensors", "model.layers.14.self_attn.q_proj.bias": "model-00006-of-00030.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00006-of-00030.safetensors", "model.layers.14.self_attn.v_proj.bias": "model-00006-of-00030.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00006-of-00030.safetensors", "model.layers.15.input_layernorm.weight": "model-00006-of-00030.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00007-of-00030.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00007-of-00030.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00007-of-00030.safetensors", 
"model.layers.15.post_attention_layernorm.weight": "model-00007-of-00030.safetensors", "model.layers.15.self_attn.k_proj.bias": "model-00007-of-00030.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00007-of-00030.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00007-of-00030.safetensors", "model.layers.15.self_attn.q_proj.bias": "model-00007-of-00030.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00007-of-00030.safetensors", "model.layers.15.self_attn.v_proj.bias": "model-00007-of-00030.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00007-of-00030.safetensors", "model.layers.16.input_layernorm.weight": "model-00007-of-00030.safetensors", "model.layers.16.mlp.down_proj.weight": "model-00007-of-00030.safetensors", "model.layers.16.mlp.gate_proj.weight": "model-00007-of-00030.safetensors", "model.layers.16.mlp.up_proj.weight": "model-00007-of-00030.safetensors", "model.layers.16.post_attention_layernorm.weight": "model-00007-of-00030.safetensors", "model.layers.16.self_attn.k_proj.bias": "model-00007-of-00030.safetensors", "model.layers.16.self_attn.k_proj.weight": "model-00007-of-00030.safetensors", "model.layers.16.self_attn.o_proj.weight": "model-00008-of-00030.safetensors", "model.layers.16.self_attn.q_proj.bias": "model-00008-of-00030.safetensors", "model.layers.16.self_attn.q_proj.weight": "model-00008-of-00030.safetensors", "model.layers.16.self_attn.v_proj.bias": "model-00008-of-00030.safetensors", "model.layers.16.self_attn.v_proj.weight": "model-00008-of-00030.safetensors", "model.layers.17.input_layernorm.weight": "model-00008-of-00030.safetensors", "model.layers.17.mlp.down_proj.weight": "model-00008-of-00030.safetensors", "model.layers.17.mlp.gate_proj.weight": "model-00008-of-00030.safetensors", "model.layers.17.mlp.up_proj.weight": "model-00008-of-00030.safetensors", "model.layers.17.post_attention_layernorm.weight": "model-00008-of-00030.safetensors", "model.layers.17.self_attn.k_proj.bias": "model-00008-of-00030.safetensors", "model.layers.17.self_attn.k_proj.weight": "model-00008-of-00030.safetensors", "model.layers.17.self_attn.o_proj.weight": "model-00008-of-00030.safetensors", "model.layers.17.self_attn.q_proj.bias": "model-00008-of-00030.safetensors", "model.layers.17.self_attn.q_proj.weight": "model-00008-of-00030.safetensors", "model.layers.17.self_attn.v_proj.bias": "model-00008-of-00030.safetensors", "model.layers.17.self_attn.v_proj.weight": "model-00008-of-00030.safetensors", "model.layers.18.input_layernorm.weight": "model-00008-of-00030.safetensors", "model.layers.18.mlp.down_proj.weight": "model-00008-of-00030.safetensors", "model.layers.18.mlp.gate_proj.weight": "model-00008-of-00030.safetensors", "model.layers.18.mlp.up_proj.weight": "model-00009-of-00030.safetensors", "model.layers.18.post_attention_layernorm.weight": "model-00009-of-00030.safetensors", "model.layers.18.self_attn.k_proj.bias": "model-00009-of-00030.safetensors", "model.layers.18.self_attn.k_proj.weight": "model-00009-of-00030.safetensors", "model.layers.18.self_attn.o_proj.weight": "model-00009-of-00030.safetensors", "model.layers.18.self_attn.q_proj.bias": "model-00009-of-00030.safetensors", "model.layers.18.self_attn.q_proj.weight": "model-00009-of-00030.safetensors", "model.layers.18.self_attn.v_proj.bias": "model-00009-of-00030.safetensors", "model.layers.18.self_attn.v_proj.weight": "model-00009-of-00030.safetensors", "model.layers.19.input_layernorm.weight": "model-00009-of-00030.safetensors", 
"model.layers.19.mlp.down_proj.weight": "model-00009-of-00030.safetensors", "model.layers.19.mlp.gate_proj.weight": "model-00009-of-00030.safetensors", "model.layers.19.mlp.up_proj.weight": "model-00009-of-00030.safetensors", "model.layers.19.post_attention_layernorm.weight": "model-00009-of-00030.safetensors", "model.layers.19.self_attn.k_proj.bias": "model-00009-of-00030.safetensors", "model.layers.19.self_attn.k_proj.weight": "model-00009-of-00030.safetensors", "model.layers.19.self_attn.o_proj.weight": "model-00009-of-00030.safetensors", "model.layers.19.self_attn.q_proj.bias": "model-00009-of-00030.safetensors", "model.layers.19.self_attn.q_proj.weight": "model-00009-of-00030.safetensors", "model.layers.19.self_attn.v_proj.bias": "model-00009-of-00030.safetensors", "model.layers.19.self_attn.v_proj.weight": "model-00009-of-00030.safetensors", "model.layers.2.input_layernorm.weight": "model-00009-of-00030.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00009-of-00030.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00010-of-00030.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00010-of-00030.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00010-of-00030.safetensors", "model.layers.2.self_attn.k_proj.bias": "model-00010-of-00030.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00010-of-00030.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00010-of-00030.safetensors", "model.layers.2.self_attn.q_proj.bias": "model-00010-of-00030.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00010-of-00030.safetensors", "model.layers.2.self_attn.v_proj.bias": "model-00010-of-00030.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00010-of-00030.safetensors", "model.layers.20.input_layernorm.weight": "model-00010-of-00030.safetensors", "model.layers.20.mlp.down_proj.weight": "model-00010-of-00030.safetensors", "model.layers.20.mlp.gate_proj.weight": "model-00010-of-00030.safetensors", "model.layers.20.mlp.up_proj.weight": "model-00010-of-00030.safetensors", "model.layers.20.post_attention_layernorm.weight": "model-00010-of-00030.safetensors", "model.layers.20.self_attn.k_proj.bias": "model-00010-of-00030.safetensors", "model.layers.20.self_attn.k_proj.weight": "model-00010-of-00030.safetensors", "model.layers.20.self_attn.o_proj.weight": "model-00010-of-00030.safetensors", "model.layers.20.self_attn.q_proj.bias": "model-00010-of-00030.safetensors", "model.layers.20.self_attn.q_proj.weight": "model-00010-of-00030.safetensors", "model.layers.20.self_attn.v_proj.bias": "model-00010-of-00030.safetensors", "model.layers.20.self_attn.v_proj.weight": "model-00010-of-00030.safetensors", "model.layers.21.input_layernorm.weight": "model-00010-of-00030.safetensors", "model.layers.21.mlp.down_proj.weight": "model-00011-of-00030.safetensors", "model.layers.21.mlp.gate_proj.weight": "model-00011-of-00030.safetensors", "model.layers.21.mlp.up_proj.weight": "model-00011-of-00030.safetensors", "model.layers.21.post_attention_layernorm.weight": "model-00011-of-00030.safetensors", "model.layers.21.self_attn.k_proj.bias": "model-00011-of-00030.safetensors", "model.layers.21.self_attn.k_proj.weight": "model-00011-of-00030.safetensors", "model.layers.21.self_attn.o_proj.weight": "model-00011-of-00030.safetensors", "model.layers.21.self_attn.q_proj.bias": "model-00011-of-00030.safetensors", "model.layers.21.self_attn.q_proj.weight": "model-00011-of-00030.safetensors", "model.layers.21.self_attn.v_proj.bias": 
"model-00011-of-00030.safetensors", "model.layers.21.self_attn.v_proj.weight": "model-00011-of-00030.safetensors", "model.layers.22.input_layernorm.weight": "model-00011-of-00030.safetensors", "model.layers.22.mlp.down_proj.weight": "model-00011-of-00030.safetensors", "model.layers.22.mlp.gate_proj.weight": "model-00011-of-00030.safetensors", "model.layers.22.mlp.up_proj.weight": "model-00011-of-00030.safetensors", "model.layers.22.post_attention_layernorm.weight": "model-00011-of-00030.safetensors", "model.layers.22.self_attn.k_proj.bias": "model-00011-of-00030.safetensors", "model.layers.22.self_attn.k_proj.weight": "model-00011-of-00030.safetensors", "model.layers.22.self_attn.o_proj.weight": "model-00012-of-00030.safetensors", "model.layers.22.self_attn.q_proj.bias": "model-00012-of-00030.safetensors", "model.layers.22.self_attn.q_proj.weight": "model-00012-of-00030.safetensors", "model.layers.22.self_attn.v_proj.bias": "model-00012-of-00030.safetensors", "model.layers.22.self_attn.v_proj.weight": "model-00012-of-00030.safetensors", "model.layers.23.input_layernorm.weight": "model-00012-of-00030.safetensors", "model.layers.23.mlp.down_proj.weight": "model-00012-of-00030.safetensors", "model.layers.23.mlp.gate_proj.weight": "model-00012-of-00030.safetensors", "model.layers.23.mlp.up_proj.weight": "model-00012-of-00030.safetensors", "model.layers.23.post_attention_layernorm.weight": "model-00012-of-00030.safetensors", "model.layers.23.self_attn.k_proj.bias": "model-00012-of-00030.safetensors", "model.layers.23.self_attn.k_proj.weight": "model-00012-of-00030.safetensors", "model.layers.23.self_attn.o_proj.weight": "model-00012-of-00030.safetensors", "model.layers.23.self_attn.q_proj.bias": "model-00012-of-00030.safetensors", "model.layers.23.self_attn.q_proj.weight": "model-00012-of-00030.safetensors", "model.layers.23.self_attn.v_proj.bias": "model-00012-of-00030.safetensors", "model.layers.23.self_attn.v_proj.weight": "model-00012-of-00030.safetensors", "model.layers.24.input_layernorm.weight": "model-00012-of-00030.safetensors", "model.layers.24.mlp.down_proj.weight": "model-00012-of-00030.safetensors", "model.layers.24.mlp.gate_proj.weight": "model-00012-of-00030.safetensors", "model.layers.24.mlp.up_proj.weight": "model-00013-of-00030.safetensors", "model.layers.24.post_attention_layernorm.weight": "model-00013-of-00030.safetensors", "model.layers.24.self_attn.k_proj.bias": "model-00013-of-00030.safetensors", "model.layers.24.self_attn.k_proj.weight": "model-00013-of-00030.safetensors", "model.layers.24.self_attn.o_proj.weight": "model-00013-of-00030.safetensors", "model.layers.24.self_attn.q_proj.bias": "model-00013-of-00030.safetensors", "model.layers.24.self_attn.q_proj.weight": "model-00013-of-00030.safetensors", "model.layers.24.self_attn.v_proj.bias": "model-00013-of-00030.safetensors", "model.layers.24.self_attn.v_proj.weight": "model-00013-of-00030.safetensors", "model.layers.25.input_layernorm.weight": "model-00013-of-00030.safetensors", "model.layers.25.mlp.down_proj.weight": "model-00013-of-00030.safetensors", "model.layers.25.mlp.gate_proj.weight": "model-00013-of-00030.safetensors", "model.layers.25.mlp.up_proj.weight": "model-00013-of-00030.safetensors", "model.layers.25.post_attention_layernorm.weight": "model-00013-of-00030.safetensors", "model.layers.25.self_attn.k_proj.bias": "model-00013-of-00030.safetensors", "model.layers.25.self_attn.k_proj.weight": "model-00013-of-00030.safetensors", "model.layers.25.self_attn.o_proj.weight": "model-00013-of-00030.safetensors", 
"model.layers.25.self_attn.q_proj.bias": "model-00013-of-00030.safetensors", "model.layers.25.self_attn.q_proj.weight": "model-00013-of-00030.safetensors", "model.layers.25.self_attn.v_proj.bias": "model-00013-of-00030.safetensors", "model.layers.25.self_attn.v_proj.weight": "model-00013-of-00030.safetensors", "model.layers.26.input_layernorm.weight": "model-00013-of-00030.safetensors", "model.layers.26.mlp.down_proj.weight": "model-00013-of-00030.safetensors", "model.layers.26.mlp.gate_proj.weight": "model-00014-of-00030.safetensors", "model.layers.26.mlp.up_proj.weight": "model-00014-of-00030.safetensors", "model.layers.26.post_attention_layernorm.weight": "model-00014-of-00030.safetensors", "model.layers.26.self_attn.k_proj.bias": "model-00014-of-00030.safetensors", "model.layers.26.self_attn.k_proj.weight": "model-00014-of-00030.safetensors", "model.layers.26.self_attn.o_proj.weight": "model-00014-of-00030.safetensors", "model.layers.26.self_attn.q_proj.bias": "model-00014-of-00030.safetensors", "model.layers.26.self_attn.q_proj.weight": "model-00014-of-00030.safetensors", "model.layers.26.self_attn.v_proj.bias": "model-00014-of-00030.safetensors", "model.layers.26.self_attn.v_proj.weight": "model-00014-of-00030.safetensors", "model.layers.27.input_layernorm.weight": "model-00014-of-00030.safetensors", "model.layers.27.mlp.down_proj.weight": "model-00014-of-00030.safetensors", "model.layers.27.mlp.gate_proj.weight": "model-00014-of-00030.safetensors", "model.layers.27.mlp.up_proj.weight": "model-00014-of-00030.safetensors", "model.layers.27.post_attention_layernorm.weight": "model-00014-of-00030.safetensors", "model.layers.27.self_attn.k_proj.bias": "model-00014-of-00030.safetensors", "model.layers.27.self_attn.k_proj.weight": "model-00014-of-00030.safetensors", "model.layers.27.self_attn.o_proj.weight": "model-00014-of-00030.safetensors", "model.layers.27.self_attn.q_proj.bias": "model-00014-of-00030.safetensors", "model.layers.27.self_attn.q_proj.weight": "model-00014-of-00030.safetensors", "model.layers.27.self_attn.v_proj.bias": "model-00014-of-00030.safetensors", "model.layers.27.self_attn.v_proj.weight": "model-00014-of-00030.safetensors", "model.layers.28.input_layernorm.weight": "model-00014-of-00030.safetensors", "model.layers.28.mlp.down_proj.weight": "model-00015-of-00030.safetensors", "model.layers.28.mlp.gate_proj.weight": "model-00015-of-00030.safetensors", "model.layers.28.mlp.up_proj.weight": "model-00015-of-00030.safetensors", "model.layers.28.post_attention_layernorm.weight": "model-00015-of-00030.safetensors", "model.layers.28.self_attn.k_proj.bias": "model-00015-of-00030.safetensors", "model.layers.28.self_attn.k_proj.weight": "model-00015-of-00030.safetensors", "model.layers.28.self_attn.o_proj.weight": "model-00015-of-00030.safetensors", "model.layers.28.self_attn.q_proj.bias": "model-00015-of-00030.safetensors", "model.layers.28.self_attn.q_proj.weight": "model-00015-of-00030.safetensors", "model.layers.28.self_attn.v_proj.bias": "model-00015-of-00030.safetensors", "model.layers.28.self_attn.v_proj.weight": "model-00015-of-00030.safetensors", "model.layers.29.input_layernorm.weight": "model-00015-of-00030.safetensors", "model.layers.29.mlp.down_proj.weight": "model-00015-of-00030.safetensors", "model.layers.29.mlp.gate_proj.weight": "model-00015-of-00030.safetensors", "model.layers.29.mlp.up_proj.weight": "model-00015-of-00030.safetensors", "model.layers.29.post_attention_layernorm.weight": "model-00015-of-00030.safetensors", 
"model.layers.29.self_attn.k_proj.bias": "model-00015-of-00030.safetensors", "model.layers.29.self_attn.k_proj.weight": "model-00015-of-00030.safetensors", "model.layers.29.self_attn.o_proj.weight": "model-00016-of-00030.safetensors", "model.layers.29.self_attn.q_proj.bias": "model-00016-of-00030.safetensors", "model.layers.29.self_attn.q_proj.weight": "model-00016-of-00030.safetensors", "model.layers.29.self_attn.v_proj.bias": "model-00016-of-00030.safetensors", "model.layers.29.self_attn.v_proj.weight": "model-00016-of-00030.safetensors", "model.layers.3.input_layernorm.weight": "model-00016-of-00030.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00016-of-00030.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00016-of-00030.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00016-of-00030.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00016-of-00030.safetensors", "model.layers.3.self_attn.k_proj.bias": "model-00016-of-00030.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00016-of-00030.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00016-of-00030.safetensors", "model.layers.3.self_attn.q_proj.bias": "model-00016-of-00030.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00016-of-00030.safetensors", "model.layers.3.self_attn.v_proj.bias": "model-00016-of-00030.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00016-of-00030.safetensors", "model.layers.30.input_layernorm.weight": "model-00016-of-00030.safetensors", "model.layers.30.mlp.down_proj.weight": "model-00016-of-00030.safetensors", "model.layers.30.mlp.gate_proj.weight": "model-00016-of-00030.safetensors", "model.layers.30.mlp.up_proj.weight": "model-00017-of-00030.safetensors", "model.layers.30.post_attention_layernorm.weight": "model-00017-of-00030.safetensors", "model.layers.30.self_attn.k_proj.bias": "model-00017-of-00030.safetensors", "model.layers.30.self_attn.k_proj.weight": "model-00017-of-00030.safetensors", "model.layers.30.self_attn.o_proj.weight": "model-00017-of-00030.safetensors", "model.layers.30.self_attn.q_proj.bias": "model-00017-of-00030.safetensors", "model.layers.30.self_attn.q_proj.weight": "model-00017-of-00030.safetensors", "model.layers.30.self_attn.v_proj.bias": "model-00017-of-00030.safetensors", "model.layers.30.self_attn.v_proj.weight": "model-00017-of-00030.safetensors", "model.layers.31.input_layernorm.weight": "model-00017-of-00030.safetensors", "model.layers.31.mlp.down_proj.weight": "model-00017-of-00030.safetensors", "model.layers.31.mlp.gate_proj.weight": "model-00017-of-00030.safetensors", "model.layers.31.mlp.up_proj.weight": "model-00017-of-00030.safetensors", "model.layers.31.post_attention_layernorm.weight": "model-00017-of-00030.safetensors", "model.layers.31.self_attn.k_proj.bias": "model-00017-of-00030.safetensors", "model.layers.31.self_attn.k_proj.weight": "model-00017-of-00030.safetensors", "model.layers.31.self_attn.o_proj.weight": "model-00017-of-00030.safetensors", "model.layers.31.self_attn.q_proj.bias": "model-00017-of-00030.safetensors", "model.layers.31.self_attn.q_proj.weight": "model-00017-of-00030.safetensors", "model.layers.31.self_attn.v_proj.bias": "model-00017-of-00030.safetensors", "model.layers.31.self_attn.v_proj.weight": "model-00017-of-00030.safetensors", "model.layers.32.input_layernorm.weight": "model-00017-of-00030.safetensors", "model.layers.32.mlp.down_proj.weight": "model-00017-of-00030.safetensors", "model.layers.32.mlp.gate_proj.weight": 
"model-00018-of-00030.safetensors", "model.layers.32.mlp.up_proj.weight": "model-00018-of-00030.safetensors", "model.layers.32.post_attention_layernorm.weight": "model-00018-of-00030.safetensors", "model.layers.32.self_attn.k_proj.bias": "model-00018-of-00030.safetensors", "model.layers.32.self_attn.k_proj.weight": "model-00018-of-00030.safetensors", "model.layers.32.self_attn.o_proj.weight": "model-00018-of-00030.safetensors", "model.layers.32.self_attn.q_proj.bias": "model-00018-of-00030.safetensors", "model.layers.32.self_attn.q_proj.weight": "model-00018-of-00030.safetensors", "model.layers.32.self_attn.v_proj.bias": "model-00018-of-00030.safetensors", "model.layers.32.self_attn.v_proj.weight": "model-00018-of-00030.safetensors", "model.layers.33.input_layernorm.weight": "model-00018-of-00030.safetensors", "model.layers.33.mlp.down_proj.weight": "model-00018-of-00030.safetensors", "model.layers.33.mlp.gate_proj.weight": "model-00018-of-00030.safetensors", "model.layers.33.mlp.up_proj.weight": "model-00018-of-00030.safetensors", "model.layers.33.post_attention_layernorm.weight": "model-00018-of-00030.safetensors", "model.layers.33.self_attn.k_proj.bias": "model-00018-of-00030.safetensors", "model.layers.33.self_attn.k_proj.weight": "model-00018-of-00030.safetensors", "model.layers.33.self_attn.o_proj.weight": "model-00018-of-00030.safetensors", "model.layers.33.self_attn.q_proj.bias": "model-00018-of-00030.safetensors", "model.layers.33.self_attn.q_proj.weight": "model-00018-of-00030.safetensors", "model.layers.33.self_attn.v_proj.bias": "model-00018-of-00030.safetensors", "model.layers.33.self_attn.v_proj.weight": "model-00018-of-00030.safetensors", "model.layers.34.input_layernorm.weight": "model-00018-of-00030.safetensors", "model.layers.34.mlp.down_proj.weight": "model-00019-of-00030.safetensors", "model.layers.34.mlp.gate_proj.weight": "model-00019-of-00030.safetensors", "model.layers.34.mlp.up_proj.weight": "model-00019-of-00030.safetensors", "model.layers.34.post_attention_layernorm.weight": "model-00019-of-00030.safetensors", "model.layers.34.self_attn.k_proj.bias": "model-00019-of-00030.safetensors", "model.layers.34.self_attn.k_proj.weight": "model-00019-of-00030.safetensors", "model.layers.34.self_attn.o_proj.weight": "model-00019-of-00030.safetensors", "model.layers.34.self_attn.q_proj.bias": "model-00019-of-00030.safetensors", "model.layers.34.self_attn.q_proj.weight": "model-00019-of-00030.safetensors", "model.layers.34.self_attn.v_proj.bias": "model-00019-of-00030.safetensors", "model.layers.34.self_attn.v_proj.weight": "model-00019-of-00030.safetensors", "model.layers.35.input_layernorm.weight": "model-00019-of-00030.safetensors", "model.layers.35.mlp.down_proj.weight": "model-00019-of-00030.safetensors", "model.layers.35.mlp.gate_proj.weight": "model-00019-of-00030.safetensors", "model.layers.35.mlp.up_proj.weight": "model-00019-of-00030.safetensors", "model.layers.35.post_attention_layernorm.weight": "model-00019-of-00030.safetensors", "model.layers.35.self_attn.k_proj.bias": "model-00019-of-00030.safetensors", "model.layers.35.self_attn.k_proj.weight": "model-00019-of-00030.safetensors", "model.layers.35.self_attn.o_proj.weight": "model-00020-of-00030.safetensors", "model.layers.35.self_attn.q_proj.bias": "model-00020-of-00030.safetensors", "model.layers.35.self_attn.q_proj.weight": "model-00020-of-00030.safetensors", "model.layers.35.self_attn.v_proj.bias": "model-00020-of-00030.safetensors", "model.layers.35.self_attn.v_proj.weight": 
"model-00020-of-00030.safetensors", "model.layers.36.input_layernorm.weight": "model-00020-of-00030.safetensors", "model.layers.36.mlp.down_proj.weight": "model-00020-of-00030.safetensors", "model.layers.36.mlp.gate_proj.weight": "model-00020-of-00030.safetensors", "model.layers.36.mlp.up_proj.weight": "model-00020-of-00030.safetensors", "model.layers.36.post_attention_layernorm.weight": "model-00020-of-00030.safetensors", "model.layers.36.self_attn.k_proj.bias": "model-00020-of-00030.safetensors", "model.layers.36.self_attn.k_proj.weight": "model-00020-of-00030.safetensors", "model.layers.36.self_attn.o_proj.weight": "model-00020-of-00030.safetensors", "model.layers.36.self_attn.q_proj.bias": "model-00020-of-00030.safetensors", "model.layers.36.self_attn.q_proj.weight": "model-00020-of-00030.safetensors", "model.layers.36.self_attn.v_proj.bias": "model-00020-of-00030.safetensors", "model.layers.36.self_attn.v_proj.weight": "model-00020-of-00030.safetensors", "model.layers.37.input_layernorm.weight": "model-00020-of-00030.safetensors", "model.layers.37.mlp.down_proj.weight": "model-00020-of-00030.safetensors", "model.layers.37.mlp.gate_proj.weight": "model-00020-of-00030.safetensors", "model.layers.37.mlp.up_proj.weight": "model-00021-of-00030.safetensors", "model.layers.37.post_attention_layernorm.weight": "model-00021-of-00030.safetensors", "model.layers.37.self_attn.k_proj.bias": "model-00021-of-00030.safetensors", "model.layers.37.self_attn.k_proj.weight": "model-00021-of-00030.safetensors", "model.layers.37.self_attn.o_proj.weight": "model-00021-of-00030.safetensors", "model.layers.37.self_attn.q_proj.bias": "model-00021-of-00030.safetensors", "model.layers.37.self_attn.q_proj.weight": "model-00021-of-00030.safetensors", "model.layers.37.self_attn.v_proj.bias": "model-00021-of-00030.safetensors", "model.layers.37.self_attn.v_proj.weight": "model-00021-of-00030.safetensors", "model.layers.38.input_layernorm.weight": "model-00021-of-00030.safetensors", "model.layers.38.mlp.down_proj.weight": "model-00021-of-00030.safetensors", "model.layers.38.mlp.gate_proj.weight": "model-00021-of-00030.safetensors", "model.layers.38.mlp.up_proj.weight": "model-00021-of-00030.safetensors", "model.layers.38.post_attention_layernorm.weight": "model-00021-of-00030.safetensors", "model.layers.38.self_attn.k_proj.bias": "model-00021-of-00030.safetensors", "model.layers.38.self_attn.k_proj.weight": "model-00021-of-00030.safetensors", "model.layers.38.self_attn.o_proj.weight": "model-00021-of-00030.safetensors", "model.layers.38.self_attn.q_proj.bias": "model-00021-of-00030.safetensors", "model.layers.38.self_attn.q_proj.weight": "model-00021-of-00030.safetensors", "model.layers.38.self_attn.v_proj.bias": "model-00021-of-00030.safetensors", "model.layers.38.self_attn.v_proj.weight": "model-00021-of-00030.safetensors", "model.layers.39.input_layernorm.weight": "model-00021-of-00030.safetensors", "model.layers.39.mlp.down_proj.weight": "model-00021-of-00030.safetensors", "model.layers.39.mlp.gate_proj.weight": "model-00022-of-00030.safetensors", "model.layers.39.mlp.up_proj.weight": "model-00022-of-00030.safetensors", "model.layers.39.post_attention_layernorm.weight": "model-00022-of-00030.safetensors", "model.layers.39.self_attn.k_proj.bias": "model-00022-of-00030.safetensors", "model.layers.39.self_attn.k_proj.weight": "model-00022-of-00030.safetensors", "model.layers.39.self_attn.o_proj.weight": "model-00022-of-00030.safetensors", "model.layers.39.self_attn.q_proj.bias": "model-00022-of-00030.safetensors", 
"model.layers.39.self_attn.q_proj.weight": "model-00022-of-00030.safetensors", "model.layers.39.self_attn.v_proj.bias": "model-00022-of-00030.safetensors", "model.layers.39.self_attn.v_proj.weight": "model-00022-of-00030.safetensors", "model.layers.4.input_layernorm.weight": "model-00022-of-00030.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00022-of-00030.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00022-of-00030.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00022-of-00030.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00022-of-00030.safetensors", "model.layers.4.self_attn.k_proj.bias": "model-00022-of-00030.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00022-of-00030.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00022-of-00030.safetensors", "model.layers.4.self_attn.q_proj.bias": "model-00022-of-00030.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00022-of-00030.safetensors", "model.layers.4.self_attn.v_proj.bias": "model-00022-of-00030.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00022-of-00030.safetensors", "model.layers.40.input_layernorm.weight": "model-00022-of-00030.safetensors", "model.layers.40.mlp.down_proj.weight": "model-00023-of-00030.safetensors", "model.layers.40.mlp.gate_proj.weight": "model-00023-of-00030.safetensors", "model.layers.40.mlp.up_proj.weight": "model-00023-of-00030.safetensors", "model.layers.40.post_attention_layernorm.weight": "model-00023-of-00030.safetensors", "model.layers.40.self_attn.k_proj.bias": "model-00023-of-00030.safetensors", "model.layers.40.self_attn.k_proj.weight": "model-00023-of-00030.safetensors", "model.layers.40.self_attn.o_proj.weight": "model-00023-of-00030.safetensors", "model.layers.40.self_attn.q_proj.bias": "model-00023-of-00030.safetensors", "model.layers.40.self_attn.q_proj.weight": "model-00023-of-00030.safetensors", "model.layers.40.self_attn.v_proj.bias": "model-00023-of-00030.safetensors", "model.layers.40.self_attn.v_proj.weight": "model-00023-of-00030.safetensors", "model.layers.41.input_layernorm.weight": "model-00023-of-00030.safetensors", "model.layers.41.mlp.down_proj.weight": "model-00023-of-00030.safetensors", "model.layers.41.mlp.gate_proj.weight": "model-00023-of-00030.safetensors", "model.layers.41.mlp.up_proj.weight": "model-00023-of-00030.safetensors", "model.layers.41.post_attention_layernorm.weight": "model-00023-of-00030.safetensors", "model.layers.41.self_attn.k_proj.bias": "model-00023-of-00030.safetensors", "model.layers.41.self_attn.k_proj.weight": "model-00023-of-00030.safetensors", "model.layers.41.self_attn.o_proj.weight": "model-00024-of-00030.safetensors", "model.layers.41.self_attn.q_proj.bias": "model-00024-of-00030.safetensors", "model.layers.41.self_attn.q_proj.weight": "model-00024-of-00030.safetensors", "model.layers.41.self_attn.v_proj.bias": "model-00024-of-00030.safetensors", "model.layers.41.self_attn.v_proj.weight": "model-00024-of-00030.safetensors", "model.layers.42.input_layernorm.weight": "model-00024-of-00030.safetensors", "model.layers.42.mlp.down_proj.weight": "model-00024-of-00030.safetensors", "model.layers.42.mlp.gate_proj.weight": "model-00024-of-00030.safetensors", "model.layers.42.mlp.up_proj.weight": "model-00024-of-00030.safetensors", "model.layers.42.post_attention_layernorm.weight": "model-00024-of-00030.safetensors", "model.layers.42.self_attn.k_proj.bias": "model-00024-of-00030.safetensors", "model.layers.42.self_attn.k_proj.weight": 
"model-00024-of-00030.safetensors", "model.layers.42.self_attn.o_proj.weight": "model-00024-of-00030.safetensors", "model.layers.42.self_attn.q_proj.bias": "model-00024-of-00030.safetensors", "model.layers.42.self_attn.q_proj.weight": "model-00024-of-00030.safetensors", "model.layers.42.self_attn.v_proj.bias": "model-00024-of-00030.safetensors", "model.layers.42.self_attn.v_proj.weight": "model-00024-of-00030.safetensors", "model.layers.43.input_layernorm.weight": "model-00024-of-00030.safetensors", "model.layers.43.mlp.down_proj.weight": "model-00024-of-00030.safetensors", "model.layers.43.mlp.gate_proj.weight": "model-00024-of-00030.safetensors", "model.layers.43.mlp.up_proj.weight": "model-00025-of-00030.safetensors", "model.layers.43.post_attention_layernorm.weight": "model-00025-of-00030.safetensors", "model.layers.43.self_attn.k_proj.bias": "model-00025-of-00030.safetensors", "model.layers.43.self_attn.k_proj.weight": "model-00025-of-00030.safetensors", "model.layers.43.self_attn.o_proj.weight": "model-00025-of-00030.safetensors", "model.layers.43.self_attn.q_proj.bias": "model-00025-of-00030.safetensors", "model.layers.43.self_attn.q_proj.weight": "model-00025-of-00030.safetensors", "model.layers.43.self_attn.v_proj.bias": "model-00025-of-00030.safetensors", "model.layers.43.self_attn.v_proj.weight": "model-00025-of-00030.safetensors", "model.layers.44.input_layernorm.weight": "model-00025-of-00030.safetensors", "model.layers.44.mlp.down_proj.weight": "model-00025-of-00030.safetensors", "model.layers.44.mlp.gate_proj.weight": "model-00025-of-00030.safetensors", "model.layers.44.mlp.up_proj.weight": "model-00025-of-00030.safetensors", "model.layers.44.post_attention_layernorm.weight": "model-00025-of-00030.safetensors", "model.layers.44.self_attn.k_proj.bias": "model-00025-of-00030.safetensors", "model.layers.44.self_attn.k_proj.weight": "model-00025-of-00030.safetensors", "model.layers.44.self_attn.o_proj.weight": "model-00025-of-00030.safetensors", "model.layers.44.self_attn.q_proj.bias": "model-00025-of-00030.safetensors", "model.layers.44.self_attn.q_proj.weight": "model-00025-of-00030.safetensors", "model.layers.44.self_attn.v_proj.bias": "model-00025-of-00030.safetensors", "model.layers.44.self_attn.v_proj.weight": "model-00025-of-00030.safetensors", "model.layers.45.input_layernorm.weight": "model-00025-of-00030.safetensors", "model.layers.45.mlp.down_proj.weight": "model-00025-of-00030.safetensors", "model.layers.45.mlp.gate_proj.weight": "model-00026-of-00030.safetensors", "model.layers.45.mlp.up_proj.weight": "model-00026-of-00030.safetensors", "model.layers.45.post_attention_layernorm.weight": "model-00026-of-00030.safetensors", "model.layers.45.self_attn.k_proj.bias": "model-00026-of-00030.safetensors", "model.layers.45.self_attn.k_proj.weight": "model-00026-of-00030.safetensors", "model.layers.45.self_attn.o_proj.weight": "model-00026-of-00030.safetensors", "model.layers.45.self_attn.q_proj.bias": "model-00026-of-00030.safetensors", "model.layers.45.self_attn.q_proj.weight": "model-00026-of-00030.safetensors", "model.layers.45.self_attn.v_proj.bias": "model-00026-of-00030.safetensors", "model.layers.45.self_attn.v_proj.weight": "model-00026-of-00030.safetensors", "model.layers.46.input_layernorm.weight": "model-00026-of-00030.safetensors", "model.layers.46.mlp.down_proj.weight": "model-00026-of-00030.safetensors", "model.layers.46.mlp.gate_proj.weight": "model-00026-of-00030.safetensors", "model.layers.46.mlp.up_proj.weight": "model-00026-of-00030.safetensors", 
"model.layers.46.post_attention_layernorm.weight": "model-00026-of-00030.safetensors", "model.layers.46.self_attn.k_proj.bias": "model-00026-of-00030.safetensors", "model.layers.46.self_attn.k_proj.weight": "model-00026-of-00030.safetensors", "model.layers.46.self_attn.o_proj.weight": "model-00026-of-00030.safetensors", "model.layers.46.self_attn.q_proj.bias": "model-00026-of-00030.safetensors", "model.layers.46.self_attn.q_proj.weight": "model-00026-of-00030.safetensors", "model.layers.46.self_attn.v_proj.bias": "model-00026-of-00030.safetensors", "model.layers.46.self_attn.v_proj.weight": "model-00026-of-00030.safetensors", "model.layers.47.input_layernorm.weight": "model-00026-of-00030.safetensors", "model.layers.47.mlp.down_proj.weight": "model-00027-of-00030.safetensors", "model.layers.47.mlp.gate_proj.weight": "model-00027-of-00030.safetensors", "model.layers.47.mlp.up_proj.weight": "model-00027-of-00030.safetensors", "model.layers.47.post_attention_layernorm.weight": "model-00027-of-00030.safetensors", "model.layers.47.self_attn.k_proj.bias": "model-00027-of-00030.safetensors", "model.layers.47.self_attn.k_proj.weight": "model-00027-of-00030.safetensors", "model.layers.47.self_attn.o_proj.weight": "model-00027-of-00030.safetensors", "model.layers.47.self_attn.q_proj.bias": "model-00027-of-00030.safetensors", "model.layers.47.self_attn.q_proj.weight": "model-00027-of-00030.safetensors", "model.layers.47.self_attn.v_proj.bias": "model-00027-of-00030.safetensors", "model.layers.47.self_attn.v_proj.weight": "model-00027-of-00030.safetensors", "model.layers.5.input_layernorm.weight": "model-00027-of-00030.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00027-of-00030.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00027-of-00030.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00027-of-00030.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00027-of-00030.safetensors", "model.layers.5.self_attn.k_proj.bias": "model-00027-of-00030.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00027-of-00030.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00028-of-00030.safetensors", "model.layers.5.self_attn.q_proj.bias": "model-00028-of-00030.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00028-of-00030.safetensors", "model.layers.5.self_attn.v_proj.bias": "model-00028-of-00030.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00028-of-00030.safetensors", "model.layers.6.input_layernorm.weight": "model-00028-of-00030.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00028-of-00030.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00028-of-00030.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00028-of-00030.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00028-of-00030.safetensors", "model.layers.6.self_attn.k_proj.bias": "model-00028-of-00030.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00028-of-00030.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00028-of-00030.safetensors", "model.layers.6.self_attn.q_proj.bias": "model-00028-of-00030.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00028-of-00030.safetensors", "model.layers.6.self_attn.v_proj.bias": "model-00028-of-00030.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00028-of-00030.safetensors", "model.layers.7.input_layernorm.weight": "model-00028-of-00030.safetensors", "model.layers.7.mlp.down_proj.weight": 
"model-00028-of-00030.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00028-of-00030.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00029-of-00030.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00029-of-00030.safetensors", "model.layers.7.self_attn.k_proj.bias": "model-00029-of-00030.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00029-of-00030.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00029-of-00030.safetensors", "model.layers.7.self_attn.q_proj.bias": "model-00029-of-00030.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00029-of-00030.safetensors", "model.layers.7.self_attn.v_proj.bias": "model-00029-of-00030.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00029-of-00030.safetensors", "model.layers.8.input_layernorm.weight": "model-00029-of-00030.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00029-of-00030.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00029-of-00030.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00029-of-00030.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00029-of-00030.safetensors", "model.layers.8.self_attn.k_proj.bias": "model-00029-of-00030.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00029-of-00030.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00029-of-00030.safetensors", "model.layers.8.self_attn.q_proj.bias": "model-00029-of-00030.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00029-of-00030.safetensors", "model.layers.8.self_attn.v_proj.bias": "model-00029-of-00030.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00029-of-00030.safetensors", "model.layers.9.input_layernorm.weight": "model-00029-of-00030.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00029-of-00030.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00030-of-00030.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00030-of-00030.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00030-of-00030.safetensors", "model.layers.9.self_attn.k_proj.bias": "model-00030-of-00030.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00030-of-00030.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00030-of-00030.safetensors", "model.layers.9.self_attn.q_proj.bias": "model-00030-of-00030.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00030-of-00030.safetensors", "model.layers.9.self_attn.v_proj.bias": "model-00030-of-00030.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00030-of-00030.safetensors", "model.norm.weight": "model-00030-of-00030.safetensors"}}