diff --git a/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/config.json b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/config.json
new file mode 100755
index 0000000000000000000000000000000000000000..68e39d8743a9a1b3cee53ea380bcaad7e073a96b
--- /dev/null
+++ b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/config.json
@@ -0,0 +1,97 @@
+{
+  "architectures": [
+    "LlavaInterleaveForConditionalGeneration"
+  ],
+  "btadapter": true,
+  "btadapter_depth": 4,
+  "clip_post_pretrain": null,
+  "clip_weight": "/group/40043/ruyangliu/weight/CLIP-L334",
+  "frame_shape": [
+    24,
+    24
+  ],
+  "hidden_size": 4096,
+  "ignore_index": -100,
+  "image_grid_pinpoints": [
+    [
+      336,
+      672
+    ],
+    [
+      672,
+      336
+    ],
+    [
+      672,
+      672
+    ],
+    [
+      1008,
+      336
+    ],
+    [
+      336,
+      1008
+    ]
+  ],
+  "image_pooling_kernel": [
+    1,
+    3,
+    3
+  ],
+  "image_pooling_stride": [
+    1,
+    3,
+    3
+  ],
+  "image_token_index": 32000,
+  "long_clip": true,
+  "max_T": 64,
+  "model_type": "llava_next",
+  "pad_token_id": 0,
+  "pllava_pooling_shape": null,
+  "pooling": "clipST_3d",
+  "pooling_kernel": [
+    2,
+    3,
+    3
+  ],
+  "pooling_stride": [
+    2,
+    3,
+    3
+  ],
+  "pooling_temp": 0.01,
+  "projector_hidden_act": "gelu",
+  "text_config": {
+    "_name_or_path": "lmsys/vicuna-7b-v1.5",
+    "architectures": [
+      "LlamaForCausalLM"
+    ],
+    "max_position_embeddings": 4096,
+    "model_type": "llama",
+    "pad_token_id": 0,
+    "rms_norm_eps": 1e-05,
+    "torch_dtype": "float16",
+    "vocab_size": 32064
+  },
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.40.0",
+  "use_image_newline_parameter": true,
+  "vision_config": {
+    "depth": 4,
+    "hidden_size": 1024,
+    "image_size": 336,
+    "intermediate_size": 4096,
+    "max_T": 64,
+    "model_type": "clip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 24,
+    "patch_size": 14,
+    "projection_dim": 768,
+    "vocab_size": 32000
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "default",
+  "vocab_size": 32064
+}
diff --git a/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/generation_config.json b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/generation_config.json
new file mode 100755
index 0000000000000000000000000000000000000000..49983f9ee9df788697993efc7d1101054be4ce03
--- /dev/null
+++ b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/generation_config.json
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "transformers_version": "4.40.0"
+}
diff --git a/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00001-of-00003.safetensors b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00001-of-00003.safetensors
new file mode 100755
index 0000000000000000000000000000000000000000..73e19aeedf11f395de41ffcf9c67b1284569082f
--- /dev/null
+++ b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:badbca0f8b6f276fcf9f0b6f35ad29370715f5d9e5bde69804ab78457e611ced
+size 4980641554
diff --git a/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00002-of-00003.safetensors b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00002-of-00003.safetensors
new file mode 100755
index 0000000000000000000000000000000000000000..0dd14cc54137ebb0f04fb4a229cf41edccdafbf3
--- /dev/null
+++ b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c486b79a3c76b9a43cfd068b592bfc88cfac2e5564d2552c55b24b518581eb0d
+size 4957878536
diff --git a/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00003-of-00003.safetensors b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00003-of-00003.safetensors
new file mode 100755
index 0000000000000000000000000000000000000000..cbbff778b472f76d1b60b7925f56ba51fd080948
--- /dev/null
+++ b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4df79bed0e8f9f55e5457c7bc44022a71d5a4d7f5e4402b4da25a356401f2cf
+size 4580905400
diff --git a/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model.safetensors.index.json b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model.safetensors.index.json
new file mode 100755
index 0000000000000000000000000000000000000000..f63131239adc196fb1c06e1379ca59940dc5ae1b
--- /dev/null
+++ b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/model.safetensors.index.json
@@ -0,0 +1,1010 @@
+{
+  "metadata": {
+    "total_size": 14519286786
+  },
+  "weight_map": {
+    "image_newline": "model-00001-of-00003.safetensors",
+    "language_model.lm_head.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.21.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.21.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "language_model.model.norm.weight": "model-00003-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.embeddings.position_embedding": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.embeddings.position_embedding_res": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.embeddings.token_embedding.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.final_layer_norm.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_model.final_layer_norm.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_text_projection.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_visual_ln.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_visual_ln.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.clip_visual_projection.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.linear_1.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.linear_1.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.linear_2.bias": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.linear_2.weight": "model-00001-of-00003.safetensors",
+    "multi_modal_projector.logit_scale": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.layer_norm2.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.layer_norm2.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.mlp.fc1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.mlp.fc1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.mlp.fc2.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.mlp.fc2.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_S_layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.temporal_fc.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.0.temporal_fc.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.temporal_fc.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.1.temporal_fc.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.2.layer_norm1.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.2.layer_norm1.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
+    "vision_tower.vision_model.btadapter_T_layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
"vision_tower.vision_model.btadapter_T_layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.2.temporal_fc.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.2.temporal_fc.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.temporal_fc.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_T_layers.3.temporal_fc.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_cls_token": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.btadapter_time_embed.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.embeddings.class_embedding": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.embeddings.position_embedding.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": 
"model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": 
"model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + 
"vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": 
"model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00003.safetensors", + 
"vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00003.safetensors", + 
"vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00003.safetensors", + 
"vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": 
"model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + 
"vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + 
"vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + 
"vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + 
"vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.pre_layrnorm.bias": "model-00001-of-00003.safetensors", + "vision_tower.vision_model.pre_layrnorm.weight": "model-00001-of-00003.safetensors" + } +} diff --git a/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/trainer_state.json b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/trainer_state.json new file mode 100755 index 0000000000000000000000000000000000000000..bf7f3731cbb2cb816010a298f9aed627f205f667 --- /dev/null +++ b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/trainer_state.json @@ -0,0 +1,422 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.9999115122555526, + "eval_steps": 500, + "global_step": 2825, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.017697548889478807, + "grad_norm": 1.3246452072458292, + "learning_rate": 1.1764705882352942e-05, + "loss": 1.3482, + "step": 50 + }, + { + "epoch": 0.035395097778957614, + "grad_norm": 1.184986868355016, + "learning_rate": 1.9998521094455198e-05, + "loss": 1.0655, + "step": 100 + }, + { + "epoch": 0.053092646668436425, + "grad_norm": 0.9956602761532123, + "learning_rate": 1.9972241607451552e-05, + "loss": 1.0325, + "step": 150 + }, + { + "epoch": 0.07079019555791523, + "grad_norm": 0.8985264278021503, + "learning_rate": 1.9913196946839304e-05, + "loss": 1.0115, + "step": 200 + }, + { + "epoch": 0.08848774444739403, + "grad_norm": 0.9761536009178263, + "learning_rate": 1.9821581111985072e-05, + "loss": 1.0087, + "step": 250 + }, + { + "epoch": 0.10618529333687285, + "grad_norm": 0.9582085355410745, + "learning_rate": 1.9697695119335547e-05, + "loss": 0.9959, + "step": 300 + }, + { + "epoch": 0.12388284222635165, + "grad_norm": 0.9502507945345694, + "learning_rate": 1.954194601338651e-05, + "loss": 0.9832, + "step": 350 + }, + { + "epoch": 0.14158039111583046, + "grad_norm": 0.9312008953589136, + "learning_rate": 1.9354845529282042e-05, + "loss": 0.9838, + "step": 400 + }, + { + "epoch": 0.15927794000530926, + "grad_norm": 0.9921814925845447, + "learning_rate": 1.9137008411438213e-05, + "loss": 0.9743, + "step": 450 + }, + { + "epoch": 0.17697548889478806, + "grad_norm": 0.9946357977754318, + "learning_rate": 1.8889150393715627e-05, + "loss": 0.9674, + "step": 500 + }, + { + "epoch": 0.19467303778426687, + "grad_norm": 0.936001151695521, + "learning_rate": 1.8612085847777215e-05, + "loss": 0.9679, + "step": 550 + }, + { + "epoch": 0.2123705866737457, + "grad_norm": 0.9739097270294552, + "learning_rate": 1.8306725107357933e-05, + "loss": 0.9622, + "step": 600 + }, + { + "epoch": 0.2300681355632245, + "grad_norm": 1.0799028416482208, + "learning_rate": 1.7974071477237887e-05, + "loss": 0.976, + "step": 650 + }, + { + "epoch": 0.2477656844527033, + "grad_norm": 0.9330950566244366, + "learning_rate": 1.7615217936746246e-05, + "loss": 0.9567, + "step": 700 + }, + { + "epoch": 0.2654632333421821, + "grad_norm": 0.8832817518420761, + "learning_rate": 1.7231343548627085e-05, + "loss": 0.9523, + "step": 750 + }, + { + "epoch": 0.2831607822316609, + "grad_norm": 0.9002124999100791, + "learning_rate": 1.6823709585066308e-05, + "loss": 0.9466, + "step": 800 + }, + { + "epoch": 0.30085833112113974, + "grad_norm": 
0.9157647153217237, + "learning_rate": 1.6393655383608132e-05, + "loss": 0.9429, + "step": 850 + }, + { + "epoch": 0.3185558800106185, + "grad_norm": 5.464496113406752, + "learning_rate": 1.594259394657707e-05, + "loss": 0.9695, + "step": 900 + }, + { + "epoch": 0.33625342890009735, + "grad_norm": 0.9883051330334294, + "learning_rate": 1.5472007298464117e-05, + "loss": 0.944, + "step": 950 + }, + { + "epoch": 0.3539509777895761, + "grad_norm": 0.8832131211036971, + "learning_rate": 1.4983441616531152e-05, + "loss": 0.937, + "step": 1000 + }, + { + "epoch": 0.37164852667905496, + "grad_norm": 0.9595043070379917, + "learning_rate": 1.4478502150632503e-05, + "loss": 0.9313, + "step": 1050 + }, + { + "epoch": 0.38934607556853373, + "grad_norm": 1.035782879874017, + "learning_rate": 1.3958847948945428e-05, + "loss": 0.9264, + "step": 1100 + }, + { + "epoch": 0.40704362445801257, + "grad_norm": 0.961077579119761, + "learning_rate": 1.3426186406938769e-05, + "loss": 0.924, + "step": 1150 + }, + { + "epoch": 0.4247411733474914, + "grad_norm": 0.882487785914029, + "learning_rate": 1.2882267657489908e-05, + "loss": 0.9226, + "step": 1200 + }, + { + "epoch": 0.44243872223697017, + "grad_norm": 0.8676230908883836, + "learning_rate": 1.2328878820582122e-05, + "loss": 0.9134, + "step": 1250 + }, + { + "epoch": 0.460136271126449, + "grad_norm": 0.8663581800411811, + "learning_rate": 1.1767838131475654e-05, + "loss": 0.917, + "step": 1300 + }, + { + "epoch": 0.4778338200159278, + "grad_norm": 1.1913367079153512, + "learning_rate": 1.1200988966645286e-05, + "loss": 0.9181, + "step": 1350 + }, + { + "epoch": 0.4955313689054066, + "grad_norm": 0.8563122357771151, + "learning_rate": 1.0630193787112994e-05, + "loss": 0.903, + "step": 1400 + }, + { + "epoch": 0.5132289177948854, + "grad_norm": 0.8518715044091224, + "learning_rate": 1.005732801907567e-05, + "loss": 0.9085, + "step": 1450 + }, + { + "epoch": 0.5309264666843642, + "grad_norm": 0.9224776221871871, + "learning_rate": 9.484273891933982e-06, + "loss": 0.9086, + "step": 1500 + }, + { + "epoch": 0.548624015573843, + "grad_norm": 0.9846231662432429, + "learning_rate": 8.912914253968391e-06, + "loss": 0.9017, + "step": 1550 + }, + { + "epoch": 0.5663215644633218, + "grad_norm": 0.8883898612449065, + "learning_rate": 8.345126385981737e-06, + "loss": 0.896, + "step": 1600 + }, + { + "epoch": 0.5840191133528007, + "grad_norm": 0.9045281001313086, + "learning_rate": 7.782775833234522e-06, + "loss": 0.9033, + "step": 1650 + }, + { + "epoch": 0.6017166622422795, + "grad_norm": 0.8962005631393778, + "learning_rate": 7.227710275938987e-06, + "loss": 0.8979, + "step": 1700 + }, + { + "epoch": 0.6194142111317582, + "grad_norm": 0.8638732399362996, + "learning_rate": 6.68175345845119e-06, + "loss": 0.8889, + "step": 1750 + }, + { + "epoch": 0.637111760021237, + "grad_norm": 1.0900143725646285, + "learning_rate": 6.146699197107715e-06, + "loss": 0.8957, + "step": 1800 + }, + { + "epoch": 0.6548093089107159, + "grad_norm": 0.9103789814206331, + "learning_rate": 5.6243054863949675e-06, + "loss": 0.8858, + "step": 1850 + }, + { + "epoch": 0.6725068578001947, + "grad_norm": 0.9300189349082482, + "learning_rate": 5.116288722816087e-06, + "loss": 0.8822, + "step": 1900 + }, + { + "epoch": 0.6902044066896735, + "grad_norm": 0.8743852772084869, + "learning_rate": 4.6243180654337975e-06, + "loss": 0.9172, + "step": 1950 + }, + { + "epoch": 0.7079019555791523, + "grad_norm": 0.9546014887497036, + "learning_rate": 4.1500099516183555e-06, + "loss": 1.1497, + "step": 2000 + }, + 
{ + "epoch": 0.7255995044686311, + "grad_norm": 0.9614448138630033, + "learning_rate": 3.6949227860198712e-06, + "loss": 1.0692, + "step": 2050 + }, + { + "epoch": 0.7432970533581099, + "grad_norm": 0.848345849582495, + "learning_rate": 3.2605518202151577e-06, + "loss": 1.0286, + "step": 2100 + }, + { + "epoch": 0.7609946022475887, + "grad_norm": 1.0998684999586237, + "learning_rate": 2.8483242398526723e-06, + "loss": 0.8803, + "step": 2150 + }, + { + "epoch": 0.7786921511370675, + "grad_norm": 0.8279471296348094, + "learning_rate": 2.4595944754374723e-06, + "loss": 0.8885, + "step": 2200 + }, + { + "epoch": 0.7963897000265463, + "grad_norm": 0.8602056476985009, + "learning_rate": 2.0956397521631666e-06, + "loss": 0.8704, + "step": 2250 + }, + { + "epoch": 0.8140872489160251, + "grad_norm": 0.9945305020433657, + "learning_rate": 1.757655893412622e-06, + "loss": 0.8677, + "step": 2300 + }, + { + "epoch": 0.831784797805504, + "grad_norm": 0.850430777727199, + "learning_rate": 1.4467533917154842e-06, + "loss": 0.877, + "step": 2350 + }, + { + "epoch": 0.8494823466949828, + "grad_norm": 0.896244091668865, + "learning_rate": 1.1639537600719764e-06, + "loss": 0.8752, + "step": 2400 + }, + { + "epoch": 0.8671798955844615, + "grad_norm": 0.9590853387394762, + "learning_rate": 9.101861756312369e-07, + "loss": 0.8737, + "step": 2450 + }, + { + "epoch": 0.8848774444739403, + "grad_norm": 0.805579311652691, + "learning_rate": 6.862844267517643e-07, + "loss": 0.8792, + "step": 2500 + }, + { + "epoch": 0.9025749933634192, + "grad_norm": 0.8767296521208048, + "learning_rate": 4.929841734749063e-07, + "loss": 0.875, + "step": 2550 + }, + { + "epoch": 0.920272542252898, + "grad_norm": 0.9004753221903812, + "learning_rate": 3.309205304124552e-07, + "loss": 0.8703, + "step": 2600 + }, + { + "epoch": 0.9379700911423767, + "grad_norm": 0.8622876486385493, + "learning_rate": 2.0062597999009114e-07, + "loss": 0.8815, + "step": 2650 + }, + { + "epoch": 0.9556676400318556, + "grad_norm": 0.896417353499626, + "learning_rate": 1.0252862290301092e-07, + "loss": 0.8715, + "step": 2700 + }, + { + "epoch": 0.9733651889213344, + "grad_norm": 0.9142726428404561, + "learning_rate": 3.6950771532126004e-08, + "loss": 0.8657, + "step": 2750 + }, + { + "epoch": 0.9910627378108132, + "grad_norm": 0.8664983124317569, + "learning_rate": 4.1078909423253325e-09, + "loss": 0.8791, + "step": 2800 + }, + { + "epoch": 0.9999115122555526, + "step": 2825, + "total_flos": 4.909801179132723e+16, + "train_loss": 0.9400995777771536, + "train_runtime": 88050.3139, + "train_samples_per_second": 0.513, + "train_steps_per_second": 0.032 + } + ], + "logging_steps": 50, + "max_steps": 2825, + "num_input_tokens_seen": 0, + "num_train_epochs": 1, + "save_steps": 500, + "total_flos": 4.909801179132723e+16, + "train_batch_size": 4, + "trial_name": null, + "trial_params": null +} diff --git a/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/training_args.bin b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/training_args.bin new file mode 100755 index 0000000000000000000000000000000000000000..43ec60d3e17b771ee321ba264e5919138276130b --- /dev/null +++ b/llavav16bta_cp3d1688_longb_qafull_llhound_fullimg_multiimg_trainllm_e1_bz512_32f1688/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c61e1b93e453464c82a151998343e20e07c872d423dcc4a4a2e46bd6e53948b +size 7096