File size: 3,440 Bytes
{
  "metadata": {
    "total_size": 33664640
  },
  "weight_map": {
    "language_model.model.embed_tokens.weight": "model-00001-of-00002.safetensors",
    "language_model.model.layers.0.input_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.0.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.0.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.0.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.0.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.layers.0.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "language_model.model.norm.weight": "model-00002-of-00002.safetensors",
    "multi_modal_projector.linear.bias": "model-00002-of-00002.safetensors",
    "multi_modal_projector.linear.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.embeddings.patch_embedding.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.embeddings.position_embedding.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.post_layernorm.bias": "model-00002-of-00002.safetensors",
    "vision_tower.vision_model.post_layernorm.weight": "model-00002-of-00002.safetensors"
  }
}
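
The JSON above is a sharded safetensors checkpoint index: "metadata.total_size" records the combined size in bytes of all tensors, and "weight_map" maps each tensor name to the shard file that stores it. Below is a minimal sketch of how a loader can consume such an index, assuming the file is saved as model.safetensors.index.json next to its shards (the transformers naming convention); INDEX_PATH and the assembled state_dict are illustrative names, not part of the file format.

    import json
    from collections import defaultdict

    from safetensors.torch import load_file  # pip install safetensors torch

    # Hypothetical path for illustration; transformers names the index
    # "model.safetensors.index.json" by convention.
    INDEX_PATH = "model.safetensors.index.json"

    with open(INDEX_PATH) as f:
        index = json.load(f)

    # weight_map: tensor name -> shard file that contains the tensor.
    weight_map = index["weight_map"]

    # Group tensor names by shard so each shard file is read only once.
    by_shard = defaultdict(list)
    for name, shard in weight_map.items():
        by_shard[shard].append(name)

    # Assemble the full state dict from the shards.
    state_dict = {}
    for shard, names in by_shard.items():
        tensors = load_file(shard)  # dict of torch tensors in this shard
        for name in names:
            state_dict[name] = tensors[name]

    # Sanity check: the loaded bytes should match metadata["total_size"].
    total = sum(t.numel() * t.element_size() for t in state_dict.values())
    assert total == index["metadata"]["total_size"]

Grouping names by shard before loading means each .safetensors file is opened once rather than once per tensor, which is the main reason the index stores a per-tensor shard mapping instead of requiring every shard to be scanned.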