jaronfei committed
Commit · c61bdc1 · Parent(s): 9163239
first commit
Browse files
- README.md +86 -0
- config.json +19 -0
- configuration_videoccam.py +38 -0
- llm_adapter/README.md +202 -0
- llm_adapter/adapter_config.json +31 -0
- llm_adapter/adapter_model.safetensors +3 -0
- modeling_videoccam.py +215 -0
- projector/config.json +26 -0
- projector/configuration_ccam.py +51 -0
- projector/model.safetensors +3 -0
- projector/modeling_ccam.py +196 -0
- vision_encoder_adapter/README.md +202 -0
- vision_encoder_adapter/adapter_config.json +36 -0
- vision_encoder_adapter/adapter_model.safetensors +3 -0
README.md
CHANGED
@@ -1,3 +1,89 @@
---
license: mit
---

## Model Summary

Video-CCAM-14B-v1.1 is a lightweight Video-MLLM developed by the TencentQQ Multimedia Research Team.

## Usage

Inference uses Hugging Face Transformers on NVIDIA GPUs. Requirements were tested on Python 3.9/3.10.
```
pip install -U pip torch transformers peft decord pysubs2 imageio
```

## Inference

```
import os
import torch
from PIL import Image
from transformers import AutoModel

from eval import load_decord

os.environ['TOKENIZERS_PARALLELISM'] = 'false'

videoccam = AutoModel.from_pretrained(
    '<your_local_path_1>',
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    device_map='auto',
    _attn_implementation='flash_attention_2',
    # llm_name_or_path='<your_local_llm_path>',
    # vision_encoder_name_or_path='<your_local_vision_encoder_path>'
)


messages = [
    [
        {
            'role': 'user',
            'content': '<image>\nDescribe this image in detail.'
        }
    ], [
        {
            'role': 'user',
            'content': '<video>\nDescribe this video in detail.'
        }
    ]
]

images = [
    Image.open('assets/example_image.jpg').convert('RGB'),
    load_decord('assets/example_video.mp4', sample_type='uniform', num_frames=32)
]

response = videoccam.chat(messages, images, max_new_tokens=512, do_sample=False)

print(response)
```
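
Note that `load_decord` is not part of any pip package listed above; it is a frame-sampling helper in `eval.py` of the [Video-CCAM](https://github.com/QQ-MM/Video-CCAM) repository. Below is a minimal sketch of an equivalent uniform sampler, assuming the helper returns a list of RGB `PIL.Image` frames (check the repository for the authoritative version):

```
import numpy as np
from decord import VideoReader, cpu
from PIL import Image

def load_decord(video_path: str, sample_type: str = 'uniform', num_frames: int = 32) -> list:
    """Uniformly sample `num_frames` RGB frames from a video (stand-in for eval.load_decord)."""
    assert sample_type == 'uniform', 'only uniform sampling is sketched here'
    vr = VideoReader(video_path, ctx=cpu(0))
    # evenly spaced frame indices over the whole clip
    indices = np.linspace(0, len(vr) - 1, num_frames).round().astype(int)
    frames = vr.get_batch(indices).asnumpy()  # (num_frames, H, W, 3), uint8
    return [Image.fromarray(frame) for frame in frames]
```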

Please refer to [Video-CCAM](https://github.com/QQ-MM/Video-CCAM) for more details.

### Benchmarks

|Benchmark|Video-CCAM-14B|Video-CCAM-14B-v1.1|
|:-:|:-:|:-:|
|MVBench (32 frames)|61.88|63.08|
|MSVD-QA (32 frames)|76.3/4.1|78.6/4.2|
|MSRVTT-QA (32 frames)|59.0/3.5|66.3/3.8|
|ActivityNet-QA (32 frames)|58.3/3.7|60.4/3.8|
|TGIF-QA (32 frames)|84.1/4.5|84.4/4.5|
|Video-MME (w/o sub, 96 frames)|53.2|53.9|
|Video-MME (w sub, 96 frames)|57.2|56.1|
|MLVU (M-Avg, 96 frames)|60.2|63.1|
|MLVU (G-Avg, 96 frames)|4.11|4.01|
|VideoVista (96 frames)|68.43|76.55|

* The accuracy/score pairs for MSVD-QA, MSRVTT-QA, ActivityNet-QA, and TGIF-QA are evaluated by `gpt-3.5-turbo-0125`.

## Acknowledgement

* [xtuner](https://github.com/InternLM/xtuner): Video-CCAM-14B is trained with the xtuner framework. Thanks for their excellent work!
* [Phi-3-medium-4k-instruct](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct): A powerful language model developed by Microsoft.
* [SigLIP SO400M](https://huggingface.co/google/siglip-so400m-patch14-384): An outstanding vision encoder developed by Google.

## License

The model is licensed under the MIT license.
config.json
ADDED
@@ -0,0 +1,19 @@
```
{
  "_name_or_path": "",
  "architectures": [
    "VideoCCAM"
  ],
  "llm_name_or_path": "microsoft/Phi-3-medium-4k-instruct",
  "vision_encoder_name_or_path": "google/siglip-so400m-patch14-384",
  "auto_map": {
    "AutoConfig": "configuration_videoccam.VideoCCAMConfig",
    "AutoModel": "modeling_videoccam.VideoCCAM"
  },
  "image_token": "<image>",
  "video_token": "<video>",
  "vision_select_layer": -2,
  "vision_max_chunk_size": 0,
  "_attn_implementation": "flash_attention_2",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.44.0"
}
```
configuration_videoccam.py
ADDED
@@ -0,0 +1,38 @@
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
================================================
@author: Jaron
@time: 2024/08/21 17:51:45
@email: [email protected]
@description:
================================================
"""
from transformers import PretrainedConfig


class VideoCCAMConfig(PretrainedConfig):
    model_type = 'videoccam'
    _auto_class = 'AutoConfig'

    def __init__(
        self,
        llm_name_or_path: str = None,
        projector_name_or_path: str = None,
        vision_encoder_name_or_path: str = None,
        image_token: str = '<image>',
        video_token: str = '<video>',
        vision_select_layer: int = -2,
        vision_max_chunk_size: int = 0,
        _attn_implementation: str = 'flash_attention_2',
        **kwargs
    ):
        super().__init__(**kwargs)
        self.llm_name_or_path = llm_name_or_path
        self.projector_name_or_path = projector_name_or_path
        self.vision_encoder_name_or_path = vision_encoder_name_or_path
        self.image_token = image_token
        self.video_token = video_token
        self.vision_select_layer = vision_select_layer
        self.vision_max_chunk_size = vision_max_chunk_size
        self._attn_implementation = _attn_implementation
```
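Because `config.json` maps `AutoConfig` to `VideoCCAMConfig` via `auto_map`, the configuration can also be inspected on its own. A small sketch, reusing the `<your_local_path_1>` placeholder from the README:

```
from transformers import AutoConfig

# trust_remote_code=True lets transformers import configuration_videoccam.py from the checkpoint
cfg = AutoConfig.from_pretrained('<your_local_path_1>', trust_remote_code=True)
print(cfg.model_type)           # 'videoccam'
print(cfg.llm_name_or_path)     # 'microsoft/Phi-3-medium-4k-instruct'
print(cfg.vision_select_layer)  # -2
```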
llm_adapter/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: microsoft/Phi-3-medium-4k-instruct
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
### Framework versions

- PEFT 0.12.0
llm_adapter/adapter_config.json
ADDED
@@ -0,0 +1,31 @@
```
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "microsoft/Phi-3-medium-4k-instruct",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 256,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 512,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "o_proj",
    "down_proj",
    "gate_up_proj",
    "qkv_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": false
}
```
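This LoRA adapter (rank 512, alpha 256, targeting Phi-3-medium's attention and MLP projections) is loaded and merged automatically by `VideoCCAM.from_pretrained` in `modeling_videoccam.py`. For reference, a standalone sketch of the same PEFT calls, assuming the `<your_local_path_1>` placeholder from the README points at this checkpoint:

```
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    'microsoft/Phi-3-medium-4k-instruct',
    torch_dtype=torch.bfloat16,
    device_map='auto'
)
# attach the LoRA weights, then fold them into the base weights for inference
llm = PeftModel.from_pretrained(base, '<your_local_path_1>/llm_adapter')
llm = llm.merge_and_unload()
```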
llm_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:2315ef91ae836c70d8a6bfc3b47cdf40cce109d40b880b38cf978061ded96be1
size 3565203280
```
modeling_videoccam.py
ADDED
@@ -0,0 +1,215 @@
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
================================================
@author: Jaron
@time: 2024/08/21 17:41:52
@email: [email protected]
@description: Video-CCAM
================================================
"""
import torch
import os.path as osp

from PIL import Image
from peft import PeftModel
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, PreTrainedModel, SiglipVisionModel, SiglipImageProcessor, GenerationConfig


from .configuration_videoccam import VideoCCAMConfig


class VideoCCAM(PreTrainedModel):
    config_class = VideoCCAMConfig
    _auto_class = 'AutoModel'
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def __init__(self, config, device_map: str = 'auto'):
        super().__init__(config)
        self.image_token = config.image_token
        self.video_token = config.video_token
        self.vision_select_layer = config.vision_select_layer
        self.vision_max_chunk_size = config.vision_max_chunk_size
        self.gradient_checkpointing = False

        self.projector = AutoModel.from_pretrained(
            config.projector_name_or_path,
            device_map=device_map,
            trust_remote_code=True,
            torch_dtype=config.torch_dtype,
            attn_implementation='sdpa' if config._attn_implementation == 'flash_attention_2' else config._attn_implementation  # CCAM does not support flash_attention_2
        )
        self.llm = AutoModelForCausalLM.from_pretrained(
            config.llm_name_or_path,
            device_map=device_map,
            torch_dtype=config.torch_dtype,
            attn_implementation=config._attn_implementation
        )
        self.tokenizer = AutoTokenizer.from_pretrained(
            config.llm_name_or_path,
            additional_special_tokens=[self.image_token, self.video_token]
        )
        self.generation_config = GenerationConfig.from_pretrained(config.llm_name_or_path)
        self.image_token_id, self.video_token_id = self.tokenizer.convert_tokens_to_ids([self.image_token, self.video_token])
        self.vision_encoder = SiglipVisionModel.from_pretrained(
            config.vision_encoder_name_or_path,
            device_map=device_map,
            torch_dtype=config.torch_dtype,
            attn_implementation=config._attn_implementation
        )
        self.image_processor = SiglipImageProcessor.from_pretrained(
            config.vision_encoder_name_or_path
        )

    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
        if gradient_checkpointing_kwargs is None:
            gradient_checkpointing_kwargs = dict(use_reentrant=False)
        self.llm.gradient_checkpointing_enable(gradient_checkpointing_kwargs)
        self.vision_encoder.gradient_checkpointing_enable(gradient_checkpointing_kwargs)

    def forward_visual_embeds(self, pixel_values: torch.Tensor) -> torch.Tensor:
        if self.vision_select_layer in {-1, self.vision_encoder.config.num_hidden_layers}:
            visual_embeds = self.vision_encoder(pixel_values, output_hidden_states=False).last_hidden_state
        else:
            visual_embeds = self.vision_encoder(pixel_values, output_hidden_states=True).hidden_states[self.vision_select_layer]
        return visual_embeds

    @torch.inference_mode
    def chat(
        self,
        messages: list[list[dict]],
        images: list = None,  # list of PIL.Image.Image or list[PIL.Image.Image]
        generation_config = None,
        batch_generate: bool = False,
        visual_embeds: torch.Tensor = None,
        return_visual_embeds: bool = False,
        **kwargs
    ):
        if generation_config is None:
            generation_config = self.generation_config

        # compute visual embeds
        if visual_embeds is None:
            _images, split_size = [], []
            for i in images:
                if isinstance(i, Image.Image):
                    _images.append(i)
                    split_size.append(1)
                else:
                    _images += i
                    split_size.append(len(i))
            pixel_values = self.image_processor(
                _images,
                return_tensors='pt'
            )['pixel_values'].to(
                dtype=self.vision_encoder.get_input_embeddings().weight.dtype,
                device=self.vision_encoder.get_input_embeddings().weight.device
            )
            if 0 < self.vision_max_chunk_size < len(pixel_values):
                split_idx = list(range(0, len(pixel_values), self.vision_max_chunk_size)) + [-1]
                visual_embeds = torch.cat([
                    self.forward_visual_embeds(pixel_values[le:ri])
                    for le, ri in zip(split_idx[:-1], split_idx[1:])
                ], dim=0)
            else:
                visual_embeds = self.forward_visual_embeds(pixel_values)
            visual_embeds = self.projector(visual_embeds.split(split_size, dim=0))

        # compute textual embeds
        device = self.llm.get_input_embeddings().weight.device
        input_ids = self.tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True)  # list[list[int]]
        _input_ids, split_idx = [], [0]
        for i in input_ids:
            _input_ids += i
            split_idx.append(split_idx[-1] + len(i))
        _input_ids = torch.tensor(_input_ids, dtype=torch.long, device=device)
        visual_idx = torch.where((_input_ids == self.image_token_id) | (_input_ids == self.video_token_id))[0].tolist()
        assert len(visual_idx) == len(visual_embeds), f'The number of visual tokens ({len(visual_idx)}) should be equal to the number of visual features ({len(visual_embeds)}).'

        _input_ids[visual_idx] = 0  # avoid index overflow
        _inputs_embeds = self.llm.get_input_embeddings()(_input_ids)
        inputs_embeds, cur_visual_pointer = [], 0
        for start_idx, end_idx in zip(split_idx[:-1], split_idx[1:]):
            if cur_visual_pointer < len(visual_idx) and visual_idx[cur_visual_pointer] < end_idx:
                mid_idx = visual_idx[cur_visual_pointer]
                embeds = [_inputs_embeds[start_idx:mid_idx], visual_embeds[cur_visual_pointer]]
                cur_visual_pointer += 1
                while cur_visual_pointer < len(visual_idx) and visual_idx[cur_visual_pointer] < end_idx:
                    embeds += [_inputs_embeds[mid_idx+1:visual_idx[cur_visual_pointer]], visual_embeds[cur_visual_pointer]]
                    mid_idx = visual_idx[cur_visual_pointer]
                    cur_visual_pointer += 1
                embeds.append(_inputs_embeds[mid_idx+1:end_idx])
                inputs_embeds.append(torch.cat(embeds, dim=0))
            # Pure Text
            else:
                inputs_embeds.append(_inputs_embeds[start_idx:end_idx])

        if batch_generate:
            B, L = len(inputs_embeds), max(i.size(0) for i in inputs_embeds)
            pad_embeds = self.llm.get_input_embeddings()(
                torch.tensor([self.tokenizer.pad_token_id], dtype=torch.long, device=device)
            )  # (1, C)
            inputs_embeds_list = []
            attention_mask = torch.zeros(B, L, dtype=torch.long, device=device)
            for i, embeds in enumerate(inputs_embeds):
                l = embeds.size(0)
                inputs_embeds_list += [pad_embeds.expand(L - l, -1), embeds]
                attention_mask[i, -l:] = 1
            inputs_embeds = torch.cat(inputs_embeds_list, dim=0).view(B, L, -1)
            output_ids = self.llm.generate(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                generation_config=generation_config,
                **kwargs
            )
        else:
            output_ids = []
            for embeds in inputs_embeds:
                output_ids.append(self.llm.generate(
                    inputs_embeds=embeds[None],
                    attention_mask=torch.ones(1, embeds.size(0), dtype=torch.long, device=device),
                    generation_config=generation_config,
                    **kwargs
                )[0])
        prediction = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        if return_visual_embeds:
            return prediction, visual_embeds
        else:
            return prediction

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: str,
        *args,
        config: VideoCCAMConfig = None,
        torch_dtype: torch.dtype = torch.bfloat16,
        device_map: str = 'auto',
        **kwargs
    ) -> PreTrainedModel:
        merge_pretrained_lora = kwargs.pop('merge_pretrained_lora', True)

        config.torch_dtype = torch_dtype
        config.projector_name_or_path = osp.join(pretrained_model_name_or_path, 'projector')
        if osp.isdir(cur_path := osp.join(pretrained_model_name_or_path, 'llm')):
            config.llm_name_or_path = cur_path
        if osp.isdir(cur_path := osp.join(pretrained_model_name_or_path, 'vision_encoder')):
            config.vision_encoder_name_or_path = cur_path
        model = cls(config, device_map)

        # load LoRA if exists
        if osp.exists(cur_path := osp.join(pretrained_model_name_or_path, 'llm_adapter')):
            model.llm = PeftModel.from_pretrained(model.llm, cur_path, device_map=device_map)
            print(f'Load LLM adapter from {cur_path}.')
            if merge_pretrained_lora:
                model.llm = model.llm.merge_and_unload()
        if osp.exists(cur_path := osp.join(pretrained_model_name_or_path, 'vision_encoder_adapter')):
            model.vision_encoder = PeftModel.from_pretrained(model.vision_encoder, cur_path, device_map=device_map)
            print(f'Load vision encoder adapter from {cur_path}.')
            if merge_pretrained_lora:
                model.vision_encoder = model.vision_encoder.merge_and_unload()

        return model
```
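Beyond the README example, `chat` also accepts precomputed `visual_embeds` and can return them via `return_visual_embeds`, which avoids re-running the vision encoder and projector when asking several questions about the same clip. A hedged sketch, assuming `videoccam` is loaded as in the README and `video_frames` is a hypothetical list of PIL frames from `load_decord`:

```
# First question: also ask chat() to hand back the projected visual features.
answers, cached_embeds = videoccam.chat(
    [[{'role': 'user', 'content': '<video>\nDescribe this video in detail.'}]],
    images=[video_frames],              # hypothetical: list of PIL frames, e.g. from load_decord(...)
    return_visual_embeds=True,
    max_new_tokens=512, do_sample=False
)

# Follow-up on the same clip: reuse the cached features, skipping SigLIP and the projector.
follow_up = videoccam.chat(
    [[{'role': 'user', 'content': '<video>\nHow many people appear in the video?'}]],
    visual_embeds=cached_embeds,
    max_new_tokens=128, do_sample=False
)
```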
projector/config.json
ADDED
@@ -0,0 +1,26 @@
```
{
  "_name_or_path": "",
  "architectures": [
    "CCAMModel"
  ],
  "attention_bias": true,
  "attention_dropout": 0.1,
  "auto_map": {
    "AutoConfig": "configuration_ccam.CCAMConfig",
    "AutoModel": "modeling_ccam.CCAMModel"
  },
  "cross_hidden_size": 1152,
  "dropout": 0.1,
  "hidden_act": "swiglu",
  "hidden_size": 1024,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "mlp_bias": true,
  "model_type": "ccam",
  "num_heads": 16,
  "num_key_value_heads": 16,
  "num_query": 1024,
  "output_size": 5120,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.44.0"
}
```
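For orientation: `cross_hidden_size` 1152 matches the SigLIP SO400M feature width, `output_size` 5120 matches Phi-3-medium's hidden size, and `num_query` 1024 is the number of visual tokens handed to the LLM. Below is a rough standalone shape check of the projector, assuming the `<your_local_path_1>` placeholder from the README and that each 384x384 frame yields 729 SigLIP patch tokens (random features stand in for real encoder outputs):

```
import torch
from transformers import AutoModel

# load only the CCAM projector sub-module (placeholder path, as in the README)
projector = AutoModel.from_pretrained(
    '<your_local_path_1>/projector', trust_remote_code=True, torch_dtype=torch.bfloat16
)
frames = torch.randn(32, 729, 1152, dtype=torch.bfloat16)  # (T, L, C'): 32 frames, assumed 729 patch tokens each
visual_tokens = projector([frames])                        # argument: list of per-sample (T, L, C') feature stacks
print(visual_tokens.shape)                                 # expected: torch.Size([1, 1024, 5120])
```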
projector/configuration_ccam.py
ADDED
@@ -0,0 +1,51 @@
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
================================================
@author: Jaron
@time: 2024/07/10 19:43:31
@email: [email protected]
@description: Causal Cross-Attention Mask (CCAM)
================================================
"""

from transformers import PretrainedConfig


class CCAMConfig(PretrainedConfig):
    model_type = 'ccam'
    _auto_class = 'AutoConfig'

    def __init__(
        self,
        num_query: int = 1024,
        num_heads: int = 16,
        hidden_size: int = 1024,
        intermediate_size: int = 4096,
        num_key_value_heads: int = 16,
        dropout: float = 0.1,
        mlp_bias: bool = True,
        hidden_act: str = 'swiglu',
        output_size: int = None,  # inferred from llm
        attention_bias: bool = True,
        layer_norm_eps: float = 1e-5,
        cross_hidden_size: int = None,  # inferred from vision encoder
        attention_dropout: float = 0.1,
        _attn_implementation: str = 'sdpa',
        **kwargs
    ):
        super().__init__(**kwargs)
        self.dropout = dropout
        self.mlp_bias = mlp_bias
        self.num_query = num_query
        self.num_heads = num_heads
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.layer_norm_eps = layer_norm_eps
        self.attention_bias = attention_bias
        self.intermediate_size = intermediate_size
        self.cross_hidden_size = cross_hidden_size
        self.attention_dropout = attention_dropout
        self.num_key_value_heads = num_key_value_heads
        self._attn_implementation = _attn_implementation
```
projector/model.safetensors
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:3cfa3239281d5ddea3c0709716d548f21c8640fcfe063ee35339e8f393d8a3b4
size 69774840
```
projector/modeling_ccam.py
ADDED
@@ -0,0 +1,196 @@
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
================================================
@author: Jaron
@time: 2024/07/10 19:47:01
@email: [email protected]
@description: Causal Cross-Attention Mask (CCAM)
================================================
"""

import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers import PreTrainedModel
from transformers.activations import ACT2FN

from .configuration_ccam import CCAMConfig


class CCAMMLP(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.hidden_act = config.hidden_act
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.output_size = config.output_size
        if self.hidden_act == 'swiglu':
            self.fc1 = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.mlp_bias)
            self.act_fn = ACT2FN['silu']
        else:
            self.fc1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
            self.act_fn = ACT2FN[self.hidden_act]
        self.fc2 = nn.Linear(self.intermediate_size, self.output_size, bias=config.mlp_bias)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        if self.hidden_act == 'swiglu':
            gate, up = hidden_states.chunk(2, dim=-1)
            hidden_states = self.act_fn(gate) * up
        else:
            hidden_states = self.act_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class CCAMCrossAttention(nn.Module):
    """Cross-attention layer of the CCAM projector.

    Flash Attention 2 is not supported since the mask may be neither full nor causal. Only support `attn_implementation` as `eager` and `sdpa`.
    """

    def __init__(self, config):
        super().__init__()
        self.num_heads = config.num_heads
        self.hidden_size = config.hidden_size
        self.attention_bias = config.attention_bias
        self.attention_dropout = config.attention_dropout
        self.cross_hidden_size = config.cross_hidden_size
        self.num_key_value_heads = config.num_key_value_heads
        self.attn_implementation = config._attn_implementation
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads

        assert self.head_dim * self.num_heads == self.hidden_size, f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).'

        self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=self.attention_bias)
        self.k_proj = nn.Linear(self.cross_hidden_size, self.num_key_value_heads * self.head_dim, bias=self.attention_bias)
        self.v_proj = nn.Linear(self.cross_hidden_size, self.num_key_value_heads * self.head_dim, bias=self.attention_bias)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=self.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,  # (B, Q, C)
        cross_hidden_states: torch.Tensor,  # (B, L, C')
        attention_mask: torch.Tensor = None  # (Q, L), '-inf' means masked, 0 means not masked
    ) -> torch.Tensor:  # (B, Q, C)
        B, Q, C = hidden_states.size()
        query_states = self.q_proj(hidden_states)  # (B, Q, C)
        key_states = self.k_proj(cross_hidden_states)
        value_states = self.v_proj(cross_hidden_states)

        L = key_states.size(1)
        query_states = query_states.view(B, Q, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(B, L, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(B, L, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        if self.num_key_value_groups > 1:
            key_states = key_states.repeat_interleave(repeats=self.num_key_value_groups, dim=1)
            value_states = value_states.repeat_interleave(repeats=self.num_key_value_groups, dim=1)

        if self.attn_implementation == 'eager':
            attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / self.head_dim ** 0.5  # (B, num_heads, Q, L)
            if attention_mask is not None:
                attn_weights = attn_weights + attention_mask.view(1, 1, Q, L)
            # upcast attention to fp32
            attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
            attn_weights = F.dropout(attn_weights, p=self.attention_dropout, training=self.training)
            attn_output = torch.matmul(attn_weights, value_states)  # (B, num_heads, Q, head_dim)
        else:  # 'sdpa'
            # there are bugs in torch <=2.1.0, requiring qkv as contiguous(), be careful
            attn_output = F.scaled_dot_product_attention(
                query_states,
                key_states,
                value_states,
                attn_mask=attention_mask,
                dropout_p=self.attention_dropout if self.training else 0.0
            )
        attn_output = attn_output.transpose(1, 2).reshape(B, Q, C)  # (B, Q, C)
        attn_output = self.o_proj(attn_output)

        return attn_output


class CCAMModel(PreTrainedModel):
    """Causal Cross-Attention Mask Projector"""
    config_class = CCAMConfig
    _auto_class = 'AutoModel'
    _supports_sdpa = True
    _no_split_modules = ['CCAMCrossAttention', 'CCAMMLP']

    def __init__(self, config):
        super().__init__(config)
        self.num_query = config.num_query
        self.hidden_size = config.hidden_size
        self.output_size = config.output_size
        self.cross_hidden_size = config.cross_hidden_size

        self.query = nn.Parameter(torch.empty(1, self.num_query, self.hidden_size).normal_(mean=.0, std=.02))
        self.pre_ccam = nn.Sequential(
            nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps),
            nn.Dropout(config.dropout)
        )
        self.ccam = CCAMCrossAttention(config)
        self.post_ccam = nn.Sequential(
            nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps),
            nn.Dropout(config.dropout),
            CCAMMLP(config)
        )

        self.post_init()

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=.0, std=.02)
            if hasattr(module, "bias") and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()

    def _get_mask(self, vision_hidden_state: torch.Tensor) -> torch.Tensor:  # (Q, T*L)
        """Compute CCAM Mask for vision hidden state

        Args:
            vision_hidden_state (torch.Tensor): (T, L, C)

        Returns:
            torch.Tensor: (Q, T*L) -inf means masked
        """
        T, L, _ = vision_hidden_state.size()
        dtype, device = vision_hidden_state.dtype, vision_hidden_state.device
        base_mask = torch.zeros(T, T, dtype=dtype, device=device)
        t = torch.arange(T, device=device)
        base_mask.masked_fill_(t > t[:, None], float('-inf'))
        attention_mask = torch.zeros(self.num_query, T * L, dtype=dtype, device=device)
        attention_mask[:self.num_query // T * T] = torch.kron(base_mask, torch.ones(self.num_query // T, L, dtype=dtype, device=device))
        return attention_mask

    def forward(self, vision_hidden_states: list[torch.Tensor]) -> torch.Tensor:  # (B, Q, C)
        """Forward function, do not collect batch due to the support of zero3

        Args:
            vision_hidden_states (list[torch.Tensor]): [(t0, L, C), (t1, L, C), ...]

        Returns:
            torch.Tensor: (B, Q, C)
        """
        output = []
        for hidden_states in vision_hidden_states:
            # reshape inputs and construct ccam masks
            attention_mask = self._get_mask(hidden_states)  # (Q, ti * L)
            # forward
            x = self.pre_ccam(self.query)  # (1, Q, C)
            x = self.ccam(
                hidden_states=x,  # (1, Q, C)
                cross_hidden_states=hidden_states.flatten(0, 1)[None],  # (1, ti * L, C')
                attention_mask=attention_mask[None]  # (1, Q, ti * L)
            ) + x
            x = self.post_ccam(x)
            output.append(x)
        output = torch.cat(output, dim=0)

        return output
```
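The mask built by `_get_mask` splits the learnable queries evenly across the T sampled frames and lets the i-th group of queries attend only to the patch tokens of frames 0..i, which is what makes the projector causal in time. A toy illustration of the same `torch.kron` construction (toy sizes; the released checkpoint uses 1024 queries):

```
import torch

Q, T, L = 8, 4, 3  # toy sizes: 8 queries, 4 frames, 3 patch tokens per frame

# frame-level causal mask: frame i may only see frames 0..i
base_mask = torch.zeros(T, T)
t = torch.arange(T)
base_mask.masked_fill_(t > t[:, None], float('-inf'))

# expand to (Q, T*L): each group of Q//T queries owns one time step
attention_mask = torch.zeros(Q, T * L)
attention_mask[:Q // T * T] = torch.kron(base_mask, torch.ones(Q // T, L))

print((attention_mask == 0).int())  # 1 = visible, 0 = masked; the visible block widens frame by frame
```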
vision_encoder_adapter/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: /group/40056/jaronfei/models/siglip-so400m-patch14-384
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
### Framework versions

- PEFT 0.12.0
vision_encoder_adapter/adapter_config.json
ADDED
@@ -0,0 +1,36 @@
```
{
  "alpha_pattern": {},
  "auto_mapping": {
    "base_model_class": "SiglipVisionModel",
    "parent_library": "transformers.models.siglip.modeling_siglip"
  },
  "base_model_name_or_path": "/group/40056/jaronfei/models/siglip-so400m-patch14-384",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 128,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "q_proj",
    "k_proj",
    "fc1",
    "v_proj",
    "out_proj",
    "fc2"
  ],
  "task_type": null,
  "use_dora": false,
  "use_rslora": false
}
```
vision_encoder_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:0dde55c1ecf838bc59c8d5292962b0a972be1a8a6e53d25ef4b5cdd97d41c94e
size 142557288
```