Update models/videochat.py
models/videochat.py (+25 -3)
@@ -143,19 +143,41 @@ class VideoChat(Blip2Base):
         for name, param in self.llama_model.named_parameters():
             param.requires_grad = False
         print('Loading LLAMA Done')
-
+        print('Current process memory usage: %.4f GB' % (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024))
+        info = psutil.virtual_memory()
+        print('Total system memory: %.4f GB' % (info.total / 1024 / 1024 / 1024))
+        print('Percent of total memory in use:', info.percent)
+        print('CPU count:', psutil.cpu_count())
         self.llama_proj = nn.Linear(
             self.Qformer.config.hidden_size, self.llama_model.config.hidden_size
         )
         self.max_txt_len = max_txt_len
-
+        print('Current process memory usage: %.4f GB' % (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024))
+        info = psutil.virtual_memory()
+        print('Total system memory: %.4f GB' % (info.total / 1024 / 1024 / 1024))
+        print('Percent of total memory in use:', info.percent)
+        print('CPU count:', psutil.cpu_count())
         # load weights of VideoChat
         if videochat_model_path:
+            print('Current process memory usage: %.4f GB' % (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024))
+            info = psutil.virtual_memory()
+            print('Total system memory: %.4f GB' % (info.total / 1024 / 1024 / 1024))
+            print('Percent of total memory in use:', info.percent)
+            print('CPU count:', psutil.cpu_count())
             print(f"Load VideoChat from: {videochat_model_path}")
             ckpt = torch.load(videochat_model_path, map_location="cpu")
+            print('ckpt load success. Current process memory usage: %.4f GB' % (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024))
+            info = psutil.virtual_memory()
+            print('Total system memory: %.4f GB' % (info.total / 1024 / 1024 / 1024))
+            print('Percent of total memory in use:', info.percent)
+            print('CPU count:', psutil.cpu_count())
             msg = self.load_state_dict(ckpt['model'], strict=False)
             print(msg)
-
+            print('Current process memory usage: %.4f GB' % (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024))
+            info = psutil.virtual_memory()
+            print('Total system memory: %.4f GB' % (info.total / 1024 / 1024 / 1024))
+            print('Percent of total memory in use:', info.percent)
+            print('CPU count:', psutil.cpu_count())
     def vit_to_cpu(self):
         self.ln_vision.to("cpu")
         self.ln_vision.float()
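Note: the prints in this hunk rely on `psutil` and `os` being imported near the top of models/videochat.py, which is outside the changed range and assumed here. Below is a minimal sketch of how the four repeated debug prints could be bundled into a single helper; `log_memory_usage` is a hypothetical name used only for illustration and is not part of this commit.

# Sketch only: bundles the repeated psutil debug prints from this diff
# into one reusable function. Assumes psutil is installed.
import os

import psutil


def log_memory_usage(tag: str = "") -> None:
    """Print process RSS, total RAM, RAM usage percent, and CPU count."""
    gib = 1024 ** 3
    rss = psutil.Process(os.getpid()).memory_info().rss
    info = psutil.virtual_memory()
    print('%sCurrent process memory usage: %.4f GB' % (tag, rss / gib))
    print('Total system memory: %.4f GB' % (info.total / gib))
    print('Percent of total memory in use:', info.percent)
    print('CPU count:', psutil.cpu_count())


# Example usage, bracketing a heavy load the same way the diff does:
#     log_memory_usage()
#     ckpt = torch.load(videochat_model_path, map_location="cpu")
#     log_memory_usage('ckpt load success. ')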