Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer , AutoModelForImageTextToText , LlavaNextProcessor ,LlavaNextForConditionalGeneration ,Qwen2VLForConditionalGeneration
+from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer , AutoModelForImageTextToText , LlavaNextProcessor ,LlavaNextForConditionalGeneration ,Qwen2VLForConditionalGeneration , Qwen2_5_VLForConditionalGeneration
 from PIL import Image
 import requests
 import torch
@@ -7,8 +7,8 @@ import gradio as gr
 from gradio import FileData
 import time
 import spaces
-ckpt = "
-model =
+ckpt = "Qwen/Qwen2.5-VL-7B-Instruct"
+model = Qwen2_5_VLForConditionalGeneration.from_pretrained(ckpt,
 torch_dtype=torch.bfloat16, trust_remote_code=True ).to("cuda")
 processor = AutoProcessor.from_pretrained(ckpt,trust_remote_code=True)
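For reference, a minimal sketch of how the newly loaded Qwen2.5-VL checkpoint and its processor could be exercised for a single image + text turn. The message structure, the example.jpg path, and the generation settings are illustrative assumptions, not taken from this Space's app.py (which, judging by the TextIteratorStreamer and gradio imports, builds its prompt from the chat history and streams tokens instead of decoding in one shot).

from PIL import Image

# Hypothetical single-turn example; `model` and `processor` are the objects created in app.py above.
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ]},
]
# Build the Qwen chat prompt with an image placeholder token.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
image = Image.open("example.jpg")  # placeholder input image, assumed for illustration
inputs = processor(text=prompt, images=[image], return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=128)
# Decode only the newly generated tokens, skipping the prompt portion.
reply = processor.decode(output_ids[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
print(reply)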