greencatted committed
Commit: 8621c56
1 Parent(s): ff08c8a
Fixes
app.py
CHANGED
@@ -6,7 +6,7 @@ from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
-    "Salesforce/blip2-opt-2.7b",
+    "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16
)


@@ -16,7 +16,7 @@ picture = st.camera_input("Take a picture", disabled=not enable)
if picture:
    image = Image.open(picture)
    prompt = "Question: At what location is this person most likely attending this online meeting? Answer:"
-    inputs = processor(images=image, text=prompt, return_tensors="pt")
+    inputs = processor(images=image, text=prompt, return_tensors="pt")

    generated_ids = model.generate(**inputs)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
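For context, here is a minimal sketch of the whole app after this commit. Loading the 2.7B-parameter BLIP-2 model with torch_dtype=torch.float16 roughly halves its memory footprint compared with float32, which is the point of the fix. Only the lines shown in the diff are confirmed; the Streamlit/PIL/torch imports, the "enable" checkbox, the cast of the inputs to float16, and the st.write at the end are assumptions, and the sketch assumes hardware where fp16 inference works (typically a GPU).

# Hypothetical reconstruction of app.py after commit 8621c56; only the lines
# that appear in the diff above are confirmed, the rest is assumed.
import streamlit as st
import torch
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16  # half-precision weights
)

enable = st.checkbox("Enable camera")  # assumed; the diff only shows `disabled=not enable`
picture = st.camera_input("Take a picture", disabled=not enable)

if picture:
    image = Image.open(picture)
    prompt = "Question: At what location is this person most likely attending this online meeting? Answer:"
    # The diff keeps this line unchanged; with fp16 weights the image tensors
    # usually need to be cast to float16 (and moved to the model's device) too.
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device, torch.float16)

    generated_ids = model.generate(**inputs)
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    st.write(generated_text)  # assumed output step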
conda
ADDED
@@ -0,0 +1,4 @@
+Pillow
+transformers
+torch
+accelerate
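The new conda file is a plain package list (Pillow, transformers, torch, accelerate). A small, hypothetical sanity check that the listed packages import correctly and that a CUDA device is available for the fp16 weights might look like this; only the package names come from the file, everything else is an assumption:

# Hypothetical environment check for the dependencies listed in `conda`.
import importlib

import torch

# Pillow is imported as PIL; the other packages import under their own names.
for name in ("PIL", "transformers", "torch", "accelerate"):
    module = importlib.import_module(name)  # raises ImportError if the package is missing
    print(name, getattr(module, "__version__", "unknown"))

# float16 weights are mainly worthwhile when a CUDA device is present.
print("CUDA available:", torch.cuda.is_available())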