kiddobellamy committed
Commit 8e761cc
Parent(s): 42c2dda
Update handler.py

handler.py CHANGED (+53 -39)
@@ -1,39 +1,53 @@
+import requests
+import torch
+from PIL import Image
+from transformers import MllamaForConditionalGeneration, AutoProcessor
+
+# Define the model ID and load the model and processor
+model_id = "meta-llama/Llama-3.2-90B-Vision-Instruct"
+
+def load_model():
+    """Loads the Llama 3.2-90B Vision-Instruct model and processor."""
+    model = MllamaForConditionalGeneration.from_pretrained(
+        model_id,
+        torch_dtype=torch.bfloat16,
+        device_map="auto",
+    )
+    processor = AutoProcessor.from_pretrained(model_id)
+    return model, processor
+
+def process_image(url):
+    """Processes the image from the given URL."""
+    image = Image.open(requests.get(url, stream=True).raw)
+    return image
+
+def generate_response(model, processor, image, prompt):
+    """Generates a text response based on the image and the prompt."""
+    messages = [
+        {"role": "user", "content": [
+            {"type": "image"},
+            {"type": "text", "text": prompt}
+        ]}
+    ]
+    input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
+    inputs = processor(image, input_text, return_tensors="pt").to(model.device)
+    output = model.generate(**inputs, max_new_tokens=30)
+    return processor.decode(output[0])
+
+def main():
+    # Load model and processor
+    model, processor = load_model()
+
+    # Sample image URL
+    url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"
+    image = process_image(url)
+
+    # Define a sample prompt
+    prompt = "If I had to write a haiku for this one, it would be:"
+
+    # Generate response
+    response = generate_response(model, processor, image, prompt)
+    print(response)
+
+if __name__ == "__main__":
+    main()
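
Note: in bfloat16 the 90B checkpoint needs roughly 180 GB of accelerator memory (90B parameters at 2 bytes each), so `device_map="auto"` will shard it across whatever GPUs are visible. If that much memory isn't available, one option (not part of this commit, and assuming the `bitsandbytes` package is installed) is to load the weights 4-bit quantized; a minimal sketch:

```python
import torch
from transformers import MllamaForConditionalGeneration, AutoProcessor, BitsAndBytesConfig

model_id = "meta-llama/Llama-3.2-90B-Vision-Instruct"

# Hypothetical alternative to the commit's bf16 load: quantize weights to 4-bit.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # store weights in 4-bit
    bnb_4bit_compute_dtype=torch.bfloat16,  # run compute in bf16
)

model = MllamaForConditionalGeneration.from_pretrained(
    model_id,
    quantization_config=quant_config,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(model_id)
```

The rest of the script is unchanged under this variant; `generate_response` works the same against the quantized model, at some cost in output quality and speed.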