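# Assumed dependencies (not pinned in the source; moondream2's remote code
# may also require extras such as einops):
#   pip install transformers torch gradio pillow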
from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import gradio as gr
# Load the model and tokenizer
model_id = "vikhyatk/moondream2"
revision = "2024-05-20"
model = AutoModelForCausalLM.from_pretrained(
model_id, trust_remote_code=True, revision=revision
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
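
# Optional (assumption: PyTorch built with CUDA is installed): move the model
# to a GPU for faster inference; moondream2 also runs on CPU, just more slowly.
# import torch
# model = model.to("cuda" if torch.cuda.is_available() else "cpu")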
def analyze_image_direct(image, question):
    # Encode the PIL image with moondream2's vision encoder
    enc_image = model.encode_image(image)
    # Generate an answer to the question, conditioned on the encoded image
    answer = model.answer_question(enc_image, question, tokenizer)
    return answer
# Create a Gradio interface
with gr.Blocks() as block:
    image = gr.Image(type="pil", label="Image")
    question = gr.Textbox(label="Question")
    output = gr.Textbox(label="Answer")
    submit = gr.Button("Submit")
    submit.click(fn=analyze_image_direct, inputs=[image, question], outputs=output)

block.launch()
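
# Example (hypothetical image path): query the model directly, without the UI:
#   img = Image.open("sample.jpg")
#   print(analyze_image_direct(img, "What is in this image?"))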