from PIL import Image
import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the BLIP captioning model and its processor from the Hugging Face Hub
model_id = "Salesforce/blip-image-captioning-base"
model = BlipForConditionalGeneration.from_pretrained(model_id)
processor = BlipProcessor.from_pretrained(model_id)

def generate_caption(image_array):
    # Convert numpy array to PIL Image
    image = Image.fromarray(image_array.astype('uint8')).convert('RGB')
    # Process the image to generate tensor inputs
    inputs = processor(image, return_tensors="pt")
    # Generate caption for the image
    out = model.generate(**inputs)
    # Decode and return the generated caption
    return processor.decode(out[0], skip_special_tokens=True)

# Gradio interface setup to accept image input and produce text output
iface = gr.Interface(generate_caption, inputs="image", outputs="text")

# Launch the interface
iface.launch()
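
# Optional local smoke test (a minimal sketch, not part of the Space itself):
# it assumes an image file named "example.jpg" sits next to this script — the
# filename is purely illustrative. Run these lines in place of iface.launch()
# above to check the captioning pipeline without starting the web UI.
#
# import numpy as np
# test_image = np.array(Image.open("example.jpg").convert("RGB"))
# print(generate_caption(test_image))  # prints the generated caption string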