mahmudunnabi commited on
Commit
6c33d37
·
verified ·
1 Parent(s): 6cb4d45

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +32 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers.utils import logging
3
+ from transformers import BlipForQuestionAnswering, AutoProcessor
4
+ from PIL import Image
5
# Silence transformers' informational warnings (e.g. weight-loading notices);
# only real errors are printed.
logging.set_verbosity_error()

# Load the BLIP visual-question-answering model and its matching processor.
# Weights are downloaded from the Hugging Face Hub on first run and cached.
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
10
+
11
# Define a function to process inputs and generate outputs
def predict(image, question):
    """Answer a free-form question about an image using the BLIP VQA model.

    Parameters
    ----------
    image : PIL.Image.Image or None
        Image from the Gradio upload widget; ``None`` when nothing was uploaded.
    question : str
        The user's question about the image.

    Returns
    -------
    str
        The model's decoded answer, or a short hint when an input is missing.
    """
    # Guard clauses: Gradio passes None / empty strings for missing inputs;
    # without these the processor call raises and the UI shows a traceback.
    if image is None:
        return "Please upload an image."
    if not question or not question.strip():
        return "Please ask a question about the image."
    inputs = processor(image, question.strip(), return_tensors="pt")
    out = model.generate(**inputs)
    # Decode the first (and only) generated sequence to plain text.
    return processor.decode(out[0], skip_special_tokens=True)
17
+
18
# Create the Gradio interface with custom Markdown and HTML formatting.
# Inputs map positionally to predict(image, question); the output textbox
# shows the string predict() returns.
demo = gr.Interface(
    fn=predict,
    inputs=[
        # type="pil" hands predict() a PIL.Image, matching the processor's input.
        gr.Image(type="pil", label="Upload Image"),
        gr.Textbox(label="Question", placeholder="Ask a question about the image")
    ],
    outputs=gr.Textbox(label="Answer"),
    # Single string literal continued across lines with backslashes; rendered as HTML.
    description="<h1 style='text-align: center; font-family: Times New Roman;'>Visual Question Answering</h1> \
    <p style='text-align: center; font-family: Times New Roman;'><strong>Model name:</strong> Salesforce/blip-vqa-base</p> \
    <p style='text-align: center; font-family: Times New Roman;'><strong>Made by:</strong> MD MAHMUDUN NABI</p>"
)

# Launch the Gradio interface (blocking call; serves the app locally).
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
gradio
transformers
torch
pillow