Create app.py
app.py
ADDED
@@ -0,0 +1,85 @@
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import scipy.io.wavfile as wavfile
from transformers import pipeline

# Load pipelines
narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")
object_detector = pipeline("object-detection", model="facebook/detr-resnet-50")

# Function to generate audio from text
def generate_audio(text):
    narrated_text = narrator(text)
    wavfile.write("output.wav", rate=narrated_text["sampling_rate"], data=narrated_text["audio"][0])
    return "output.wav"

# Function to read and summarize detected objects
def read_objects(detection_objects):
    object_counts = {}
    for detection in detection_objects:
        label = detection['label']
        object_counts[label] = object_counts.get(label, 0) + 1

    response = "This picture contains"
    labels = list(object_counts.keys())
    for i, label in enumerate(labels):
        response += f" {object_counts[label]} {label}"
        if object_counts[label] > 1:
            response += "s"
        if i < len(labels) - 2:
            response += ","
        elif i == len(labels) - 2:
            response += " and"
    response += "."
    return response

# Function to draw bounding boxes on the image
def draw_bounding_boxes(image, detections):
    draw_image = image.copy()
    draw = ImageDraw.Draw(draw_image)
    font = ImageFont.load_default()

    for detection in detections:
        box = detection['box']
        xmin, ymin, xmax, ymax = box['xmin'], box['ymin'], box['xmax'], box['ymax']
        draw.rectangle([(xmin, ymin), (xmax, ymax)], outline="red", width=3)

        label = detection['label']
        score = detection['score']
        text = f"{label} {score:.2f}"
        text_size = draw.textbbox((xmin, ymin), text, font=font)
        draw.rectangle([(text_size[0], text_size[1]), (text_size[2], text_size[3])], fill="red")
        draw.text((xmin, ymin), text, fill="white", font=font)

    return draw_image

# Main function to process the image
def detect_object(image):
    detections = object_detector(image)
    processed_image = draw_bounding_boxes(image, detections)
    description_text = read_objects(detections)
    processed_audio = generate_audio(description_text)
    return processed_image, processed_audio

# Gradio interface
description_text = """
# Multi-Object Detection with Audio Narration

Upload an image to detect objects and hear a natural language description.

### Credits:
Developed by Taizun S
"""

demo = gr.Interface(
    fn=detect_object,
    inputs=gr.Image(label="Upload an Image", type="pil"),
    outputs=[
        gr.Image(label="Processed Image", type="pil"),
        gr.Audio(label="Generated Audio")
    ],
    title="Multi-Object Detection and Narration",
    description=description_text,
)

demo.launch()
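Note: the dictionary keys that read_objects and draw_bounding_boxes rely on ('label', 'score', and 'box' with xmin/ymin/xmax/ymax) are the shape returned by the transformers object-detection pipeline, which yields one dict per detected object. A minimal sketch of that structure with made-up values, and the sentence read_objects would build from it:

detections = [
    {"score": 0.97, "label": "cat",
     "box": {"xmin": 34, "ymin": 20, "xmax": 310, "ymax": 285}},
    {"score": 0.91, "label": "dog",
     "box": {"xmin": 330, "ymin": 45, "xmax": 600, "ymax": 300}},
]
# read_objects(detections) -> "This picture contains 1 cat and 1 dog."

Running the Space locally assumes the usual dependencies (gradio, transformers, torch, scipy, Pillow, plus timm for the DETR checkpoint) are installed, typically via a requirements.txt alongside app.py.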