import gradio as gr
import torch
import numpy as np
# Load the YOLOv5 model
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
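
# Optional tuning knobs on the hub model (standard YOLOv5 AutoShape
# attributes; the values shown are the library defaults, included here only
# as an illustration and left commented out):
# model.conf = 0.25  # minimum confidence for a detection to be kept
# model.iou = 0.45   # IoU threshold used by non-max suppression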
# Function to run inference on an image
def run_inference(image):
    # Convert the image from PIL format to a NumPy array for the model
    image = np.array(image)
    # Run YOLOv5 inference
    results = model(image)
    # render() draws the detections onto the image and returns it; since the
    # input came from PIL it is already RGB, so no color conversion is needed
    annotated_image = results.render()[0]
    return annotated_image
# Function to generate a summary for the detected objects
def generate_summary(image):
    results = model(image)
    # results.pandas().xyxy[0] is a DataFrame with one row per detection
    detected_objects = results.pandas().xyxy[0]
    if detected_objects.empty:
        return "No objects detected."
    summary = "Detected objects:\n\n"
    for _, obj in detected_objects.iterrows():
        summary += f"- {obj['name']} with confidence {obj['confidence']:.2f}\n"
    return summary
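
# The same DataFrame also exposes box coordinates via the standard YOLOv5
# pandas columns (xmin, ymin, xmax, ymax, confidence, class, name).
# Illustrative only; `image_array` is a placeholder and the app does not
# use the coordinates:
# df = model(image_array).pandas().xyxy[0]
# print(df[['name', 'xmin', 'ymin', 'xmax', 'ymax']])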
# Create the Gradio interface with improved UI
with gr.Blocks(css="""
    body {
        font-family: 'Poppins', sans-serif;
        background-color: #2B3D41;
        color: #F9B9D2;
    }
    header {
        background-color: #83A0A0;
        padding: 20px;
        text-align: center;
        border-radius: 10px;
        color: white;
        box-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
    }
    footer {
        background-color: #4C5F6B;
        padding: 10px;
        text-align: center;
        border-radius: 10px;
        color: white;
        margin-top: 20px;
        box-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
    }
    .btn-primary {
        background-color: #BCA0BC;
        color: #2B3D41;
        padding: 10px 20px;
        border-radius: 5px;
        font-weight: bold;
        border: none;
        cursor: pointer;
        transition: all 0.3s;
    }
    .btn-primary:hover {
        background-color: #F9B9D2;
        color: #2B3D41;
    }
    .gr-box {
        background-color: #4C5F6B;
        border-radius: 10px;
        padding: 20px;
        color: #F9B9D2;
        box-shadow: 0 4px 10px rgba(0, 0, 0, 0.3);
    }
    .gr-input {
        background-color: #BCA0BC;
        border-radius: 5px;
        border: none;
        padding: 10px;
        color: #2B3D41;
    }
""") as demo:
    with gr.Row():
        gr.Markdown("<h1 style='text-align:center; color:#F9B9D2;'>✨ InsightVision: Detect, Analyze, Summarize ✨</h1>")
    with gr.Row():
        with gr.Column(scale=2):
            image_input = gr.Image(label="Upload Image", type="pil", elem_classes="gr-input")
            with gr.Row():
                detect_button = gr.Button("Run Detection", elem_classes="btn-primary")
        with gr.Column(scale=3):
            annotated_image_output = gr.Image(label="Detected Image", type="pil", elem_classes="gr-box")
            summary_output = gr.Textbox(label="Detection Summary", lines=10, interactive=False, elem_classes="gr-box")
    # Actions for buttons: one click returns both the annotated image and the summary
    detect_button.click(
        fn=lambda image: (run_inference(image), generate_summary(np.array(image))),
        inputs=[image_input],
        outputs=[annotated_image_output, summary_output]
    )
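
    # Note: the lambda above runs the model twice per click (once inside
    # run_inference, once inside generate_summary). A single-pass variant is
    # sketched below; detect_and_summarize is a hypothetical helper, not part
    # of the original app:
    # def detect_and_summarize(image):
    #     results = model(np.array(image))  # one forward pass
    #     annotated = results.render()[0]   # RGB image with boxes drawn
    #     df = results.pandas().xyxy[0]
    #     summary = "Detected objects:\n\n" + "".join(
    #         f"- {row['name']} with confidence {row['confidence']:.2f}\n"
    #         for _, row in df.iterrows())
    #     return annotated, summary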
gr.Markdown("<footer>Made with ❤️ using Gradio and YOLOv5 | © 2024 InsightVision</footer>")
# Launch the interface
demo.launch()
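
# For local runs outside Spaces, standard Gradio launch options apply
# (illustrative, not part of the original app):
# demo.launch(share=True)  # also serves a temporary public URL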