BhumikaMak committed
Commit · 4f93ba9
Parent(s): 34678a5

Add: support for yolov8

Files changed:
- requirements.txt +2 -1
- yolov8.py +13 -40
requirements.txt CHANGED

@@ -6,4 +6,5 @@ pillow
 opencv-python
 git+https://github.com/jacobgil/pytorch-grad-cam.git
 gradio
-ultralytics
+ultralytics
+pip install yolov8-explainer
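The new ultralytics entry is the package that provides the YOLO class used throughout yolov8.py. As a minimal, standalone sketch of that dependency (not part of this commit; the sample image URL is arbitrary and the 0.2 threshold simply mirrors parse_detections_yolov8):

from ultralytics import YOLO

# Load the YOLOv8 nano checkpoint (downloaded on first use) and run one prediction.
model = YOLO('yolov8n.pt')
results = model('https://ultralytics.com/images/bus.jpg')

# Iterate over detected boxes the same way parse_detections_yolov8 does.
for box in results[0].boxes:
    confidence = box.conf[0].item()
    if confidence < 0.2:  # skip low-confidence detections, as in the commit
        continue
    xmin, ymin, xmax, ymax = map(int, box.xyxy[0].tolist())
    category = int(box.cls[0].item())
    print(results[0].names[category], round(confidence, 2), (xmin, ymin, xmax, ymax))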
yolov8.py CHANGED

@@ -1,19 +1,18 @@
-import torch
+from yolov8_explainer import YOLOv8Explainer
+from ultralytics import YOLO
 import cv2
 import numpy as np
 from PIL import Image
-import torchvision.transforms as transforms
-from pytorch_grad_cam import EigenCAM
-from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
-from ultralytics import YOLO
 
+# Set random colors for detection bounding boxes
 COLORS = np.random.uniform(0, 255, size=(80, 3))
+
 def parse_detections_yolov8(results):
     boxes, colors, names = [], [], []
     detections = results.boxes
     for box in detections:
         confidence = box.conf[0].item()
-        if confidence < 0.2:
+        if confidence < 0.2:  # Filter out low-confidence detections
             continue
         xmin, ymin, xmax, ymax = map(int, box.xyxy[0].tolist())
         category = int(box.cls[0].item())
@@ -23,7 +22,6 @@ def parse_detections_yolov8(results):
         names.append(name)
     return boxes, colors, names
 
-
 def draw_detections(boxes, colors, names, img):
     for box, color, name in zip(boxes, colors, names):
         xmin, ymin, xmax, ymax = box
@@ -33,49 +31,24 @@ def draw_detections(boxes, colors, names, img):
                     lineType=cv2.LINE_AA)
     return img
 
-
-def generate_cam_image(model, target_layers, tensor, rgb_img, boxes):
-    class YOLOWrapper(torch.nn.Module):
-        def __init__(self, model):
-            super(YOLOWrapper, self).__init__()
-            self.model = model
-
-        def forward(self, x):
-            return self.model.model.forward_once(x)  # Ensure correct layer is called
-
-    wrapped_model = YOLOWrapper(model)
-    cam = EigenCAM(wrapped_model, target_layers)
-    grayscale_cam = cam(tensor)[0, :, :]
-    img_float = np.float32(rgb_img) / 255
-    cam_image = show_cam_on_image(img_float, grayscale_cam, use_rgb=True)
-    renormalized_cam = np.zeros(grayscale_cam.shape, dtype=np.float32)
-    for x1, y1, x2, y2 in boxes:
-        renormalized_cam[y1:y2, x1:x2] = scale_cam_image(grayscale_cam[y1:y2, x1:x2].copy())
-    renormalized_cam = scale_cam_image(renormalized_cam)
-    renormalized_cam_image = show_cam_on_image(img_float, renormalized_cam, use_rgb=True)
-    return cam_image, renormalized_cam_image
-
 def xai_yolov8(image):
     # Load YOLOv8 model
     model = YOLO('yolov8n.pt')  # Load YOLOv8 nano model
     model.to('cpu')
     model.eval()
 
+    # Initialize YOLOv8 Explainer
+    explainer = YOLOv8Explainer(model)
+
     # Run YOLO detection
     results = model(image)
     boxes, colors, names = parse_detections_yolov8(results[0])
     detections_img = draw_detections(boxes, colors, names, image.copy())
 
-    #
-
-    transform = transforms.ToTensor()
-    tensor = transform(img_float).unsqueeze(0)
-
-    # Grad-CAM visualization
-    target_layers = [model.model.model[-2]]  # Adjust the target layer if required
-    cam_image, renormalized_cam_image = generate_cam_image(model.model, target_layers, tensor, image, boxes)
+    # Generate Grad-CAM visualization
+    cam_image, renormalized_cam_image = explainer.visualize(image, results)
 
-    # Combine
+    # Combine original, Grad-CAM, and renormalized Grad-CAM images
     final_image = np.hstack((image, cam_image, renormalized_cam_image))
-    caption = "Results using YOLOv8"
-    return Image.fromarray(final_image), caption
+    caption = "Results using YOLOv8 and YOLOv8Explainer"
+    return Image.fromarray(final_image), caption
|