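"""Car damage detection dashboard.

Runs three fine-tuned Detectron2 Mask R-CNN models (damage, scratches, and
car parts) on an input image, intersects their masks to report which part
carries which kind of damage, and serves the results through a Gradio UI.
"""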
# Ensure detectron2 is available; if the import fails, install it from
# source at runtime (a common pattern on Hugging Face Spaces).
try:
    import detectron2
except ImportError:
    import os
    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
    import detectron2

import gradio as gr
import numpy as np
import cv2
import torch

from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances, Boxes
# Paths to the fine-tuned model weights.
damage_model_path = 'damage/model_final.pth'
scratch_model_path = 'scratch/model_final.pth'
parts_model_path = 'parts/model_final.pth'

# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
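
# All three detectors below share the same Mask R-CNN R50-FPN base config
# from the model zoo; they differ only in their fine-tuned weights, score
# thresholds, and number of classes.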
cfg_scratches = get_cfg()
cfg_scratches.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg_scratches.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8
cfg_scratches.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg_scratches.MODEL.WEIGHTS = scratch_model_path
cfg_scratches.MODEL.DEVICE = device
predictor_scratches = DefaultPredictor(cfg_scratches)
metadata_scratch = MetadataCatalog.get("car_dataset_val")
metadata_scratch.thing_classes = ["scratch"]
cfg_damage = get_cfg()
cfg_damage.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg_damage.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
cfg_damage.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg_damage.MODEL.WEIGHTS = damage_model_path
cfg_damage.MODEL.DEVICE = device
predictor_damage = DefaultPredictor(cfg_damage)
metadata_damage = MetadataCatalog.get("car_damage_dataset_val")
metadata_damage.thing_classes = ["damage"]
cfg_parts = get_cfg()
cfg_parts.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg_parts.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.75
cfg_parts.MODEL.ROI_HEADS.NUM_CLASSES = 19
cfg_parts.MODEL.WEIGHTS = parts_model_path
cfg_parts.MODEL.DEVICE = device
predictor_parts = DefaultPredictor(cfg_parts)
metadata_parts = MetadataCatalog.get("car_parts_dataset_val")
metadata_parts.thing_classes = [
    '_background_',
    'back_bumper',
    'back_glass',
    'back_left_door',
    'back_left_light',
    'back_right_door',
    'back_right_light',
    'front_bumper',
    'front_glass',
    'front_left_door',
    'front_left_light',
    'front_right_door',
    'front_right_light',
    'hood',
    'left_mirror',
    'right_mirror',
    'tailgate',
    'trunk',
    'wheel',
]
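# Note: the 19 entries above match cfg_parts.MODEL.ROI_HEADS.NUM_CLASSES = 19.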

def merge_segment(pred_segm):
    """Merge overlapping predicted masks into single instances."""
    # For each mask, record the indices of later masks that overlap it.
    merge_dict = {}
    for i in range(len(pred_segm)):
        merge_dict[i] = []
        for j in range(i + 1, len(pred_segm)):
            if torch.sum(pred_segm[i] * pred_segm[j]) > 0:
                merge_dict[i].append(j)

    # Masks that get absorbed into an earlier mask are dropped as keys.
    to_delete = []
    for key in merge_dict:
        for element in merge_dict[key]:
            to_delete.append(element)
    for element in to_delete:
        merge_dict.pop(element, None)

    # Masks that overlap nothing need no merging; drop them as keys too.
    empty_delete = []
    for key in merge_dict:
        if merge_dict[key] == []:
            empty_delete.append(key)
    for element in empty_delete:
        merge_dict.pop(element, None)

    # Sum each group of overlapping masks into its first member.
    for key in merge_dict:
        for element in merge_dict[key]:
            pred_segm[key] += pred_segm[element]

    # Keep every mask that was not merged into another one.
    except_elem = list(set(to_delete))
    new_indexes = list(range(len(pred_segm)))
    for elem in except_elem:
        new_indexes.remove(elem)
    return pred_segm[new_indexes]
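
# A minimal sketch of merge_segment's behavior (hypothetical 4x4 masks, for
# illustration only): overlapping masks are summed into one instance, while
# disjoint masks pass through unchanged.
#
#   masks = torch.zeros((3, 4, 4))
#   masks[0, 0:2, 0:2] = 1       # overlaps masks[1] at (1, 1)
#   masks[1, 1:3, 1:3] = 1
#   masks[2, 3:4, 3:4] = 1       # disjoint from the others
#   merge_segment(masks).shape   # -> torch.Size([2, 4, 4])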

def inference(image):
    img = np.array(image)

    # Run all three detectors on the same image.
    outputs_damage = predictor_damage(img)
    outputs_parts = predictor_parts(img)
    outputs_scratch = predictor_scratches(img)

    # Move every prediction to the CPU so the masks can be combined and
    # converted to numpy below.
    out_dict = outputs_damage["instances"].to("cpu").get_fields()
    merged_damage_masks = merge_segment(out_dict['pred_masks'])
    scratch_data = outputs_scratch["instances"].to("cpu").get_fields()
    scratch_masks = scratch_data['pred_masks']
    damage_data = outputs_damage["instances"].to("cpu").get_fields()
    damage_masks = damage_data['pred_masks']
    parts_data = outputs_parts["instances"].to("cpu").get_fields()
    parts_masks = parts_data['pred_masks']
    parts_classes = parts_data['pred_classes']

    # Wrap the merged damage masks in a fresh Instances object, sized to the
    # input image, for visualization.
    new_inst = Instances(img.shape[:2])
    new_inst.set('pred_masks', merged_damage_masks)

    # Map each detected part to the list of damage types found on it.
    parts_damage_dict = {}
    parts_list_damages = []
    for part in parts_classes:
        parts_damage_dict[metadata_parts.thing_classes[part]] = []

    # A scratch belongs to a part if their masks intersect.
    for mask in scratch_masks:
        for i in range(len(parts_masks)):
            if torch.sum(parts_masks[i] * mask) > 0:
                part_name = metadata_parts.thing_classes[parts_classes[i]]
                parts_damage_dict[part_name].append('scratch')
                parts_list_damages.append(f'{part_name} has scratch')
                print(f'{part_name} has scratch')

    # Likewise, a merged damage mask belongs to every part it intersects.
    for mask in merged_damage_masks:
        for i in range(len(parts_masks)):
            if torch.sum(parts_masks[i] * mask) > 0:
                part_name = metadata_parts.thing_classes[parts_classes[i]]
                parts_damage_dict[part_name].append('damage')
                parts_list_damages.append(f'{part_name} has damage')
                print(f'{part_name} has damage')

    # Build a binary overlay that highlights all scratch and damage pixels.
    damage_color = (0, 255, 255)  # yellow in BGR order

    # Convert the predicted masks to numpy arrays.
    scratch_masks_arr = scratch_masks.numpy()
    damage_masks_arr = damage_masks.numpy()

    # Collapse the per-instance masks into one mask per category, resized to
    # the original image; use an empty mask when nothing was detected.
    if len(scratch_masks_arr) > 0:
        scratch_mask_resized = cv2.resize(
            scratch_masks_arr.any(axis=0).astype(np.uint8),
            (img.shape[1], img.shape[0]))
    else:
        scratch_mask_resized = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
    if len(damage_masks_arr) > 0:
        damage_mask_resized = cv2.resize(
            damage_masks_arr.any(axis=0).astype(np.uint8),
            (img.shape[1], img.shape[0]))
    else:
        damage_mask_resized = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)

    # Merge the scratch and damage masks into a single binary mask.
    merged_mask = np.zeros_like(scratch_mask_resized)
    merged_mask[(scratch_mask_resized > 0) | (damage_mask_resized > 0)] = 255

    # Paint the merged regions green, then the damage regions on top.
    # (cv2.addWeighted(overlay, 0.5, img, 0.5, 0) would give a
    # semi-transparent blend instead.)
    overlay = img.copy()
    overlay[merged_mask == 255] = (0, 255, 0)
    overlay[damage_mask_resized > 0] = damage_color

    # Concatenate the box predictions from the scratch and damage detectors
    # into a single Instances object so they can be drawn together.
    pred_boxes_scratch = outputs_scratch["instances"].pred_boxes.tensor.cpu()
    pred_boxes_damage = outputs_damage["instances"].pred_boxes.tensor.cpu()
    merged_boxes = torch.cat([pred_boxes_scratch, pred_boxes_damage], dim=0)
    merged_instances = Instances(img.shape[:2])
    merged_instances.pred_boxes = Boxes(merged_boxes)

    # Visualize each model's predictions: merged damages, raw scratches, and
    # detected parts. ColorMode.SEGMENTATION uses the dataset's per-class
    # colors where the metadata provides them.
    v_d = Visualizer(img, metadata=metadata_damage, scale=1.2,
                     instance_mode=ColorMode.SEGMENTATION)
    out_d = v_d.draw_instance_predictions(new_inst)
    img1 = out_d.get_image()[:, :, ::-1]

    v_s = Visualizer(img, metadata=metadata_scratch, scale=1.2,
                     instance_mode=ColorMode.SEGMENTATION)
    out_s = v_s.draw_instance_predictions(outputs_scratch["instances"].to("cpu"))
    img2 = out_s.get_image()[:, :, ::-1]

    v_p = Visualizer(img, metadata=metadata_parts, scale=1.2,
                     instance_mode=ColorMode.SEGMENTATION)
    out_p = v_p.draw_instance_predictions(outputs_parts["instances"].to("cpu"))
    img3 = out_p.get_image()[:, :, ::-1]

    # Draw the merged scratch/damage boxes on top of the colored overlay.
    v_m = Visualizer(overlay[:, :, ::-1],
                     metadata=metadata_damage,
                     scale=1.2,
                     instance_mode=ColorMode.SEGMENTATION)
    out = v_m.draw_instance_predictions(merged_instances)
    output = out.get_image()[:, :, ::-1]

    return img1, img2, img3, '\n'.join(parts_list_damages), output

# Build the Gradio dashboard: one image input, five tabbed outputs.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.HTML("<h1 style='text-align: center;'>Damage Detection Dashboard</h1>")
            gr.Markdown("## Inputs")
            image = gr.Image(type="pil", label="Input")
            submit_button = gr.Button(value="Submit")
        with gr.Column():
            gr.Markdown("## Outputs")
            with gr.Tab('Image of damages'):
                im1 = gr.Image(type='numpy', label='Image of damages')
            with gr.Tab('Image of scratches'):
                im2 = gr.Image(type='numpy', label='Image of scratches')
            with gr.Tab('Image of parts'):
                im3 = gr.Image(type='numpy', label='Image of car parts')
            with gr.Tab('Information about damaged parts'):
                intersections = gr.Textbox(label='Information about type of damages on each part')
            with gr.Tab('Image of overlayed damage parts'):
                overlayed = gr.Image(type='numpy', label='Image of overlayed damage parts')

    # Wire the submit button to the inference function.
    submit_button.click(
        fn=inference,
        inputs=[image],
        api_name="/predict",
        outputs=[im1, im2, im3, intersections, overlayed],
    )

if __name__ == "__main__":
    demo.launch()
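
# A minimal sketch of querying the running app programmatically with the
# gradio_client package (hypothetical URL; adjust to your deployment, and
# note that recent gradio_client versions expect handle_file() for image
# inputs):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict("car.jpg", api_name="/predict")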