Commit 8efa29f
Parent(s): e71fd19
Rename rest (4).py to app.py

Files changed:
- app.py +40 -0
- rest (4).py +0 -77
app.py
ADDED
@@ -0,0 +1,40 @@
+# Roof segmentation demo (YOLOv8)
+
+import gradio as gr
+import numpy as np
+from ultralytics import YOLO
+import cv2
+
+# YOLOv8 segmentation weights shipped with the Space
+model = YOLO("best2.pt")
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        gr.Markdown("## Web app that segments the roof from a house image using YOLOv8")
+    with gr.Row():
+        gr.Markdown("### Please upload an image of the house, then click on Segment.")
+    with gr.Row():
+        gr.Markdown("YOLOv8 will then segment the roof.")
+
+    with gr.Row():
+        input_image = gr.Image(source="upload", elem_id="input_image_upload", label="Upload Image")
+        output_image = gr.Image(label="Output")
+    with gr.Row():
+        segment_btn = gr.Button("Segment")
+
+    def segment_(input_):
+        # Run segmentation; retina_masks=True keeps masks at the input resolution.
+        results = model.predict(source=input_, stream=True, retina_masks=True)
+        for result in results:
+            if result.masks is None:
+                return input_  # nothing detected, return the image unchanged
+            # Boolean masks of shape (n, H, W), one per detected roof.
+            masks = result.masks.data.cpu().numpy().astype(bool)
+            # Copy only the masked (roof) pixels onto a near-black canvas.
+            new = np.ones_like(input_, dtype=np.uint8)
+            for m in masks:
+                new[m] = input_[m]
+            return new
+
+    segment_btn.click(segment_, inputs=[input_image], outputs=[output_image])
+demo.launch(share=True)
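For quick testing outside Gradio, the mask-compositing step in segment_ can be exercised directly. The following is a minimal sketch, not part of the commit: it assumes the Space's best2.pt weights plus a hypothetical house.jpg input, and uses the current Ultralytics masks API (result.masks.data) rather than the older mask.masks attribute the original code was written against.

# Sketch: keep only the pixels YOLOv8 segments as roof.
import cv2
import numpy as np
from ultralytics import YOLO

model = YOLO("best2.pt")        # same weights the app loads
img = cv2.imread("house.jpg")   # hypothetical test image (BGR, HxWx3)

result = model.predict(source=img, retina_masks=True)[0]
out = np.ones_like(img, dtype=np.uint8)  # near-black canvas, as in the app
if result.masks is not None:
    masks = result.masks.data.cpu().numpy().astype(bool)  # (n, H, W)
    for m in masks:
        out[m] = img[m]  # copy roof pixels through the boolean mask
cv2.imwrite("roof_only.jpg", out)

Note that np.ones_like leaves the background at pixel value 1 rather than true black; np.zeros_like would give a pure black background, but the app's choice is kept here.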
rest (4).py
DELETED
@@ -1,77 +0,0 @@
-#best restoration model
-
-import gradio as gr
-import numpy as np
-import torch
-from ultralytics import YOLO
-import cv2
-from segment_anything import sam_model_registry, SamAutomaticMaskGenerator, SamPredictor
-import torch
-import torchvision
-
-device = "cuda"
-sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth").to(device=torch.device('cuda:0'))
-mask_predictor = SamPredictor(sam)
-model = YOLO("/content/window_model.pt")
-
-with gr.Blocks() as demo:
-    with gr.Row():
-        gr.Markdown("## Web app that segement the windows from house image using YLOV8 and SAM")
-    with gr.Row():
-        gr.Markdown("### Please upload an image of the house, then click on Detect the bounding box.")
-    with gr.Row():
-        gr.Markdown("Then the yolov8 will detect the bounding boxes of the windows, and feed them to SAM.")
-    with gr.Row():
-        gr.Markdown("Note:it is not important if the YOLOV8 detect the wrong item,what is important if SAM segement the detect object very well.")
-    with gr.Row():
-        gr.Markdown("I will imporve the accuracy of YOLOV8 if SAM perform well.")
-
-    with gr.Row():
-        input_image = gr.Image(source='upload', elem_id="input_image_upload", label="UploadImage")
-        detected_output = gr.Image(label="detected windowss")
-        output_image = gr.Image(label="output")
-    with gr.Row():
-        detect_bx = gr.Button("Detect the bounding boxes")
-        segement_an = gr.Button("Segement")
-    def detect_(input_):
-
-        results = model.predict(input_,conf=0.25)
-        predicted_boxes = results[0].boxes.xyxy
-        for result in predicted_boxes:
-            new = np.ones_like(input_, dtype=np.uint8)
-            mask = result.masks.cpu().numpy()
-            masks = mask.masks.astype(bool)
-            for m in masks:
-                new[m] = input_[m]
-        return new
-    def segement_anythings(input_):
-        predicted_boxes = predict_(input_)
-        transformed_boxes = mask_predictor.transform.apply_boxes_torch(predicted_boxes, input_.shape[:2])
-        mask_predictor.set_image(input_)
-        masks, scores, logits = mask_predictor.predict_torch(
-            boxes = transformed_boxes,
-            multimask_output=False,
-            point_coords=None,
-            point_labels=None
-        )
-        new = np.ones_like(input_, dtype=np.uint8)
-        for mask in masks:
-            k = mask.cpu().numpy()[0]
-            new[k] = input_[k]
-        return new
-    def predict_(input_):
-        results = model.predict(source=input_, conf=0.25)
-        predicted_boxes = results[0].boxes.xyxy
-        return predicted_boxes
-    def detect_b(input_):
-        predicted_boxes = predict_(input_)
-        for box in predicted_boxes:
-            cv2.rectangle(input_, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)
-        return input_
-
-    detect_bx.click(detect_b, inputs=[input_image], outputs=[detected_output])
-    segement_an.click(segement_anythings, inputs=[input_image], outputs=[output_image])
-demo.launch(share=True)
-
-
-
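The deleted script's core idea was to use YOLOv8 boxes as prompts for SAM. Condensed into one function, the hand-off looks roughly as below; this is a sketch under the same assumptions as the original (a CUDA device, the sam_vit_h_4b8939.pth checkpoint, and the window_model.pt detector), using the segment-anything box-prompt API.

import numpy as np
import torch
from segment_anything import sam_model_registry, SamPredictor
from ultralytics import YOLO

device = torch.device("cuda:0")
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth").to(device)
predictor = SamPredictor(sam)
detector = YOLO("window_model.pt")

def segment_windows(image: np.ndarray) -> np.ndarray:
    # 1. Detect windows; keep the (n, 4) xyxy boxes.
    boxes = detector.predict(source=image, conf=0.25)[0].boxes.xyxy
    # 2. Embed the image once, then prompt SAM with all boxes at once.
    predictor.set_image(image)  # expects uint8 RGB
    tboxes = predictor.transform.apply_boxes_torch(boxes.to(device), image.shape[:2])
    masks, _, _ = predictor.predict_torch(
        point_coords=None, point_labels=None,
        boxes=tboxes, multimask_output=False)  # (n, 1, H, W) booleans
    # 3. Composite the masked pixels onto a near-black canvas.
    out = np.ones_like(image, dtype=np.uint8)
    for m in masks:
        keep = m.cpu().numpy()[0]
        out[keep] = image[keep]
    return out

With multimask_output=False, SAM returns one (1, H, W) mask per box, which is why the original loop indexed mask.cpu().numpy()[0].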