import os
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import gradio as gr
from PIL import Image
from segment_anything import SamAutomaticMaskGenerator, SamPredictor, sam_model_registry
plt.switch_backend('Agg')  # non-interactive backend so Matplotlib can render inside Gradio
# Description shown at the top of the demo (rendered via gr.Markdown inside the Blocks below)
description = """
# Segment Anything Model (SAM)
### A test on remote sensing data
- Paper: [Link](https://scontent-fml2-1.xx.fbcdn.net/v/t39.2365-6/10000000_900554171201033_1602411987825904100_n.pdf?_nc_cat=100&ccb=1-7&_nc_sid=3c67a6&_nc_ohc=Ald4OYhL6hgAX-ZcGmS&_nc_ht=scontent-fml2-1.xx&oh=00_AfDk4FvyiDYeXgflANA2CbdV6HSS8CcJmrvjSfTqsgUmog&oe=643500E7)
- GitHub: [https://github.com/facebookresearch/segment-anything](https://github.com/facebookresearch/segment-anything)
- Dataset: [https://ai.facebook.com/datasets/segment-anything-downloads/](https://ai.facebook.com/datasets/segment-anything-downloads/)
- Official Demo: [https://segment-anything.com/demo](https://segment-anything.com/demo)
"""
# Set up the SAM model
sam_checkpoint = "meta-model.pth"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use GPU if available
model_type = "default"
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
mask_generator = SamAutomaticMaskGenerator(sam)  # fully automatic mask generation
predictor = SamPredictor(sam)                    # prompt-based prediction (points/boxes)
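
# Illustrative sketch only (not wired into the UI below): `predictor` is created above but
# never used by the demo. Assuming an RGB uint8 numpy image and a hypothetical foreground
# point (x, y), prompt-based segmentation would look roughly like this:
def segment_with_point(image, x, y):
    """Example use of SamPredictor with a single foreground point prompt (sketch)."""
    predictor.set_image(image)                  # compute the image embedding once
    masks, scores, _ = predictor.predict(
        point_coords=np.array([[x, y]]),        # one prompt point in (x, y) pixel coords
        point_labels=np.array([1]),             # 1 = foreground point
        multimask_output=True,                  # return several candidate masks
    )
    return masks, scores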
def show_anns(anns):
    """Overlay the generated masks on the current Matplotlib axes."""
    if len(anns) == 0:
        return
    # Draw larger masks first so smaller ones stay visible on top
    sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
    ax = plt.gca()
    ax.set_autoscale_on(False)
    for ann in sorted_anns:
        m = ann['segmentation']  # boolean mask of shape (H, W)
        # Solid random-colour image, displayed with the mask as a 35% alpha channel
        img = np.ones((m.shape[0], m.shape[1], 3))
        color_mask = np.random.random((1, 3)).tolist()[0]
        for i in range(3):
            img[:, :, i] = color_mask[i]
        ax.imshow(np.dstack((img, m * 0.35)))
def segment_image(image):
    """Run automatic mask generation on an RGB image and return the overlay as a PIL image."""
    masks = mask_generator.generate(image)
    ppi = 100
    height, width, _ = image.shape
    plt.figure(figsize=(width / ppi, height / ppi), dpi=ppi)
    plt.imshow(image)
    show_anns(masks)
    plt.axis('off')
    plt.savefig('output.png', bbox_inches='tight', pad_inches=0)
    plt.close()  # free the figure so repeated calls do not accumulate memory
    # OpenCV loads images as BGR; convert to RGB before handing back to PIL/Gradio
    output = cv2.cvtColor(cv2.imread('output.png'), cv2.COLOR_BGR2RGB)
    return Image.fromarray(output)
with gr.Blocks() as demo:
    gr.Markdown(description)
    with gr.Row():
        image = gr.Image(label="Input image")
        image_output = gr.Image(label="Segmented image")
    segment_image_button = gr.Button("Segment")
    segment_image_button.click(segment_image, inputs=[image], outputs=image_output)

demo.launch()