import gradio as gr
from PIL import Image, ImageDraw
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
from transformers import DetrImageProcessor, DetrForObjectDetection
import torch
import tensorflow as tf

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Image segmentation model (SegFormer fine-tuned on Cityscapes)
feature_extractor = SegformerFeatureExtractor.from_pretrained(
    "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
)
model_segmentation = TFSegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
)
# image detection ๋ชจ๋ธ | |
# processor_detection = DetrImageProcessor.from_pretrained( | |
# "facebook/detr-resnet-50", revision="no_timm" | |
# ) | |
# model_detection = DetrForObjectDetection.from_pretrained( | |
# "facebook/detr-resnet-50", revision="no_timm" | |
# ) | |
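
# NOTE: a minimal sketch of how the disabled DETR branch above could be used to draw
# bounding boxes with ImageDraw; it assumes processor_detection and model_detection are
# uncommented, and the helper name draw_detection is hypothetical. Kept commented out so
# the running app is unaffected.
# def draw_detection(input_img):
#     """Run DETR on a PIL image and return a copy with predicted boxes drawn on it."""
#     inputs_detection = processor_detection(images=input_img, return_tensors="pt").to(device)
#     with torch.no_grad():
#         outputs_detection = model_detection.to(device)(**inputs_detection)
#     # Rescale boxes to the original image size; PIL .size is (width, height)
#     target_sizes = torch.tensor([input_img.size[::-1]])
#     results = processor_detection.post_process_object_detection(
#         outputs_detection, target_sizes=target_sizes, threshold=0.9
#     )[0]
#     drawn = input_img.copy()
#     draw = ImageDraw.Draw(drawn)
#     for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
#         x0, y0, x1, y1 = box.tolist()
#         draw.rectangle([x0, y0, x1, y1], outline="red", width=3)
#         draw.text((x0, y0), model_detection.config.id2label[label.item()], fill="red")
#     return drawn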

def ade_palette():
    """Color palette: maps each class index to an RGB color (one entry per class)."""
    return [
        [204, 87, 92],
        [112, 185, 212],
        [45, 189, 106],
        [234, 123, 67],
        [78, 56, 123],
        [210, 32, 89],
        [90, 180, 56],
        [155, 102, 200],
        [33, 147, 176],
        [255, 183, 76],
        [67, 123, 89],
        [190, 60, 45],
        [134, 112, 200],
        [56, 45, 189],
        [200, 56, 123],
        [87, 92, 204],
        [120, 56, 123],
        [45, 78, 123],
        [45, 123, 67],
    ]

# Class names, one per line, in the same order as the palette above
labels_list = []
with open(r"labels.txt", "r") as fp:
    for line in fp:
        labels_list.append(line.strip())

colormap = np.asarray(ade_palette())
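
# Optional sanity check: warn if labels.txt and the palette disagree in length
# (the Cityscapes-fine-tuned SegFormer above predicts 19 classes)
if len(labels_list) != len(colormap):
    print(f"Warning: {len(labels_list)} labels vs. {len(colormap)} palette colors")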

def label_to_color_image(label):
    """Convert a 2-D label map into a color image using the palette."""
    if label.ndim != 2:
        raise ValueError("Expected a 2-D input label map.")
    if np.max(label) >= len(colormap):
        raise ValueError("Label value is too large for the palette.")
    return colormap[label]

def draw_plot(pred_img, seg):
    """Plot the blended image next to a legend of the classes present in the segmentation."""
    fig = plt.figure(figsize=(20, 15))
    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])

    # Left panel: the image overlaid with the segmentation map
    plt.subplot(grid_spec[0])
    plt.imshow(pred_img)
    plt.axis("off")

    # Right panel: a color legend restricted to the labels that actually appear
    LABEL_NAMES = np.asarray(labels_list)
    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)

    unique_labels = np.unique(seg.numpy().astype("uint8"))
    ax = plt.subplot(grid_spec[1])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0, labelsize=25)
    return fig

def sepia(inputs, button_text):
    """Run semantic segmentation on the input image and return the result figure
    (the object-detection path is currently disabled)."""
    input_img = Image.fromarray(inputs)

    # SegFormer forward pass
    inputs_segmentation = feature_extractor(images=input_img, return_tensors="tf")
    outputs_segmentation = model_segmentation(**inputs_segmentation)
    logits_segmentation = outputs_segmentation.logits

    # Logits come out channels-first at reduced resolution; move channels last and
    # resize back to the original image size before taking the per-pixel argmax
    logits_segmentation = tf.transpose(logits_segmentation, [0, 2, 3, 1])
    logits_segmentation = tf.image.resize(logits_segmentation, input_img.size[::-1])
    seg = tf.math.argmax(logits_segmentation, axis=-1)[0]

    # Colorize the segmentation map and blend it with the input image
    color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
    for label, color in enumerate(colormap):
        color_seg[seg.numpy() == label, :] = color
    pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
    pred_img = pred_img.astype(np.uint8)

    fig = draw_plot(pred_img, seg)
    return fig
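
# Example usage outside the Gradio app (a sketch, assuming "01.jpg" from the examples
# list below exists next to this script):
# fig = sepia(np.array(Image.open("01.jpg")), "segmentation")
# fig.savefig("segmentation_result.png")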

def on_button_click(inputs):
    """Button-click event handler (currently unused by the interface below)."""
    image_path, selected_option = inputs
    if selected_option == "dropout":
        # If 'dropout' is selected, pick one of the available options at random
        # (only 'segmentation' is available at the moment)
        selected_option = np.random.choice(["segmentation"])
    return sepia(image_path, selected_option)

# Use gr.Dropdown so the user can pick an option from a menu
dropdown = gr.Dropdown(
    ["segmentation"], label="Menu", info="Choose Segmentation!"
)

demo = gr.Interface(
    fn=sepia,
    inputs=[gr.Image(shape=(400, 600)), dropdown],
    outputs=["plot"],
    examples=[
        ["01.jpg", "segmentation"],
        ["02.jpeg", "segmentation"],
        ["03.jpeg", "segmentation"],
        ["04.jpeg", "segmentation"],
    ],
    allow_flagging="never",
)

demo.launch()