import gradio as gr
from PIL import Image
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
from transformers import DetrImageProcessor, DetrForObjectDetection
import torch
import tensorflow as tf
from PIL import ImageDraw
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
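# Note: the SegFormer model below runs with TensorFlow; `device` is only needed
# if the PyTorch DETR detection model further down is re-enabled.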
# Image segmentation model
feature_extractor = SegformerFeatureExtractor.from_pretrained(
    "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
)
model_segmentation = TFSegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
)
# Image detection model (currently disabled)
# processor_detection = DetrImageProcessor.from_pretrained(
#     "facebook/detr-resnet-50", revision="no_timm"
# )
# model_detection = DetrForObjectDetection.from_pretrained(
#     "facebook/detr-resnet-50", revision="no_timm"
# )
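# A minimal sketch (not used by the current app) of how the disabled DETR branch could
# be wired up, following the standard transformers object-detection API; `input_img`
# is assumed to be a PIL image:
#
#     inputs_detection = processor_detection(images=input_img, return_tensors="pt")
#     outputs_detection = model_detection(**inputs_detection)
#     target_sizes = torch.tensor([input_img.size[::-1]])
#     results = processor_detection.post_process_object_detection(
#         outputs_detection, target_sizes=target_sizes, threshold=0.9
#     )[0]
#     draw = ImageDraw.Draw(input_img)
#     for box in results["boxes"]:
#         draw.rectangle(box.tolist(), outline="red", width=3)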
def ade_palette():
    """ADE20K palette that maps each class index to an RGB value."""
    return [
        [204, 87, 92],
        [112, 185, 212],
        [45, 189, 106],
        [234, 123, 67],
        [78, 56, 123],
        [210, 32, 89],
        [90, 180, 56],
        [155, 102, 200],
        [33, 147, 176],
        [255, 183, 76],
        [67, 123, 89],
        [190, 60, 45],
        [134, 112, 200],
        [56, 45, 189],
        [200, 56, 123],
        [87, 92, 204],
        [120, 56, 123],
        [45, 78, 123],
        [45, 123, 67],
    ]
labels_list = []
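# Read the class names from labels.txt (one label per line).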
with open(r"labels.txt", "r") as fp:
for line in fp:
labels_list.append(line[:-1])
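# Palette as a NumPy array: colormap[i] is the RGB colour for class index i.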
colormap = np.asarray(ade_palette())
def label_to_color_image(label):
    """Convert a 2-D label map to a colour image using the palette."""
    if label.ndim != 2:
        raise ValueError("Expected a 2-D input label.")
    if np.max(label) >= len(colormap):
        raise ValueError("Label value is too large.")
    return colormap[label]
def draw_plot(pred_img, seg):
    """Plot the blended prediction image together with a legend of the classes present."""
    fig = plt.figure(figsize=(20, 15))
    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])

    # Left panel: the image overlaid with the segmentation mask.
    plt.subplot(grid_spec[0])
    plt.imshow(pred_img)
    plt.axis("off")

    LABEL_NAMES = np.asarray(labels_list)
    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)

    # Right panel: a colour bar listing only the classes found in the prediction.
    unique_labels = np.unique(seg.numpy().astype("uint8"))
    ax = plt.subplot(grid_spec[1])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0, labelsize=25)
    return fig
def sepia(inputs, button_text):
    """Run semantic segmentation on the input image and return the result as a figure."""
    input_img = Image.fromarray(inputs)

    # Preprocess the image and run the SegFormer model.
    inputs_segmentation = feature_extractor(images=input_img, return_tensors="tf")
    outputs_segmentation = model_segmentation(**inputs_segmentation)
    logits_segmentation = outputs_segmentation.logits

    # Resize the logits back to the original image size and take the per-pixel argmax.
    logits_segmentation = tf.transpose(logits_segmentation, [0, 2, 3, 1])
    logits_segmentation = tf.image.resize(logits_segmentation, input_img.size[::-1])
    seg = tf.math.argmax(logits_segmentation, axis=-1)[0]

    # Colour every pixel according to its predicted class.
    color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
    for label, color in enumerate(colormap):
        color_seg[seg.numpy() == label, :] = color

    # Blend the original image with the colour mask.
    pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
    pred_img = pred_img.astype(np.uint8)

    fig = draw_plot(pred_img, seg)
    return fig
def on_button_click(inputs):
    """Button-click event handler."""
    image_path, selected_option = inputs
    if selected_option == "dropout":
        # If "dropout" was selected, pick one of the available modes at random.
        selected_option = np.random.choice(["segmentation"])
    return sepia(image_path, selected_option)
# Use gr.Dropdown so the user can choose an option from a menu.
dropdown = gr.Dropdown(
    ["segmentation"], label="Menu", info="Choose Segmentation!"
)
demo = gr.Interface(
    fn=sepia,
    inputs=[gr.Image(shape=(400, 600)), dropdown],
    outputs=["plot"],
    examples=[
        ["01.jpg", "1"],
        ["02.jpeg", "2"],
        ["03.jpeg", "3"],
        ["04.jpeg", "4"],
    ],
    allow_flagging="never",
)

demo.launch()