import ast
import os
import tempfile
import time
from collections.abc import Sequence
from typing import Any, cast

import gradio as gr
import numpy as np
import pillow_heif
import spaces
import torch
from diffusers import FluxPipeline
from gradio_image_annotation import image_annotator
from gradio_imageslider import ImageSlider
from huggingface_hub import hf_hub_download, login
from PIL import Image
from pymatting.foreground.estimate_foreground_ml import estimate_foreground_ml
from refiners.fluxion.utils import no_grad
from refiners.solutions import BoxSegmenter
from transformers import GroundingDinoForObjectDetection, GroundingDinoProcessor, pipeline

# Korean -> English translation pipeline for user prompts
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
# Translation helper
def translate_to_english(text: str) -> str:
    """Translate Korean text to English; return the input unchanged if it contains no Hangul."""
    if any('가' <= char <= '힣' for char in text):
        try:
            translated = translator(text)[0]['translation_text']
            return translated
        except Exception as e:
            print(f"Translation error: {e}")
            return text
    return text
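# Quick sanity check, for reference (assumes the opus-mt-ko-en weights have downloaded):
#   translate_to_english("강아지")  # -> roughly "puppy" / "a dog"
#   translate_to_english("a dog")  # -> "a dog" (no Hangul, returned unchanged)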
BoundingBox = tuple[int, int, int, int]

pillow_heif.register_heif_opener()
pillow_heif.register_avif_opener()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# HF token setup
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN is None:
    raise ValueError("Please set the HF_TOKEN environment variable")
try:
    login(token=HF_TOKEN)
except Exception as e:
    raise ValueError(f"Failed to login to Hugging Face: {e}") from e
# Model initialization
segmenter = BoxSegmenter(device="cpu")
segmenter.device = device
segmenter.model = segmenter.model.to(device=segmenter.device)

gd_model_path = "IDEA-Research/grounding-dino-base"
gd_processor = GroundingDinoProcessor.from_pretrained(gd_model_path)
gd_model = GroundingDinoForObjectDetection.from_pretrained(gd_model_path, torch_dtype=torch.float32)
gd_model = gd_model.to(device=device)
assert isinstance(gd_model, GroundingDinoForObjectDetection)
# FLUX pipeline initialization
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN,
)
pipe.load_lora_weights(
    hf_hub_download(
        "ByteDance/Hyper-SD",
        "Hyper-FLUX.1-dev-8steps-lora.safetensors",
        token=HF_TOKEN,
    )
)
pipe.fuse_lora(lora_scale=0.125)
pipe.to(device="cuda", dtype=torch.bfloat16)
class timer:
    def __init__(self, method_name="timed process"):
        self.method = method_name

    def __enter__(self):
        self.start = time.time()
        print(f"{self.method} starts")

    def __exit__(self, exc_type, exc_val, exc_tb):
        end = time.time()
        print(f"{self.method} took {round(end - self.start, 2)}s")
def bbox_union(bboxes: Sequence[list[int]]) -> BoundingBox | None:
    if not bboxes:
        return None
    for bbox in bboxes:
        assert len(bbox) == 4
        assert all(isinstance(x, int) for x in bbox)
    return (
        min(bbox[0] for bbox in bboxes),
        min(bbox[1] for bbox in bboxes),
        max(bbox[2] for bbox in bboxes),
        max(bbox[3] for bbox in bboxes),
    )
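# Example, for reference: the union spans both boxes:
#   bbox_union([[10, 20, 50, 60], [30, 10, 80, 90]])  # -> (10, 10, 80, 90)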
def corners_to_pixels_format(bboxes: torch.Tensor, width: int, height: int) -> torch.Tensor:
    x1, y1, x2, y2 = bboxes.round().to(torch.int32).unbind(-1)
    return torch.stack((x1.clamp_(0, width), y1.clamp_(0, height), x2.clamp_(0, width), y2.clamp_(0, height)), dim=-1)
def gd_detect(img: Image.Image, prompt: str) -> BoundingBox | None:
    inputs = gd_processor(images=img, text=f"{prompt}.", return_tensors="pt").to(device=device)
    with no_grad():
        outputs = gd_model(**inputs)
    width, height = img.size
    results: dict[str, Any] = gd_processor.post_process_grounded_object_detection(
        outputs,
        inputs["input_ids"],
        target_sizes=[(height, width)],
    )[0]
    assert "boxes" in results and isinstance(results["boxes"], torch.Tensor)
    bboxes = corners_to_pixels_format(results["boxes"].cpu(), width, height)
    return bbox_union(bboxes.numpy().tolist())
def apply_mask(img: Image.Image, mask_img: Image.Image, defringe: bool = True) -> Image.Image:
    assert img.size == mask_img.size
    img = img.convert("RGB")
    mask_img = mask_img.convert("L")
    if defringe:
        rgb, alpha = np.asarray(img) / 255.0, np.asarray(mask_img) / 255.0
        foreground = cast(np.ndarray[Any, np.dtype[np.uint8]], estimate_foreground_ml(rgb, alpha))
        img = Image.fromarray((foreground * 255).astype("uint8"))
    result = Image.new("RGBA", img.size)
    result.paste(img, (0, 0), mask_img)
    return result
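# For reference: apply_mask returns an RGBA cut-out whose alpha channel comes from the mask;
# with defringe=True, pymatting's estimate_foreground_ml suppresses background color bleeding
# around soft mask edges before the paste.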
def adjust_size_to_multiple_of_8(width: int, height: int) -> tuple[int, int]:
    """Round image dimensions up to the nearest multiple of 8."""
    new_width = ((width + 7) // 8) * 8
    new_height = ((height + 7) // 8) * 8
    return new_width, new_height
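# Example, for reference: adjust_size_to_multiple_of_8(910, 512)  # -> (912, 512)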
def calculate_dimensions(aspect_ratio: str, base_size: int = 512) -> tuple[int, int]:
    """Compute image dimensions for the selected aspect ratio."""
    if aspect_ratio == "1:1":
        return base_size, base_size
    elif aspect_ratio == "16:9":
        return base_size * 16 // 9, base_size
    elif aspect_ratio == "9:16":
        return base_size, base_size * 16 // 9
    elif aspect_ratio == "4:3":
        return base_size * 4 // 3, base_size
    return base_size, base_size
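# Example, for reference: calculate_dimensions("16:9")  # -> (910, 512); the caller then
# rounds with adjust_size_to_multiple_of_8, so the pipeline actually receives 912x512.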
def generate_background(prompt: str, aspect_ratio: str) -> Image.Image:
    """Generate a background image with the FLUX pipeline."""
    try:
        # Compute dimensions for the selected aspect ratio
        width, height = calculate_dimensions(aspect_ratio)
        # Round up to multiples of 8
        width, height = adjust_size_to_multiple_of_8(width, height)

        with timer("Background generation"):
            image = pipe(
                prompt=prompt,
                width=width,
                height=height,
                num_inference_steps=8,
                guidance_scale=4.0,
            ).images[0]
        return image
    except Exception as e:
        raise gr.Error(f"Background generation failed: {str(e)}")
def create_position_grid():
    """HTML for the 3x3 position-selection grid."""
    return """
    <div style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px; width: 150px; margin: auto;">
        <button class="position-btn" data-pos="top-left">↖</button>
        <button class="position-btn" data-pos="top-center">↑</button>
        <button class="position-btn" data-pos="top-right">↗</button>
        <button class="position-btn" data-pos="middle-left">←</button>
        <button class="position-btn" data-pos="middle-center">•</button>
        <button class="position-btn" data-pos="middle-right">→</button>
        <button class="position-btn" data-pos="bottom-left">↙</button>
        <button class="position-btn" data-pos="bottom-center" data-default="true">↓</button>
        <button class="position-btn" data-pos="bottom-right">↘</button>
    </div>
    <script>
        const buttons = document.querySelectorAll('.position-btn');
        buttons.forEach(btn => {
            btn.style.width = '40px';
            btn.style.height = '40px';
            btn.style.border = '1px solid #ccc';
            btn.style.borderRadius = '4px';
            btn.style.cursor = 'pointer';
            if (btn.dataset.default === 'true') {
                btn.style.backgroundColor = '#2196F3';
                btn.style.color = 'white';
            }
        });
    </script>
    """
def calculate_object_position(position: str, bg_size: tuple[int, int], obj_size: tuple[int, int]) -> tuple[int, int]:
    """Compute the top-left paste coordinates of the object on the background."""
    bg_width, bg_height = bg_size
    obj_width, obj_height = obj_size

    positions = {
        "top-left": (0, 0),
        "top-center": ((bg_width - obj_width) // 2, 0),
        "top-right": (bg_width - obj_width, 0),
        "middle-left": (0, (bg_height - obj_height) // 2),
        "middle-center": ((bg_width - obj_width) // 2, (bg_height - obj_height) // 2),
        "middle-right": (bg_width - obj_width, (bg_height - obj_height) // 2),
        "bottom-left": (0, bg_height - obj_height),
        "bottom-center": ((bg_width - obj_width) // 2, bg_height - obj_height),
        "bottom-right": (bg_width - obj_width, bg_height - obj_height)
    }

    return positions.get(position, positions["bottom-center"])
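# Example, for reference: a 200x300 object on a 912x512 background, anchored bottom-center:
#   calculate_object_position("bottom-center", (912, 512), (200, 300))  # -> (356, 212)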
def resize_object(image: Image.Image, scale_percent: float) -> Image.Image:
    """Resize the object to a percentage of its original size."""
    width = int(image.width * scale_percent / 100)
    height = int(image.height * scale_percent / 100)
    return image.resize((width, height), Image.Resampling.LANCZOS)
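# Example, for reference: resize_object(obj, 50) halves each dimension (e.g. 400x300 -> 200x150).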
def combine_with_background(foreground: Image.Image, background: Image.Image,
                            position: str = "bottom-center", scale_percent: float = 100) -> Image.Image:
    """Composite the extracted foreground onto the generated background."""
    # Prepare the background
    result = background.convert('RGBA')
    # Resize the object
    scaled_foreground = resize_object(foreground, scale_percent)
    # Compute the paste position
    x, y = calculate_object_position(position, result.size, scaled_foreground.size)
    # Composite, using the foreground's own alpha channel as the paste mask
    result.paste(scaled_foreground, (x, y), scaled_foreground)
    return result
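# Usage sketch, for reference (the foreground must be RGBA so its alpha acts as the mask):
#   combined = combine_with_background(masked_alpha, background, "middle-center", 75)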
def _gpu_process(img: Image.Image, prompt: str | BoundingBox | None) -> tuple[Image.Image, BoundingBox | None, list[str]]:
    time_log: list[str] = []
    if isinstance(prompt, str):
        t0 = time.time()
        bbox = gd_detect(img, prompt)
        time_log.append(f"detect: {time.time() - t0}")
        if not bbox:
            print(time_log[0])
            raise gr.Error("No object detected")
    else:
        bbox = prompt
    t0 = time.time()
    mask = segmenter(img, bbox)
    time_log.append(f"segment: {time.time() - t0}")
    return mask, bbox, time_log
def _process(img: Image.Image, prompt: str | BoundingBox | None, bg_prompt: str | None = None, aspect_ratio: str = "1:1",
             position: str = "bottom-center", scale_percent: float = 100) -> tuple[tuple[Image.Image, Image.Image, Image.Image], gr.DownloadButton]:
    try:
        # Keep large uploads manageable; rescale box prompts to match the downscaled image
        if img.width > 2048 or img.height > 2048:
            orig_res = max(img.width, img.height)
            img.thumbnail((2048, 2048))
            if isinstance(prompt, tuple):
                x0, y0, x1, y1 = (int(x * 2048 / orig_res) for x in prompt)
                prompt = (x0, y0, x1, y1)

        mask, bbox, time_log = _gpu_process(img, prompt)
        masked_alpha = apply_mask(img, mask, defringe=True)

        if bg_prompt:
            background = generate_background(bg_prompt, aspect_ratio)
            combined = combine_with_background(masked_alpha, background, position, scale_percent)
        else:
            combined = Image.alpha_composite(Image.new("RGBA", masked_alpha.size, "white"), masked_alpha)

        thresholded = mask.point(lambda p: 255 if p > 10 else 0)
        bbox = thresholded.getbbox()
        to_dl = masked_alpha.crop(bbox)

        temp = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        to_dl.save(temp, format="PNG")
        temp.close()

        return (img, combined, masked_alpha), gr.DownloadButton(value=temp.name, interactive=True)
    except Exception as e:
        raise gr.Error(f"Processing failed: {str(e)}")
def on_change_bbox(prompts: dict[str, Any] | None):
    return gr.update(interactive=prompts is not None)

def on_change_prompt(img: Image.Image | None, prompt: str | None, bg_prompt: str | None = None):
    return gr.update(interactive=bool(img and prompt))

# Prompt-driven processing: translates Korean prompts, then extracts and composites
def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None, aspect_ratio: str = "1:1",
                   position: str = "bottom-center", scale_percent: float = 100) -> tuple[Image.Image, Image.Image]:
    try:
        if img is None or prompt.strip() == "":
            raise gr.Error("Please provide both image and prompt")

        # Translate prompts if they contain Korean
        prompt = translate_to_english(prompt)
        if bg_prompt:
            bg_prompt = translate_to_english(bg_prompt)

        # Process the image
        results, _ = _process(img, prompt, bg_prompt, aspect_ratio, position, scale_percent)

        # Return only the combined image and the extracted object
        return results[1], results[2]
    except Exception as e:
        raise gr.Error(str(e))
def process_bbox(img: Image.Image, box_input: str) -> tuple[Image.Image, Image.Image]:
    try:
        if img is None or box_input.strip() == "":
            raise gr.Error("Please provide both image and bounding box coordinates")

        try:
            # Parse coordinates safely instead of using eval()
            coords = ast.literal_eval(box_input)
            if not isinstance(coords, list) or len(coords) != 4:
                raise ValueError("Invalid box format")
            bbox = tuple(int(x) for x in coords)
        except Exception:
            raise gr.Error("Invalid box format. Please provide [xmin, ymin, xmax, ymax]")

        # Process the image
        results, _ = _process(img, bbox)

        # Return only the combined image and the extracted object
        return results[1], results[2]
    except Exception as e:
        raise gr.Error(str(e))
# Event handler functions
def update_process_button(img, prompt):
    return gr.update(
        interactive=bool(img and prompt),
        variant="primary" if bool(img and prompt) else "secondary"
    )

def update_box_button(img, box_input):
    try:
        if img and box_input:
            coords = ast.literal_eval(box_input)
            if isinstance(coords, list) and len(coords) == 4:
                return gr.update(interactive=True, variant="primary")
        return gr.update(interactive=False, variant="secondary")
    except Exception:
        return gr.update(interactive=False, variant="secondary")
# CSS definitions
css = """
footer {display: none}
.main-title {
    text-align: center;
    margin: 2em 0;
    padding: 1em;
    background: #f7f7f7;
    border-radius: 10px;
}
.main-title h1 {
    color: #2196F3;
    font-size: 2.5em;
    margin-bottom: 0.5em;
}
.main-title p {
    color: #666;
    font-size: 1.2em;
}
.container {
    max-width: 1200px;
    margin: auto;
    padding: 20px;
}
.tabs {
    margin-top: 1em;
}
.input-group {
    background: white;
    padding: 1em;
    border-radius: 8px;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.output-group {
    background: white;
    padding: 1em;
    border-radius: 8px;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
button.primary {
    background: #2196F3;
    border: none;
    color: white;
    padding: 0.5em 1em;
    border-radius: 4px;
    cursor: pointer;
    transition: background 0.3s ease;
}
button.primary:hover {
    background: #1976D2;
}
.position-btn {
    transition: all 0.3s ease;
}
.position-btn:hover {
    background-color: #e3f2fd;
}
.position-btn.selected {
    background-color: #2196F3;
    color: white;
}
"""
# UI layout
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    gr.HTML("""
        <div class="main-title">
            <h1>🎨 Image Object Extractor</h1>
            <p>Extract objects from images using text prompts</p>
        </div>
    """)

    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(
                type="pil",
                label="Upload Image",
                interactive=True
            )
            text_prompt = gr.Textbox(
                label="Object to Extract",
                placeholder="Enter what you want to extract...",
                interactive=True
            )
            with gr.Row():
                bg_prompt = gr.Textbox(
                    label="Background Prompt (optional)",
                    placeholder="Describe the background...",
                    interactive=True,
                    scale=3
                )
                aspect_ratio = gr.Dropdown(
                    choices=["1:1", "16:9", "9:16", "4:3"],
                    value="1:1",
                    label="Aspect Ratio",
                    interactive=True,
                    visible=True,
                    scale=1
                )
            # Object position and size controls
            with gr.Row(visible=False) as object_controls:
                with gr.Column(scale=1):
                    gr.HTML(create_position_grid())
                    position = gr.State(value="bottom-center")
                with gr.Column(scale=1):
                    scale_slider = gr.Slider(
                        minimum=10,
                        maximum=200,
                        value=100,
                        step=10,
                        label="Object Size (%)"
                    )

            process_btn = gr.Button(
                "Process",
                variant="primary",
                interactive=False
            )

        with gr.Column(scale=1):
            with gr.Row():
                combined_image = gr.Image(
                    label="Combined Result",
                    show_download_button=True,
                    type="pil",
                    height=512
                )
            with gr.Row():
                extracted_image = gr.Image(
                    label="Extracted Object",
                    show_download_button=True,
                    type="pil",
                    height=256
                )
    # Event bindings
    input_image.change(
        fn=update_process_button,
        inputs=[input_image, text_prompt],
        outputs=process_btn,
        queue=False
    )

    text_prompt.change(
        fn=update_process_button,
        inputs=[input_image, text_prompt],
        outputs=process_btn,
        queue=False
    )
    def update_controls(bg_prompt):
        """Show or hide the extra controls depending on whether a background prompt was entered."""
        is_visible = bool(bg_prompt)
        return [
            gr.update(visible=is_visible),  # aspect_ratio
            gr.update(visible=is_visible),  # object_controls
        ]
    bg_prompt.change(
        fn=update_controls,
        inputs=bg_prompt,
        outputs=[aspect_ratio, object_controls],
        queue=False
    )
    # Position selection button click event
    def update_position(evt: gr.SelectData) -> str:
        """Update the selected position."""
        return evt.value
    position.change(
        fn=lambda x: gr.update(value=x),
        inputs=position,
        outputs=position
    )

    process_btn.click(
        fn=process_prompt,
        inputs=[
            input_image,
            text_prompt,
            bg_prompt,
            aspect_ratio,
            position,
            scale_slider
        ],
        outputs=[combined_image, extracted_image],
        queue=True
    )

demo.queue(max_size=30, api_open=False)
demo.launch()