# fluxInpaint / app.py
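"""Gradio Space for FLUX.1 inpainting.

Loads black-forest-labs/FLUX.1-Fill-dev with the alvdansen/flux-koda LoRA,
lets the user upload an image and paint a mask in the editor, then
regenerates the masked region from a text prompt.
"""
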
import gradio as gr
import spaces
import numpy as np
import torch
import random
from diffusers import FluxInpaintPipeline
from PIL import Image
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
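# MAX_SEED bounds the "Seed" slider and the randomized seed; MAX_IMAGE_SIZE is
# defined here for reference but is not used elsewhere in this file.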

# Load the FLUX inpainting pipeline and the Koda LoRA
pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev",
    torch_dtype=torch.bfloat16,
).to("cuda")
pipe.load_lora_weights("alvdansen/flux-koda")
pipe.enable_lora()
pipe.vae.enable_slicing()  # Decode the batch one image at a time to save memory
pipe.vae.enable_tiling()   # Tile the VAE so larger images fit in memory


def calculate_optimal_dimensions(image: Image.Image):
    """Pick generation width/height: fix the longer side at 1024 px, keep the
    aspect ratio within 9:16 to 16:9, and snap both sides to multiples of 8."""
    original_width, original_height = image.size

    MIN_ASPECT_RATIO = 9 / 16
    MAX_ASPECT_RATIO = 16 / 9
    FIXED_DIMENSION = 1024

    original_aspect_ratio = original_width / original_height

    # Fix the longer side at 1024 and scale the other side to match
    if original_aspect_ratio > 1:  # landscape
        width = FIXED_DIMENSION
        height = round(FIXED_DIMENSION / original_aspect_ratio)
    else:  # portrait or square
        height = FIXED_DIMENSION
        width = round(FIXED_DIMENSION * original_aspect_ratio)

    # Snap both dimensions to multiples of 8
    width = (width // 8) * 8
    height = (height // 8) * 8

    # Clamp extreme aspect ratios back into the supported range
    calculated_aspect_ratio = width / height
    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
        width = int(height * MAX_ASPECT_RATIO // 8) * 8
    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
        height = int(width / MIN_ASPECT_RATIO // 8) * 8
    # Safety floor: keep both sides at or above 576 px
    width = max(width, 576)
    height = max(height, 576)

    return width, height

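
# Example (worked from the rules above): a 4000x3000 (4:3) upload maps to
# 1024x768, and a 1080x1920 (9:16) portrait maps to 576x1024.
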
@spaces.GPU
def infer(edit_images, prompt, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28):
    # Width/height are derived from the upload below, so they are not function
    # arguments; the click handler passes exactly the six inputs listed in the UI.
    image = edit_images["background"]
    width, height = calculate_optimal_dimensions(image)
    mask = edit_images["layers"][0]

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Run the inpainting pipeline
    output = pipe(
        prompt=prompt,
        image=image,
        mask_image=mask,
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=torch.Generator(device="cuda").manual_seed(seed),
    )
    output_image = output.images[0]

    # Save a JPEG copy alongside the returned PIL image
    output_image_jpg = output_image.convert("RGB")
    output_image_jpg.save("output.jpg", "JPEG")

    return output_image_jpg, seed

css = """
#col-container {
margin: 0 auto;
max-width: 1000px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# FLUX.1 Fill [dev] Inpainting")
        with gr.Row():
            with gr.Column():
                edit_image = gr.ImageEditor(
                    label="Upload and draw mask for inpainting",
                    type="pil",
                    sources=["upload", "webcam"],
                    image_mode="RGB",
                    layers=True,
                    brush=gr.Brush(colors=["#FFFFFF"]),
                )
                prompt = gr.Textbox(
                    label="Prompt",
                    show_label=False,
                    max_lines=2,
                    placeholder="Enter your prompt",
                )
                run_button = gr.Button("Run")
            result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            guidance_scale = gr.Slider(
                label="Guidance Scale", minimum=1, maximum=30, step=0.5, value=3.5
            )
            num_inference_steps = gr.Slider(
                label="Number of inference steps", minimum=1, maximum=50, step=1, value=28
            )

    run_button.click(
        fn=infer,
        inputs=[edit_image, prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
        outputs=[result, seed],
    )

demo.launch()