Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -9,9 +9,6 @@ import spaces
 from huggingface_hub import login
 from gradio_imageslider import ImageSlider  # Import ImageSlider

-# Login to Hugging Face
-login(token=os.getenv("HF_TOKEN"))
-
 from image_datasets.canny_dataset import canny_processor, c_crop
 from src.flux.sampling import denoise_controlnet, get_noise, get_schedule, prepare, unpack
 from src.flux.util import load_ae, load_clip, load_t5, load_flow_model, load_controlnet, load_safetensors
@@ -24,7 +21,7 @@ if not os.path.exists(model_path):
     with open(model_path, 'wb') as f:
         f.write(response.content)

-# https://github.com/XLabs-AI/x-flux.git
+# Source: https://github.com/XLabs-AI/x-flux.git
 name = "flux-dev"
 device = torch.device("cuda")
 offload = False
@@ -45,9 +42,29 @@ def load_models():

 load_models()

-def preprocess_canny_image(image, width=1024, height=1024):
-
-
+def preprocess_image(image, target_width, target_height, crop=True):
+    if crop:
+        image = c_crop(image)  # Crop the image to square
+        original_width, original_height = image.size
+
+        # Resize to match the target size without stretching
+        scale = max(target_width / original_width, target_height / original_height)
+        resized_width = int(scale * original_width)
+        resized_height = int(scale * original_height)
+
+        image = image.resize((resized_width, resized_height), Image.LANCZOS)
+
+        # Center crop to match the target dimensions
+        left = (resized_width - target_width) // 2
+        top = (resized_height - target_height) // 2
+        image = image.crop((left, top, left + target_width, top + target_height))
+    else:
+        image = image.resize((target_width, target_height), Image.LANCZOS)
+
+    return image
+
+def preprocess_canny_image(image, target_width, target_height, crop=True):
+    image = preprocess_image(image, target_width, target_height, crop=crop)
     image = canny_processor(image)
     return image

@@ -55,7 +72,7 @@ def preprocess_canny_image(image, width=1024, height=1024):
 def generate_image(prompt, control_image, num_steps=50, guidance=4, width=512, height=512, seed=42, random_seed=False):
     if random_seed:
         seed = np.random.randint(0, 10000)
-
+
     if not os.path.isdir("./controlnet_results/"):
         os.makedirs("./controlnet_results/")

@@ -71,6 +88,7 @@ def generate_image(prompt, control_image, num_steps=50, guidance=4, width=512, h
     height = 16 * height // 16
     timesteps = get_schedule(num_steps, (width // 8) * (height // 8) // (16 * 16), shift=(not is_schnell))

+    processed_input = preprocess_image(control_image, width, height)
     canny_processed = preprocess_canny_image(control_image, width, height)
     controlnet_cond = torch.from_numpy((np.array(canny_processed) / 127.5) - 1)
     controlnet_cond = controlnet_cond.permute(2, 0, 1).unsqueeze(0).to(torch.bfloat16).to(torch_device)
@@ -89,7 +107,7 @@ def generate_image(prompt, control_image, num_steps=50, guidance=4, width=512, h
     x1 = rearrange(x1[-1], "c h w -> h w c")
     output_img = Image.fromarray((127.5 * (x1 + 1.0)).cpu().byte().numpy())

-    return [
+    return [processed_input, output_img]  # Return both images for slider

 interface = gr.Interface(
     fn=generate_image,
@@ -104,7 +122,7 @@ interface = gr.Interface(
         gr.Checkbox(label="Random Seed")
     ],
     outputs=ImageSlider(label="Before / After"),  # Use ImageSlider as the output
-    title="FLUX.1 Controlnet
+    title="FLUX.1 Controlnet Canny",
     description="Generate images using ControlNet and a text prompt.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]"
 )
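
Note on the new preprocess_image helper: instead of stretching the control image, it scales it so that both target dimensions are covered and then center-crops the excess. A minimal sketch of just that resize-and-center-crop step, with the repo's square pre-crop (c_crop) omitted and a hypothetical helper name:

from PIL import Image

def resize_and_center_crop(image, target_width, target_height):
    # Scale so both target dimensions are covered, without stretching
    original_width, original_height = image.size
    scale = max(target_width / original_width, target_height / original_height)
    resized_width = int(scale * original_width)
    resized_height = int(scale * original_height)
    image = image.resize((resized_width, resized_height), Image.LANCZOS)
    # Center-crop away the excess in the longer dimension
    left = (resized_width - target_width) // 2
    top = (resized_height - target_height) // 2
    return image.crop((left, top, left + target_width, top + target_height))

# A 1024x512 input with a 512x512 target: scale = max(0.5, 1.0) = 1.0,
# so nothing is rescaled and only the central 512x512 region is kept.
print(resize_and_center_crop(Image.new("RGB", (1024, 512)), 512, 512).size)  # (512, 512)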
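
The context lines kept above convert the Canny map from a uint8 HWC array in [0, 255] to a bfloat16 NCHW tensor in [-1, 1]. A quick self-contained check of that conversion (the transfer to the CUDA device is left out here):

import numpy as np
import torch

canny_np = np.zeros((512, 512, 3), dtype=np.uint8)  # stand-in for a Canny map
canny_np[..., 0] = 255                               # one fully "on" channel

cond = torch.from_numpy((canny_np / 127.5) - 1)               # float64, values in [-1, 1]
cond = cond.permute(2, 0, 1).unsqueeze(0).to(torch.bfloat16)  # HWC -> NCHW with batch dim
print(cond.shape, cond.min().item(), cond.max().item())
# torch.Size([1, 3, 512, 512]) -1.0 1.0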
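
The changed return value pairs with the ImageSlider output declared in the interface: the component compares whatever two images the function returns. A standalone sketch of that before/after pattern, assuming gradio_imageslider accepts a two-image pair as the updated return statement suggests; the blur is only a placeholder for the real ControlNet generation:

import gradio as gr
from gradio_imageslider import ImageSlider
from PIL import ImageFilter

def run(image):
    before = image
    after = image.filter(ImageFilter.GaussianBlur(4))  # placeholder for the model output
    return [before, after]  # the slider renders the pair as Before / After

demo = gr.Interface(
    fn=run,
    inputs=gr.Image(type="pil"),
    outputs=ImageSlider(label="Before / After"),
)

if __name__ == "__main__":
    demo.launch()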