Spaces: Running on Zero

Himanshu-AT committed · Commit f9694e5 · 1 Parent(s): 814738a

remove lora scale

app.py CHANGED

@@ -4,7 +4,7 @@ import numpy as np
 import spaces
 import torch
 import spaces
-import random
+import random
 
 from diffusers import FluxFillPipeline
 from PIL import Image
@@ -20,7 +20,7 @@ pipe.enable_sequential_cpu_offload()
 def calculate_optimal_dimensions(image: Image.Image):
     # Extract the original dimensions
     original_width, original_height = image.size
-
+
     # Set constants
     MIN_ASPECT_RATIO = 9 / 16
     MAX_ASPECT_RATIO = 16 / 9
@@ -70,15 +70,15 @@ def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         generator=torch.Generator("cpu").manual_seed(seed),
-        lora_scale=0.75
+        # lora_scale=0.75 // not supported in this version
     ).images[0]
 
     output_image_jpg = image.convert("RGB")
     output_image_jpg.save("output.jpg", "JPEG")
-
+
     return output_image_jpg, seed
     # return image, seed
-
+
 examples = [
     "photography of a young woman, accent lighting, (front view:1.4), "
     # "a tiny astronaut hatching from an egg on the moon",
@@ -94,7 +94,7 @@ css="""
 """
 
 with gr.Blocks(css=css) as demo:
-
+
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""# FLUX.1 [dev]
         """)
@@ -117,11 +117,11 @@ with gr.Blocks(css=css) as demo:
                 container=False,
             )
             run_button = gr.Button("Run")
-
+
         result = gr.Image(label="Result", show_label=False)
-
+
        with gr.Accordion("Advanced Settings", open=False):
-
+
             seed = gr.Slider(
                 label="Seed",
                 minimum=0,
@@ -129,11 +129,11 @@ with gr.Blocks(css=css) as demo:
                 step=1,
                 value=0,
             )
-
+
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
+
            with gr.Row():
-
+
                 width = gr.Slider(
                     label="Width",
                     minimum=256,
@@ -142,7 +142,7 @@ with gr.Blocks(css=css) as demo:
                     value=1024,
                     visible=False
                 )
-
+
                 height = gr.Slider(
                     label="Height",
                     minimum=256,
@@ -151,7 +151,7 @@ with gr.Blocks(css=css) as demo:
                     value=1024,
                     visible=False
                 )
-
+
            with gr.Row():
 
                 guidance_scale = gr.Slider(
@@ -161,7 +161,7 @@ with gr.Blocks(css=css) as demo:
                     step=0.5,
                     value=50,
                 )
-
+
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
@@ -177,4 +177,4 @@ with gr.Blocks(css=css) as demo:
         outputs = [result, seed]
     )
 
-demo.launch()
+demo.launch()
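Context for the main change in this commit: lora_scale is not an accepted keyword of FluxFillPipeline.__call__, which is presumably why the argument was commented out with the note "not supported in this version". If scaling a loaded LoRA is still wanted, diffusers normally exposes it either through the PEFT integration (pipe.set_adapters) or, for Flux pipelines, through joint_attention_kwargs={"scale": ...} at call time. The sketch below shows both routes; the model id, LoRA repo, adapter name, prompt, and placeholder inputs are illustrative assumptions and are not taken from this Space.

import torch
from PIL import Image
from diffusers import FluxFillPipeline

# Model id is an assumption; the diff only shows that FluxFillPipeline is used.
pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
)
pipe.enable_sequential_cpu_offload()

# Hypothetical LoRA adapter, not the one this Space actually loads.
pipe.load_lora_weights("some-user/some-flux-lora", adapter_name="example_lora")

# Route 1: set the adapter weight once via the PEFT integration.
pipe.set_adapters(["example_lora"], adapter_weights=[0.75])

# Placeholder inputs so the call below runs end to end.
source = Image.new("RGB", (1024, 1024), "gray")
mask = Image.new("L", (1024, 1024), 255)  # white = region to repaint

image = pipe(
    prompt="photography of a young woman, accent lighting",
    image=source,
    mask_image=mask,
    height=1024,
    width=1024,
    guidance_scale=30,
    num_inference_steps=28,
    generator=torch.Generator("cpu").manual_seed(42),
    # Route 2: per-call scale; Flux pipelines read the LoRA scale from
    # joint_attention_kwargs["scale"] when an adapter is loaded.
    joint_attention_kwargs={"scale": 0.75},
).images[0]
image.save("output.jpg", "JPEG")

Either route keeps the pipeline call signature valid, unlike passing lora_scale directly, which is what the commit removes.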