Update app.py
app.py CHANGED
@@ -20,8 +20,13 @@ pipe_v1 = AuraFlowPipeline.from_pretrained(
     torch_dtype=torch.float16
 ).to("cuda")
 
+pipe_v2 = AuraFlowPipeline.from_pretrained(
+    "fal/AuraFlow-v0.2",
+    torch_dtype=torch.float16
+).to("cuda")
+
 pipe = AuraFlowPipeline.from_pretrained(
-    "fal/AuraFlow-v0.2",
+    "fal/AuraFlow-v0.3",
     torch_dtype=torch.float16
 ).to("cuda")
 #pipe.transformer.to(memory_format=torch.channels_last)
@@ -50,14 +55,25 @@ def infer_example(prompt, negative_prompt="", seed=42, randomize_seed=False, wid
     return image, seed
 
 @spaces.GPU(duration=95)
-def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, model_version="0.2", comparison_mode=False, progress=gr.Progress(track_tqdm=True)):
+def infer(prompt,
+          negative_prompt="",
+          seed=42,
+          randomize_seed=False,
+          width=1024,
+          height=1024,
+          guidance_scale=5.0,
+          num_inference_steps=28,
+          model_version="0.3",
+          comparison_mode=False,
+          progress=gr.Progress(track_tqdm=True)
+):
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
     if(comparison_mode):
-        image_1 = pipe_v1(
+        image_1 = pipe_v2(
            prompt = prompt,
            negative_prompt = negative_prompt,
            width=width,
@@ -86,7 +102,17 @@ def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024,
            guidance_scale = guidance_scale,
            num_inference_steps = num_inference_steps,
            generator = generator
-        ).images[0]
+        ).images[0]
+    elif(model_version == "0.2"):
+        image = pipe_v2(
+            prompt = prompt,
+            negative_prompt = negative_prompt,
+            width=width,
+            height=height,
+            guidance_scale = guidance_scale,
+            num_inference_steps = num_inference_steps,
+            generator = generator
+        ).images[0]
     else:
         image = pipe(
             prompt = prompt,
@@ -96,8 +122,8 @@ def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024,
             guidance_scale = guidance_scale,
             num_inference_steps = num_inference_steps,
             generator = generator
-        ).images[0]
-
+        ).images[0]
+
     return gr.update(visible=True, value=image), gr.update(visible=False), seed
 
 examples = [
@@ -118,8 +144,8 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""
-        # AuraFlow 0.2
-        Demo of the [AuraFlow 0.2](https://huggingface.co/fal/AuraFlow-v0.2) 6.8B parameters open source diffusion transformer model
+        # AuraFlow 0.3
+        Demo of the [AuraFlow 0.3](https://huggingface.co/fal/AuraFlow-v0.3) 6.8B parameters open source diffusion transformer model
         [[blog](https://blog.fal.ai/auraflow/)] [[model](https://huggingface.co/fal/AuraFlow)] [[fal](https://fal.ai/models/fal-ai/aura-flow)]
         """)
 
@@ -136,12 +162,12 @@ with gr.Blocks(css=css) as demo:
            run_button = gr.Button("Run", scale=0)
 
        result = gr.Image(label="Result", show_label=False)
-       result_compare = ImageSlider(visible=False, label="Left 0.1, Right 0.2")
-       comparison_mode = gr.Checkbox(label="Comparison mode", info="Compare v0.1 with v0.2", value=False)
+       result_compare = ImageSlider(visible=False, label="Left 0.2, Right 0.3")
+       comparison_mode = gr.Checkbox(label="Comparison mode", info="Compare v0.2 with v0.3", value=False)
        with gr.Accordion("Advanced Settings", open=False):
 
            model_version = gr.Dropdown(
-               ["0.1", "0.2"], label="Model version", value="0.2"
+               ["0.1", "0.2", "0.3"], label="Model version", value="0.3"
            )
 
            negative_prompt = gr.Text(
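
For context, the routing this commit introduces can be tried outside Gradio as well. The following is a minimal sketch, not the Space's full app.py: it loads the AuraFlow checkpoints once with diffusers' AuraFlowPipeline (the same API the diff uses) and dispatches on the selected version string, mirroring the new infer() branches. The "fal/AuraFlow" repo id for v0.1 and the helper name generate() are illustrative assumptions, not part of the commit.

import torch
from diffusers import AuraFlowPipeline

# Load each checkpoint once, as the Space does at startup.
pipes = {
    "0.1": AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16).to("cuda"),       # assumed v0.1 repo id
    "0.2": AuraFlowPipeline.from_pretrained("fal/AuraFlow-v0.2", torch_dtype=torch.float16).to("cuda"),
    "0.3": AuraFlowPipeline.from_pretrained("fal/AuraFlow-v0.3", torch_dtype=torch.float16).to("cuda"),
}

def generate(prompt, negative_prompt="", model_version="0.3", seed=42,
             width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28):
    # Same call pattern as the Space's infer(): a seeded generator plus the usual
    # text-to-image arguments; .images[0] is the generated PIL image.
    generator = torch.Generator().manual_seed(seed)
    return pipes[model_version](
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
    ).images[0]

image = generate("a cat in a wizard hat", model_version="0.3")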