ford442 committed on
Commit
a6920aa
·
1 Parent(s): c53362d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -6
app.py CHANGED
@@ -98,15 +98,12 @@ def load_and_prepare_model(model_id):
98
  torch_dtype=torch.bfloat16,
99
  use_safetensors=True,
100
  add_watermarker=False,
101
- ).to(device)
102
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
103
 
104
  if USE_TORCH_COMPILE:
105
  pipe.compile()
106
 
107
- if ENABLE_CPU_OFFLOAD:
108
- pipe.enable_model_cpu_offload()
109
-
110
  return pipe
111
 
112
  # Preload and compile both models
@@ -124,7 +121,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
124
  seed = random.randint(0, MAX_SEED)
125
  return seed
126
 
127
- @spaces.GPU(duration=40, enable_queue=True)
128
  def generate(
129
  model_choice: str,
130
  prompt: str,
@@ -143,7 +140,57 @@ def generate(
143
  ):
144
  global models
145
  pipe = models[model_choice]
146
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
  seed = int(randomize_seed_fn(seed, randomize_seed))
148
  generator = torch.Generator(device=device).manual_seed(seed)
149
 
@@ -215,6 +262,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
215
  container=False,
216
  )
217
  run_button = gr.Button("Run", scale=0)
 
218
  result = gr.Gallery(label="Result", columns=1, show_label=False)
219
 
220
  with gr.Row():
@@ -328,6 +376,29 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
328
  outputs=[result, seed],
329
  )
330
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
331
  gr.Markdown("### REALVISXL V5.0")
332
  predefined_gallery = gr.Gallery(label="REALVISXL V5.0", columns=3, show_label=False, value=load_predefined_images1())
333
 
 
98
  torch_dtype=torch.bfloat16,
99
  use_safetensors=True,
100
  add_watermarker=False,
101
+ )
102
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
103
 
104
  if USE_TORCH_COMPILE:
105
  pipe.compile()
106
 
 
 
 
107
  return pipe
108
 
109
  # Preload and compile both models
 
121
  seed = random.randint(0, MAX_SEED)
122
  return seed
123
 
124
+ @spaces.GPU(duration=70, enable_queue=True)
125
  def generate(
126
  model_choice: str,
127
  prompt: str,
 
140
  ):
141
  global models
142
  pipe = models[model_choice]
143
+ pipe.to("cuda:0")
144
+ seed = int(randomize_seed_fn(seed, randomize_seed))
145
+ generator = torch.Generator(device=device).manual_seed(seed)
146
+
147
+ prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
148
+
149
+ options = {
150
+ "prompt": [prompt] * num_images,
151
+ "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
152
+ "width": width,
153
+ "height": height,
154
+ "guidance_scale": guidance_scale,
155
+ "num_inference_steps": num_inference_steps,
156
+ "generator": generator,
157
+ "output_type": "pil",
158
+ }
159
+
160
+ if use_resolution_binning:
161
+ options["use_resolution_binning"] = True
162
+
163
+ images = []
164
+ for i in range(0, num_images, BATCH_SIZE):
165
+ batch_options = options.copy()
166
+ batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
167
+ if "negative_prompt" in batch_options:
168
+ batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
169
+ images.extend(pipe(**batch_options).images)
170
+
171
+ image_paths = [save_image(img) for img in images]
172
+ return image_paths, seed
173
+
174
+ def generate_cpu(
175
+ model_choice: str,
176
+ prompt: str,
177
+ negative_prompt: str = "",
178
+ use_negative_prompt: bool = False,
179
+ style_selection: str = DEFAULT_STYLE_NAME,
180
+ seed: int = 1,
181
+ width: int = 768,
182
+ height: int = 768,
183
+ guidance_scale: float = 4,
184
+ num_inference_steps: int = 150,
185
+ randomize_seed: bool = False,
186
+ use_resolution_binning: bool = True,
187
+ num_images: int = 1,
188
+ progress=gr.Progress(track_tqdm=True),
189
+ ):
190
+ global models
191
+ pipe = models[model_choice]
192
+ pipe.to("cpu")
193
+
194
  seed = int(randomize_seed_fn(seed, randomize_seed))
195
  generator = torch.Generator(device=device).manual_seed(seed)
196
 
 
262
  container=False,
263
  )
264
  run_button = gr.Button("Run", scale=0)
265
+ cpu_run_button = gr.Button("CPU Run", scale=0)
266
  result = gr.Gallery(label="Result", columns=1, show_label=False)
267
 
268
  with gr.Row():
 
376
  outputs=[result, seed],
377
  )
378
 
379
+ gr.on(
380
+ triggers=[
381
+ cpu_run_button.click,
382
+ ],
383
+ api_name="generate", # Add this line
384
+ fn=generate_cpu,
385
+ inputs=[
386
+ model_choice,
387
+ prompt,
388
+ negative_prompt,
389
+ use_negative_prompt,
390
+ style_selection,
391
+ seed,
392
+ width,
393
+ height,
394
+ guidance_scale,
395
+ num_inference_steps,
396
+ randomize_seed,
397
+ num_images,
398
+ ],
399
+ outputs=[result, seed],
400
+ )
401
+
402
  gr.Markdown("### REALVISXL V5.0")
403
  predefined_gallery = gr.Gallery(label="REALVISXL V5.0", columns=3, show_label=False, value=load_predefined_images1())
404