Update app.py
app.py CHANGED
@@ -199,7 +199,23 @@ def generate_30(
     options["use_resolution_binning"] = True
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
-
+    # write note txt
+    filename= f'tst_{seed}.txt'
+    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+    with open(filename, "w") as f:
+        f.write(f"Realvis 5.0: {seed} png\n")
+        f.write(f"Date/time: {timestamp} \n")
+        f.write(f"Prompt: {enhanced_prompt} \n")
+        f.write(f"Steps: {num_inference_steps} \n")
+        f.write(f"Guidance Scale: {guidance_scale} \n")
+        f.write(f"SPACE SETUP: \n")
+        f.write(f"Use Safetensors: no \n")
+        f.write(f"Use Model Dtype: no \n")
+        f.write(f"Model Scheduler: Euler_a custom \n")
+        f.write(f"Model VAE: stabilityai/sdxl-vae no dtype \n")
+        f.write(f"Model UNET: stabilityai no dtype \n")
+        f.write(f"Model to dtype - then to cuda \n")
+    upload_to_ftp(filename)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -250,7 +266,23 @@ def generate_60(
     options["use_resolution_binning"] = True
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
-
+    # write note txt
+    filename= f'tst_{seed}.txt'
+    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+    with open(filename, "w") as f:
+        f.write(f"Realvis 5.0: {seed} png\n")
+        f.write(f"Date/time: {timestamp} \n")
+        f.write(f"Prompt: {enhanced_prompt} \n")
+        f.write(f"Steps: {num_inference_steps} \n")
+        f.write(f"Guidance Scale: {guidance_scale} \n")
+        f.write(f"SPACE SETUP: \n")
+        f.write(f"Use Safetensors: no \n")
+        f.write(f"Use Model Dtype: no \n")
+        f.write(f"Model Scheduler: Euler_a custom \n")
+        f.write(f"Model VAE: stabilityai/sdxl-vae no dtype \n")
+        f.write(f"Model UNET: stabilityai no dtype \n")
+        f.write(f"Model to dtype - then to cuda \n")
+    upload_to_ftp(filename)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
@@ -301,7 +333,23 @@ def generate_90(
     options["use_resolution_binning"] = True
     images = []
     pipe.scheduler.set_timesteps(num_inference_steps,device)
-
+    # write note txt
+    filename= f'tst_{seed}.txt'
+    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+    with open(filename, "w") as f:
+        f.write(f"Realvis 5.0: {seed} png\n")
+        f.write(f"Date/time: {timestamp} \n")
+        f.write(f"Prompt: {enhanced_prompt} \n")
+        f.write(f"Steps: {num_inference_steps} \n")
+        f.write(f"Guidance Scale: {guidance_scale} \n")
+        f.write(f"SPACE SETUP: \n")
+        f.write(f"Use Safetensors: no \n")
+        f.write(f"Use Model Dtype: no \n")
+        f.write(f"Model Scheduler: Euler_a custom \n")
+        f.write(f"Model VAE: stabilityai/sdxl-vae no dtype \n")
+        f.write(f"Model UNET: stabilityai no dtype \n")
+        f.write(f"Model to dtype - then to cuda \n")
+    upload_to_ftp(filename)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
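The commit pastes the same note-writing block verbatim into generate_30, generate_60 and generate_90. A minimal sketch of how that block could be pulled into one shared helper; the write_note_txt name and its parameter list are hypothetical and not part of this commit, and it assumes datetime is already imported at the top of app.py:

import datetime

def write_note_txt(seed, enhanced_prompt, num_inference_steps, guidance_scale):
    # Hypothetical helper (not in this commit): collects the metadata lines
    # that the diff above repeats in all three generate_* functions.
    filename = f'tst_{seed}.txt'
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    with open(filename, "w") as f:
        f.write(f"Realvis 5.0: {seed} png\n")
        f.write(f"Date/time: {timestamp} \n")
        f.write(f"Prompt: {enhanced_prompt} \n")
        f.write(f"Steps: {num_inference_steps} \n")
        f.write(f"Guidance Scale: {guidance_scale} \n")
        f.write("SPACE SETUP: \n")
        f.write("Use Safetensors: no \n")
        f.write("Use Model Dtype: no \n")
        f.write("Model Scheduler: Euler_a custom \n")
        f.write("Model VAE: stabilityai/sdxl-vae no dtype \n")
        f.write("Model UNET: stabilityai no dtype \n")
        f.write("Model to dtype - then to cuda \n")
    return filename

With such a helper, each of the three functions would only need a single call: upload_to_ftp(write_note_txt(seed, enhanced_prompt, num_inference_steps, guidance_scale)).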
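upload_to_ftp(filename) is called by the new code but is not defined in this diff. As a rough sketch only, a plain-FTP upload with the standard-library ftplib could look like the following; the FTP_HOST, FTP_USER and FTP_PASS environment variable names are assumptions, not taken from the Space's actual code:

import os
from ftplib import FTP

def upload_to_ftp(filename):
    # Sketch, not the Space's implementation: connect, authenticate,
    # upload the note file in binary mode, then close the session.
    ftp = FTP(os.environ["FTP_HOST"])
    ftp.login(os.environ["FTP_USER"], os.environ["FTP_PASS"])
    with open(filename, "rb") as f:
        ftp.storbinary(f"STOR {filename}", f)
    ftp.quit()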