Update app.py
app.py CHANGED
@@ -38,7 +38,7 @@ taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).
 good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
 
 txt2img_pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
-txt2img_pipe.__class__.load_lora_into_transformer = classmethod(load_lora_into_transformer)
+# txt2img_pipe.__class__.load_lora_into_transformer = classmethod(load_lora_into_transformer)
 
 # img2img model
 img2img_pipe = AutoPipelineForImage2Image.from_pretrained(base_model, vae=good_vae, transformer=txt2img_pipe.transformer, text_encoder=txt2img_pipe.text_encoder, tokenizer=txt2img_pipe.tokenizer, text_encoder_2=txt2img_pipe.text_encoder_2, tokenizer_2=txt2img_pipe.tokenizer_2, torch_dtype=dtype)
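Note on this hunk: commenting out the `classmethod` monkey-patch means both pipelines fall back to the stock `diffusers` LoRA-loading path. Since `img2img_pipe` is constructed from `txt2img_pipe`'s transformer and text encoders, the two pipelines share those modules, so LoRA layers injected through one are visible through the other. A minimal sketch of that sharing; the concrete checkpoint is an assumption (the `taef1` tiny autoencoder in the hunk header suggests a FLUX.1 base, but the Space only shows a `base_model` variable):

```python
import torch
from diffusers import DiffusionPipeline, AutoPipelineForImage2Image

# Sketch only: the checkpoint name is an assumption, not taken from the commit.
base_model = "black-forest-labs/FLUX.1-dev"
dtype = torch.bfloat16

txt2img_pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype)
img2img_pipe = AutoPipelineForImage2Image.from_pretrained(
    base_model,
    transformer=txt2img_pipe.transformer,        # passed by reference, not copied
    text_encoder=txt2img_pipe.text_encoder,
    text_encoder_2=txt2img_pipe.text_encoder_2,
    tokenizer=txt2img_pipe.tokenizer,
    tokenizer_2=txt2img_pipe.tokenizer_2,
    torch_dtype=dtype,
)

# Shared module objects: LoRA layers loaded via either pipeline live in both.
assert img2img_pipe.transformer is txt2img_pipe.transformer
```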
@@ -166,8 +166,13 @@ def run_lora(prompt, image_url, lora_strings_json, image_strength, cfg_scale, s
     gr.Info("Start to load LoRA ...")
     with calculateDuration("Unloading LoRA"):
         img2img_pipe.unload_lora_weights()
+        img2img_pipe.unload_lora()
         txt2img_pipe.unload_lora_weights()
+        txt2img_pipe.unload_lora()
+
         print(txt2img_pipe.get_active_adapters())
+        list_adapters_component_wise = txt2img_pipe.get_list_adapters()
+        print(list_adapters_component_wise)
 
     lora_configs = None
     adapter_names = []
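The extra `unload_lora()` calls sit alongside the documented pipeline-level `unload_lora_weights()`; whether a pipeline class exposes `unload_lora()` depends on the installed `diffusers` version, so those two added lines may be redundant or may raise. The two inspection calls are standard: `get_active_adapters()` returns a flat list of active adapter names, and `get_list_adapters()` returns a per-component mapping. A small sketch of that bookkeeping, assuming `pipe` is a PEFT-backed pipeline (e.g. the Space's `txt2img_pipe`) and the repo, file, and adapter names are placeholders:

```python
# Sketch: LoRA adapter bookkeeping on a PEFT-backed diffusers pipeline.
pipe.load_lora_weights(
    "some-user/some-lora",           # placeholder repo id
    weight_name="lora.safetensors",  # placeholder file name
    adapter_name="toy_adapter",
)
print(pipe.get_active_adapters())  # e.g. ['toy_adapter']
print(pipe.get_list_adapters())    # e.g. {'transformer': ['toy_adapter']}

pipe.unload_lora_weights()         # strips the injected LoRA layers again
print(pipe.get_list_adapters())    # empty once nothing is loaded
```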
@@ -198,6 +203,7 @@ def run_lora(prompt, image_url, lora_strings_json, image_strength, cfg_scale, s
     try:
         if img2img_model:
             img2img_pipe.load_lora_weights(lora_repo, weight_name=weights, low_cpu_mem_usage=True, adapter_name=lora_name)
+            img2img_pipe.set
         else:
             txt2img_pipe.load_lora_weights(lora_repo, weight_name=weights, low_cpu_mem_usage=True, adapter_name=lora_name)
     except:
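The added `img2img_pipe.set` looks truncated as committed: a bare attribute access raises `AttributeError` at run time unless the class happens to define `set`, and the bare `except:` below would silently swallow that error. If the intent was to activate the freshly loaded adapter, a complete call might look like the sketch below, using `set_adapters` from `diffusers`; this is a guess at intent, not what the commit contains:

```python
# Hypothetical completion of the truncated `img2img_pipe.set` line.
# `lora_name` comes from the surrounding loop; the 1.0 weight is assumed.
img2img_pipe.set_adapters([lora_name], adapter_weights=[1.0])
```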
@@ -234,7 +240,7 @@ def run_lora(prompt, image_url, lora_strings_json, image_strength, cfg_scale, s
 
     gr.Info("Completed!")
     progress(100, "Completed!")
-
+    torch.cuda.empty_cache()
     return final_image, seed, json.dumps(result)
 
 # Gradio interface
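`torch.cuda.empty_cache()` only returns PyTorch's cached, currently unused allocator blocks to the driver; it does not free tensors that are still referenced. A slightly more defensive end-of-request cleanup, sketched under the assumption that a CUDA device may or may not be present:

```python
import gc
import torch

def free_gpu_memory():
    # Drop unreachable Python objects first so their CUDA storage becomes
    # cache-resident, then hand the cached blocks back to the driver.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
```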