Update app.py: construct IPAdapterXL once at module scope; stop loading and swapping in a separate UNet
app.py CHANGED
@@ -106,7 +106,7 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
         negative = ""
     return p.replace("{prompt}", positive), n + negative
 
-unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='unet', low_cpu_mem_usage=False, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
+#unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='unet', low_cpu_mem_usage=False, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
 
 def load_and_prepare_model():
     vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False, low_cpu_mem_usage=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
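The standalone UNet load is commented out rather than deleted, which means pipe keeps whatever UNet it was constructed with. The diff does not show how pipe is built; as a minimal sketch, assuming it comes from diffusers' StableDiffusionXLPipeline pointed at the same checkpoint, the bundled UNet makes the manual swap unnecessary:

import torch
from diffusers import StableDiffusionXLPipeline

# Sketch, not app.py's actual construction: loading the full checkpoint
# brings its own UNet along, so the separately loaded unetX is not needed.
pipe = StableDiffusionXLPipeline.from_pretrained(
    'ford442/RealVisXL_V5.0_BF16',  # same repo the commented-out unetX used
    torch_dtype=torch.bfloat16,     # matches the bf16 placement in the diff
)
print(type(pipe.unet).__name__)     # UNet2DConditionModel, from the checkpoint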
@@ -177,7 +177,7 @@ txt_tokenizer.tokenizer_legacy=False
 model = Phi3ForCausalLM.from_pretrained(checkpoint).to('cuda:0')
 #model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map='cuda') #.to('cuda')
 
-
+ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
 text_encoder_1=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True) #.to(device=device, dtype=torch.bfloat16)
 text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True) #.to(device=device, dtype=torch.bfloat16)
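On the new side, the blank line at 180 becomes a module-scope IPAdapterXL instantiation, so the adapter wraps the pipeline once at import time instead of once per call inside generate_30/60/90. A sketch of the pattern, assuming the upstream tencent-ailab/IP-Adapter package layout (pipe, local_folder, ip_ckpt, and device are defined earlier in app.py and are not shown in this diff):

from ip_adapter import IPAdapterXL  # assumption: upstream IP-Adapter layout

# Built once at import. The constructor loads the image encoder and the
# adapter checkpoint, which is the per-call cost the commit removes. The
# wrapper keeps a reference to pipe, so later in-place changes to the
# pipeline's text encoders still take effect through the shared object.
ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)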
@@ -449,11 +449,11 @@ def generate_30(
     torch.cuda.empty_cache()
     global text_encoder_1
     global text_encoder_2
-    global unetX
+    #global unetX
     pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
-    pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
-    ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
+    #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
+    #ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
     print('-- generating image --')
     sd_image = ip_model.generate(
         pil_image_1=sd_image_a,
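With the per-call construction commented out, the ip_model that sd_image = ip_model.generate(...) resolves to is the module-level instance from line 180. A hypothetical trimmed call: pil_image_1=sd_image_a comes from the diff, while the remaining keywords are assumptions based on the upstream IPAdapterXL.generate signature:

# Hypothetical call; only pil_image_1 is visible in this diff.
sd_image = ip_model.generate(
    pil_image_1=sd_image_a,  # reference image prepared earlier in generate_30
    prompt=prompt,           # assumed keyword, as in upstream IP-Adapter
    num_samples=1,           # assumed; upstream defaults to 4
    num_inference_steps=25,  # assumed; the 30/60/90 suffixes are not explained here
)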
@@ -599,11 +599,11 @@ def generate_60(
     torch.cuda.empty_cache()
     global text_encoder_1
     global text_encoder_2
-    global unetX
+    #global unetX
     pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
-    pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
-    ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
+    #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
+    #ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
     print('-- generating image --')
     sd_image = ip_model.generate(
         pil_image_1=sd_image_a,
@@ -750,11 +750,11 @@ def generate_90(
     torch.cuda.empty_cache()
     global text_encoder_1
     global text_encoder_2
-    global unetX
+    #global unetX
     pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
-    pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
-    ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
+    #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
+    #ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
     print('-- generating image --')
     sd_image = ip_model.generate(
         pil_image_1=sd_image_a,
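generate_60 and generate_90 receive the identical edit, leaving the same preamble in all three functions: clear the CUDA cache, then place both CLIP text encoders on the GPU in bfloat16. A hypothetical helper (not part of this commit) that factors it out:

import torch

def _prepare_text_encoders():
    # Shared preamble of generate_30/60/90 after this commit. The global
    # statements in app.py are redundant for these reads (global is only
    # required for assignment), so a plain helper behaves the same way.
    torch.cuda.empty_cache()
    pipe.text_encoder = text_encoder_1.to(device=device, dtype=torch.bfloat16)
    pipe.text_encoder_2 = text_encoder_2.to(device=device, dtype=torch.bfloat16)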