Spaces: Running on Zero
1inkusFace
committed on
Update pipeline_stable_diffusion_3_ipa_clip.py
pipeline_stable_diffusion_3_ipa_clip.py CHANGED
@@ -865,16 +865,11 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
     @torch.inference_mode()
     def init_ipadapter(self, ip_adapter_path, image_encoder_path, nb_token, output_dim=2432):
        from transformers import SiglipVisionModel, SiglipImageProcessor
-       from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
        state_dict = torch.load(ip_adapter_path, map_location="cpu")

        device, dtype = self.transformer.device, self.transformer.dtype
-
-
-
-       #image_processor = SiglipImageProcessor.from_pretrained(image_encoder_path)
-       image_processor = CLIPImageProcessor.from_pretrained(image_encoder_path)
-
+       image_encoder = SiglipVisionModel.from_pretrained(image_encoder_path)
+       image_processor = SiglipImageProcessor.from_pretrained(image_encoder_path)
        image_encoder.eval()
        image_encoder.to(device, dtype=dtype)
        self.image_encoder = image_encoder
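For context: the pre-change code referenced image_encoder without ever constructing it (the SiglipVisionModel instantiation had been dropped in favor of a CLIPImageProcessor), so restoring the SiglipVisionModel.from_pretrained line also repairs what would otherwise raise a NameError at image_encoder.eval(). Below is a minimal usage sketch of how the updated init_ipadapter might be called; the base model ID, checkpoint filename, SigLIP repo, and nb_token value are illustrative assumptions, not taken from this repository.

    # Hypothetical usage sketch; all paths and values below are assumptions.
    import torch
    from pipeline_stable_diffusion_3_ipa_clip import StableDiffusion3Pipeline

    pipe = StableDiffusion3Pipeline.from_pretrained(
        "stabilityai/stable-diffusion-3.5-large",  # assumed base model
        torch_dtype=torch.bfloat16,
    ).to("cuda")

    # After this commit, both the image encoder weights and the matching
    # SiglipImageProcessor are loaded from the same image_encoder_path.
    pipe.init_ipadapter(
        ip_adapter_path="ip-adapter.bin",                        # assumed checkpoint name
        image_encoder_path="google/siglip-so400m-patch14-384",   # assumed SigLIP repo
        nb_token=64,                                             # assumed token count
    )

Loading the processor from the same path as the encoder keeps the preprocessing (resize, normalization) in sync with whatever SigLIP checkpoint is supplied, which is presumably why a single image_encoder_path argument serves both.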