Update pipeline_stable_diffusion_3_ipa.py
pipeline_stable_diffusion_3_ipa.py
CHANGED
@@ -1142,10 +1142,11 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         image_prompt_embeds_list = []

         # 3. prepare clip emb
-        clip_image
-
-
-
+        if clip_image != None:
+            print('Using primary image.')
+            clip_image = clip_image.resize((max(clip_image.size), max(clip_image.size)))
+            clip_image_embeds_1 = self.encode_clip_image_emb(clip_image, device, dtype)
+            image_prompt_embeds_list.append(clip_image_embeds_1)
         if clip_image_2 != None:
             print('Using secondary image.')
             clip_image_2 = clip_image_2.resize((max(clip_image.size), max(clip_image.size)))
@@ -1187,11 +1188,11 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         linear_layer.to('cuda')
         '''
         # Pass the concatenated embeddings through the linear layer
-
+        clip_image_embeds = linear_layer(concatenated_embeds)

         # Add a ReLU activation for non-linearity (optional)
-        combined_embeds = torch.relu(combined_embeds)
-        clip_image_embeds = combined_embeds #torch.cat(image_prompt_embeds_list).mean(dim=0).unsqueeze(0)
+        #combined_embeds = torch.relu(combined_embeds)
+        #clip_image_embeds = combined_embeds #torch.cat(image_prompt_embeds_list).mean(dim=0).unsqueeze(0)

         # 4. Prepare timesteps
         timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
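Taken together, the two hunks make the primary reference image optional and route the concatenated CLIP image embeddings through the linear layer instead of the previously commented-out ReLU/averaging path. A minimal standalone sketch of that flow is below; it is not the pipeline code itself, and the function name, the dim=-1 concatenation, and the tensor shapes are assumptions for illustration (only encode_clip_image_emb, linear_layer, and the resize behaviour come from the diff).

    import torch

    # Hypothetical helper sketching the updated block; not part of the pipeline.
    def combine_image_embeds(pipe, linear_layer, clip_image=None, clip_image_2=None,
                             device='cuda', dtype=torch.float16):
        image_prompt_embeds_list = []
        if clip_image is not None:
            # Resize to a square using the larger side, as in the diff.
            clip_image = clip_image.resize((max(clip_image.size), max(clip_image.size)))
            image_prompt_embeds_list.append(pipe.encode_clip_image_emb(clip_image, device, dtype))
        if clip_image_2 is not None:
            # Resized to a square using its own larger side here; the diff uses
            # the primary image's size for the secondary image.
            clip_image_2 = clip_image_2.resize((max(clip_image_2.size), max(clip_image_2.size)))
            image_prompt_embeds_list.append(pipe.encode_clip_image_emb(clip_image_2, device, dtype))

        # Concatenate the per-image embeddings and let the linear layer project
        # them back to the width a single CLIP image embedding would have.
        concatenated_embeds = torch.cat(image_prompt_embeds_list, dim=-1)
        return linear_layer(concatenated_embeds)

Compared with averaging the per-image embeddings or applying a ReLU (both left commented out in the second hunk), the learned linear projection lets the model weight the two reference images when forming clip_image_embeds.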