fix
mvdream/pipeline_mvdream.py CHANGED
@@ -499,6 +499,7 @@ class MVDreamPipeline(DiffusionPipeline):
         # Prepare extra step kwargs.
         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
 
+        multiplier = 2 if do_classifier_free_guidance else 1
         context = torch.cat([prompt_embeds_neg] * actual_num_frames + [prompt_embeds_pos] * actual_num_frames)
         torch.cat([camera] * multiplier)
 
@@ -511,7 +512,6 @@ class MVDreamPipeline(DiffusionPipeline):
         with self.progress_bar(total=num_inference_steps) as progress_bar:
             for i, t in enumerate(timesteps):
                 # expand the latents if we are doing classifier free guidance
-                multiplier = 2 if do_classifier_free_guidance else 1
                 latent_model_input = torch.cat([latents] * multiplier)
                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
 
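For context, a minimal self-contained sketch of why this change fixes the runtime error: before the commit, multiplier was only assigned inside the denoising loop, so the earlier torch.cat([camera] * multiplier) call referenced an unbound name and raised a NameError. The names and tensor shapes below are illustrative stand-ins, not the Space's actual inputs.

import torch

# Hypothetical stand-ins for the pipeline's state; shapes are illustrative only.
do_classifier_free_guidance = True
camera = torch.zeros(4, 16)  # e.g. 4 views x 16-dim camera embedding

# The fix: bind the classifier-free-guidance batch multiplier before its first use,
# instead of only inside the per-timestep loop.
multiplier = 2 if do_classifier_free_guidance else 1

# The camera embeddings can now be duplicated for the unconditional and
# conditional branches without hitting a NameError.
camera = torch.cat([camera] * multiplier)
print(camera.shape)  # torch.Size([8, 16]) when guidance is enabled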