# File: diffusers-main/src/diffusers/__init__.py __version__ = '0.31.0.dev0' from typing import TYPE_CHECKING from .utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_k_diffusion_available, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_sentencepiece_available, is_torch_available, is_torchsde_available, is_transformers_available _import_structure = {'configuration_utils': ['ConfigMixin'], 'loaders': ['FromOriginalModelMixin'], 'models': [], 'pipelines': [], 'schedulers': [], 'utils': ['OptionalDependencyNotAvailable', 'is_flax_available', 'is_inflect_available', 'is_invisible_watermark_available', 'is_k_diffusion_available', 'is_k_diffusion_version', 'is_librosa_available', 'is_note_seq_available', 'is_onnx_available', 'is_scipy_available', 'is_torch_available', 'is_torchsde_available', 'is_transformers_available', 'is_transformers_version', 'is_unidecode_available', 'logging']} try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_onnx_objects _import_structure['utils.dummy_onnx_objects'] = [name for name in dir(dummy_onnx_objects) if not name.startswith('_')] else: _import_structure['pipelines'].extend(['OnnxRuntimeModel']) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_pt_objects _import_structure['utils.dummy_pt_objects'] = [name for name in dir(dummy_pt_objects) if not name.startswith('_')] else: _import_structure['models'].extend(['AsymmetricAutoencoderKL', 'AuraFlowTransformer2DModel', 'AutoencoderKL', 'AutoencoderKLCogVideoX', 'AutoencoderKLTemporalDecoder', 'AutoencoderOobleck', 'AutoencoderTiny', 'CogVideoXTransformer3DModel', 'ConsistencyDecoderVAE', 'ControlNetModel', 'ControlNetXSAdapter', 'DiTTransformer2DModel', 'FluxControlNetModel', 'FluxMultiControlNetModel', 'FluxTransformer2DModel', 'HunyuanDiT2DControlNetModel', 'HunyuanDiT2DModel', 'HunyuanDiT2DMultiControlNetModel', 'I2VGenXLUNet', 'Kandinsky3UNet', 'LatteTransformer3DModel', 'LuminaNextDiT2DModel', 'ModelMixin', 'MotionAdapter', 'MultiAdapter', 'PixArtTransformer2DModel', 'PriorTransformer', 'SD3ControlNetModel', 'SD3MultiControlNetModel', 'SD3Transformer2DModel', 'SparseControlNetModel', 'StableAudioDiTModel', 'StableCascadeUNet', 'T2IAdapter', 'T5FilmDecoder', 'Transformer2DModel', 'UNet1DModel', 'UNet2DConditionModel', 'UNet2DModel', 'UNet3DConditionModel', 'UNetControlNetXSModel', 'UNetMotionModel', 'UNetSpatioTemporalConditionModel', 'UVit2DModel', 'VQModel']) _import_structure['optimization'] = ['get_constant_schedule', 'get_constant_schedule_with_warmup', 'get_cosine_schedule_with_warmup', 'get_cosine_with_hard_restarts_schedule_with_warmup', 'get_linear_schedule_with_warmup', 'get_polynomial_decay_schedule_with_warmup', 'get_scheduler'] _import_structure['pipelines'].extend(['AudioPipelineOutput', 'AutoPipelineForImage2Image', 'AutoPipelineForInpainting', 'AutoPipelineForText2Image', 'ConsistencyModelPipeline', 'DanceDiffusionPipeline', 'DDIMPipeline', 'DDPMPipeline', 'DiffusionPipeline', 'DiTPipeline', 'ImagePipelineOutput', 'KarrasVePipeline', 'LDMPipeline', 'LDMSuperResolutionPipeline', 'PNDMPipeline', 'RePaintPipeline', 'ScoreSdeVePipeline', 'StableDiffusionMixin']) _import_structure['schedulers'].extend(['AmusedScheduler', 'CMStochasticIterativeScheduler', 'CogVideoXDDIMScheduler', 'CogVideoXDPMScheduler', 'DDIMInverseScheduler', 
'DDIMParallelScheduler', 'DDIMScheduler', 'DDPMParallelScheduler', 'DDPMScheduler', 'DDPMWuerstchenScheduler', 'DEISMultistepScheduler', 'DPMSolverMultistepInverseScheduler', 'DPMSolverMultistepScheduler', 'DPMSolverSinglestepScheduler', 'EDMDPMSolverMultistepScheduler', 'EDMEulerScheduler', 'EulerAncestralDiscreteScheduler', 'EulerDiscreteScheduler', 'FlowMatchEulerDiscreteScheduler', 'FlowMatchHeunDiscreteScheduler', 'HeunDiscreteScheduler', 'IPNDMScheduler', 'KarrasVeScheduler', 'KDPM2AncestralDiscreteScheduler', 'KDPM2DiscreteScheduler', 'LCMScheduler', 'PNDMScheduler', 'RePaintScheduler', 'SASolverScheduler', 'SchedulerMixin', 'ScoreSdeVeScheduler', 'TCDScheduler', 'UnCLIPScheduler', 'UniPCMultistepScheduler', 'VQDiffusionScheduler']) _import_structure['training_utils'] = ['EMAModel'] try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_scipy_objects _import_structure['utils.dummy_torch_and_scipy_objects'] = [name for name in dir(dummy_torch_and_scipy_objects) if not name.startswith('_')] else: _import_structure['schedulers'].extend(['LMSDiscreteScheduler']) try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_torchsde_objects _import_structure['utils.dummy_torch_and_torchsde_objects'] = [name for name in dir(dummy_torch_and_torchsde_objects) if not name.startswith('_')] else: _import_structure['schedulers'].extend(['CosineDPMSolverMultistepScheduler', 'DPMSolverSDEScheduler']) try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_transformers_objects _import_structure['utils.dummy_torch_and_transformers_objects'] = [name for name in dir(dummy_torch_and_transformers_objects) if not name.startswith('_')] else: _import_structure['pipelines'].extend(['AltDiffusionImg2ImgPipeline', 'AltDiffusionPipeline', 'AmusedImg2ImgPipeline', 'AmusedInpaintPipeline', 'AmusedPipeline', 'AnimateDiffControlNetPipeline', 'AnimateDiffPAGPipeline', 'AnimateDiffPipeline', 'AnimateDiffSDXLPipeline', 'AnimateDiffSparseControlNetPipeline', 'AnimateDiffVideoToVideoControlNetPipeline', 'AnimateDiffVideoToVideoPipeline', 'AudioLDM2Pipeline', 'AudioLDM2ProjectionModel', 'AudioLDM2UNet2DConditionModel', 'AudioLDMPipeline', 'AuraFlowPipeline', 'BlipDiffusionControlNetPipeline', 'BlipDiffusionPipeline', 'CLIPImageProjection', 'CogVideoXPipeline', 'CogVideoXVideoToVideoPipeline', 'CycleDiffusionPipeline', 'FluxControlNetPipeline', 'FluxImg2ImgPipeline', 'FluxInpaintPipeline', 'FluxPipeline', 'HunyuanDiTControlNetPipeline', 'HunyuanDiTPAGPipeline', 'HunyuanDiTPipeline', 'I2VGenXLPipeline', 'IFImg2ImgPipeline', 'IFImg2ImgSuperResolutionPipeline', 'IFInpaintingPipeline', 'IFInpaintingSuperResolutionPipeline', 'IFPipeline', 'IFSuperResolutionPipeline', 'ImageTextPipelineOutput', 'Kandinsky3Img2ImgPipeline', 'Kandinsky3Pipeline', 'KandinskyCombinedPipeline', 'KandinskyImg2ImgCombinedPipeline', 'KandinskyImg2ImgPipeline', 'KandinskyInpaintCombinedPipeline', 'KandinskyInpaintPipeline', 'KandinskyPipeline', 'KandinskyPriorPipeline', 'KandinskyV22CombinedPipeline', 'KandinskyV22ControlnetImg2ImgPipeline', 'KandinskyV22ControlnetPipeline', 'KandinskyV22Img2ImgCombinedPipeline', 'KandinskyV22Img2ImgPipeline', 'KandinskyV22InpaintCombinedPipeline', 
'KandinskyV22InpaintPipeline', 'KandinskyV22Pipeline', 'KandinskyV22PriorEmb2EmbPipeline', 'KandinskyV22PriorPipeline', 'LatentConsistencyModelImg2ImgPipeline', 'LatentConsistencyModelPipeline', 'LattePipeline', 'LDMTextToImagePipeline', 'LEditsPPPipelineStableDiffusion', 'LEditsPPPipelineStableDiffusionXL', 'LuminaText2ImgPipeline', 'MarigoldDepthPipeline', 'MarigoldNormalsPipeline', 'MusicLDMPipeline', 'PaintByExamplePipeline', 'PIAPipeline', 'PixArtAlphaPipeline', 'PixArtSigmaPAGPipeline', 'PixArtSigmaPipeline', 'SemanticStableDiffusionPipeline', 'ShapEImg2ImgPipeline', 'ShapEPipeline', 'StableAudioPipeline', 'StableAudioProjectionModel', 'StableCascadeCombinedPipeline', 'StableCascadeDecoderPipeline', 'StableCascadePriorPipeline', 'StableDiffusion3ControlNetInpaintingPipeline', 'StableDiffusion3ControlNetPipeline', 'StableDiffusion3Img2ImgPipeline', 'StableDiffusion3InpaintPipeline', 'StableDiffusion3PAGPipeline', 'StableDiffusion3Pipeline', 'StableDiffusionAdapterPipeline', 'StableDiffusionAttendAndExcitePipeline', 'StableDiffusionControlNetImg2ImgPipeline', 'StableDiffusionControlNetInpaintPipeline', 'StableDiffusionControlNetPAGPipeline', 'StableDiffusionControlNetPipeline', 'StableDiffusionControlNetXSPipeline', 'StableDiffusionDepth2ImgPipeline', 'StableDiffusionDiffEditPipeline', 'StableDiffusionGLIGENPipeline', 'StableDiffusionGLIGENTextImagePipeline', 'StableDiffusionImageVariationPipeline', 'StableDiffusionImg2ImgPipeline', 'StableDiffusionInpaintPipeline', 'StableDiffusionInpaintPipelineLegacy', 'StableDiffusionInstructPix2PixPipeline', 'StableDiffusionLatentUpscalePipeline', 'StableDiffusionLDM3DPipeline', 'StableDiffusionModelEditingPipeline', 'StableDiffusionPAGPipeline', 'StableDiffusionPanoramaPipeline', 'StableDiffusionParadigmsPipeline', 'StableDiffusionPipeline', 'StableDiffusionPipelineSafe', 'StableDiffusionPix2PixZeroPipeline', 'StableDiffusionSAGPipeline', 'StableDiffusionUpscalePipeline', 'StableDiffusionXLAdapterPipeline', 'StableDiffusionXLControlNetImg2ImgPipeline', 'StableDiffusionXLControlNetInpaintPipeline', 'StableDiffusionXLControlNetPAGImg2ImgPipeline', 'StableDiffusionXLControlNetPAGPipeline', 'StableDiffusionXLControlNetPipeline', 'StableDiffusionXLControlNetXSPipeline', 'StableDiffusionXLImg2ImgPipeline', 'StableDiffusionXLInpaintPipeline', 'StableDiffusionXLInstructPix2PixPipeline', 'StableDiffusionXLPAGImg2ImgPipeline', 'StableDiffusionXLPAGInpaintPipeline', 'StableDiffusionXLPAGPipeline', 'StableDiffusionXLPipeline', 'StableUnCLIPImg2ImgPipeline', 'StableUnCLIPPipeline', 'StableVideoDiffusionPipeline', 'TextToVideoSDPipeline', 'TextToVideoZeroPipeline', 'TextToVideoZeroSDXLPipeline', 'UnCLIPImageVariationPipeline', 'UnCLIPPipeline', 'UniDiffuserModel', 'UniDiffuserPipeline', 'UniDiffuserTextDecoder', 'VersatileDiffusionDualGuidedPipeline', 'VersatileDiffusionImageVariationPipeline', 'VersatileDiffusionPipeline', 'VersatileDiffusionTextToImagePipeline', 'VideoToVideoSDPipeline', 'VQDiffusionPipeline', 'WuerstchenCombinedPipeline', 'WuerstchenDecoderPipeline', 'WuerstchenPriorPipeline']) try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_transformers_and_k_diffusion_objects _import_structure['utils.dummy_torch_and_transformers_and_k_diffusion_objects'] = [name for name in dir(dummy_torch_and_transformers_and_k_diffusion_objects) if not name.startswith('_')] else: 
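# Note (added comment): when torch, transformers and k-diffusion are all importable, the
# k-diffusion-backed pipelines are registered below; otherwise the dummy placeholder objects
# collected above are exposed instead, so the missing optional dependency is reported when
# those classes are actually used.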
_import_structure['pipelines'].extend(['StableDiffusionKDiffusionPipeline', 'StableDiffusionXLKDiffusionPipeline']) try: if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_transformers_and_sentencepiece_objects _import_structure['utils.dummy_torch_and_transformers_and_sentencepiece_objects'] = [name for name in dir(dummy_torch_and_transformers_and_sentencepiece_objects) if not name.startswith('_')] else: _import_structure['pipelines'].extend(['KolorsImg2ImgPipeline', 'KolorsPAGPipeline', 'KolorsPipeline']) try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_transformers_and_onnx_objects _import_structure['utils.dummy_torch_and_transformers_and_onnx_objects'] = [name for name in dir(dummy_torch_and_transformers_and_onnx_objects) if not name.startswith('_')] else: _import_structure['pipelines'].extend(['OnnxStableDiffusionImg2ImgPipeline', 'OnnxStableDiffusionInpaintPipeline', 'OnnxStableDiffusionInpaintPipelineLegacy', 'OnnxStableDiffusionPipeline', 'OnnxStableDiffusionUpscalePipeline', 'StableDiffusionOnnxPipeline']) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torch_and_librosa_objects _import_structure['utils.dummy_torch_and_librosa_objects'] = [name for name in dir(dummy_torch_and_librosa_objects) if not name.startswith('_')] else: _import_structure['pipelines'].extend(['AudioDiffusionPipeline', 'Mel']) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_transformers_and_torch_and_note_seq_objects _import_structure['utils.dummy_transformers_and_torch_and_note_seq_objects'] = [name for name in dir(dummy_transformers_and_torch_and_note_seq_objects) if not name.startswith('_')] else: _import_structure['pipelines'].extend(['SpectrogramDiffusionPipeline']) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_flax_objects _import_structure['utils.dummy_flax_objects'] = [name for name in dir(dummy_flax_objects) if not name.startswith('_')] else: _import_structure['models.controlnet_flax'] = ['FlaxControlNetModel'] _import_structure['models.modeling_flax_utils'] = ['FlaxModelMixin'] _import_structure['models.unets.unet_2d_condition_flax'] = ['FlaxUNet2DConditionModel'] _import_structure['models.vae_flax'] = ['FlaxAutoencoderKL'] _import_structure['pipelines'].extend(['FlaxDiffusionPipeline']) _import_structure['schedulers'].extend(['FlaxDDIMScheduler', 'FlaxDDPMScheduler', 'FlaxDPMSolverMultistepScheduler', 'FlaxEulerDiscreteScheduler', 'FlaxKarrasVeScheduler', 'FlaxLMSDiscreteScheduler', 'FlaxPNDMScheduler', 'FlaxSchedulerMixin', 'FlaxScoreSdeVeScheduler']) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_flax_and_transformers_objects _import_structure['utils.dummy_flax_and_transformers_objects'] = [name for name in dir(dummy_flax_and_transformers_objects) if not name.startswith('_')] else: 
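# Note (added comment): the Flax Stable Diffusion pipelines registered below require both
# flax and transformers; if either is missing, the corresponding dummy objects are exposed
# instead of the real implementations.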
_import_structure['pipelines'].extend(['FlaxStableDiffusionControlNetPipeline', 'FlaxStableDiffusionImg2ImgPipeline', 'FlaxStableDiffusionInpaintPipeline', 'FlaxStableDiffusionPipeline', 'FlaxStableDiffusionXLPipeline']) try: if not is_note_seq_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_note_seq_objects _import_structure['utils.dummy_note_seq_objects'] = [name for name in dir(dummy_note_seq_objects) if not name.startswith('_')] else: _import_structure['pipelines'].extend(['MidiProcessor']) if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .configuration_utils import ConfigMixin try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * else: from .models import AsymmetricAutoencoderKL, AuraFlowTransformer2DModel, AutoencoderKL, AutoencoderKLCogVideoX, AutoencoderKLTemporalDecoder, AutoencoderOobleck, AutoencoderTiny, CogVideoXTransformer3DModel, ConsistencyDecoderVAE, ControlNetModel, ControlNetXSAdapter, DiTTransformer2DModel, FluxControlNetModel, FluxMultiControlNetModel, FluxTransformer2DModel, HunyuanDiT2DControlNetModel, HunyuanDiT2DModel, HunyuanDiT2DMultiControlNetModel, I2VGenXLUNet, Kandinsky3UNet, LatteTransformer3DModel, LuminaNextDiT2DModel, ModelMixin, MotionAdapter, MultiAdapter, PixArtTransformer2DModel, PriorTransformer, SD3ControlNetModel, SD3MultiControlNetModel, SD3Transformer2DModel, SparseControlNetModel, StableAudioDiTModel, T2IAdapter, T5FilmDecoder, Transformer2DModel, UNet1DModel, UNet2DConditionModel, UNet2DModel, UNet3DConditionModel, UNetControlNetXSModel, UNetMotionModel, UNetSpatioTemporalConditionModel, UVit2DModel, VQModel from .optimization import get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler from .pipelines import AudioPipelineOutput, AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image, BlipDiffusionControlNetPipeline, BlipDiffusionPipeline, CLIPImageProjection, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, StableDiffusionMixin from .schedulers import AmusedScheduler, CMStochasticIterativeScheduler, CogVideoXDDIMScheduler, CogVideoXDPMScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DDPMWuerstchenScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EDMDPMSolverMultistepScheduler, EDMEulerScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, FlowMatchEulerDiscreteScheduler, FlowMatchHeunDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPM2AncestralDiscreteScheduler, KDPM2DiscreteScheduler, LCMScheduler, PNDMScheduler, RePaintScheduler, SASolverScheduler, SchedulerMixin, ScoreSdeVeScheduler, TCDScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler from .training_utils import EMAModel try: if not 
(is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * else: from .schedulers import CosineDPMSolverMultistepScheduler, DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * else: from .pipelines import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline, AnimateDiffControlNetPipeline, AnimateDiffPAGPipeline, AnimateDiffPipeline, AnimateDiffSDXLPipeline, AnimateDiffSparseControlNetPipeline, AnimateDiffVideoToVideoControlNetPipeline, AnimateDiffVideoToVideoPipeline, AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, AudioLDMPipeline, AuraFlowPipeline, CLIPImageProjection, CogVideoXPipeline, CogVideoXVideoToVideoPipeline, CycleDiffusionPipeline, FluxControlNetPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxPipeline, HunyuanDiTControlNetPipeline, HunyuanDiTPAGPipeline, HunyuanDiTPipeline, I2VGenXLPipeline, IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline, KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyImg2ImgPipeline, KandinskyInpaintCombinedPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyV22CombinedPipeline, KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22ControlnetPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22Img2ImgPipeline, KandinskyV22InpaintCombinedPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline, LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline, LattePipeline, LDMTextToImagePipeline, LEditsPPPipelineStableDiffusion, LEditsPPPipelineStableDiffusionXL, LuminaText2ImgPipeline, MarigoldDepthPipeline, MarigoldNormalsPipeline, MusicLDMPipeline, PaintByExamplePipeline, PIAPipeline, PixArtAlphaPipeline, PixArtSigmaPAGPipeline, PixArtSigmaPipeline, SemanticStableDiffusionPipeline, ShapEImg2ImgPipeline, ShapEPipeline, StableAudioPipeline, StableAudioProjectionModel, StableCascadeCombinedPipeline, StableCascadeDecoderPipeline, StableCascadePriorPipeline, StableDiffusion3ControlNetPipeline, StableDiffusion3Img2ImgPipeline, StableDiffusion3InpaintPipeline, StableDiffusion3PAGPipeline, StableDiffusion3Pipeline, StableDiffusionAdapterPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPAGPipeline, StableDiffusionControlNetPipeline, StableDiffusionControlNetXSPipeline, StableDiffusionDepth2ImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline, StableDiffusionImageVariationPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPix2PixPipeline, StableDiffusionLatentUpscalePipeline, 
StableDiffusionLDM3DPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPAGPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPix2PixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableDiffusionXLAdapterPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetInpaintPipeline, StableDiffusionXLControlNetPAGImg2ImgPipeline, StableDiffusionXLControlNetPAGPipeline, StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetXSPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLInstructPix2PixPipeline, StableDiffusionXLPAGImg2ImgPipeline, StableDiffusionXLPAGInpaintPipeline, StableDiffusionXLPAGPipeline, StableDiffusionXLPipeline, StableUnCLIPImg2ImgPipeline, StableUnCLIPPipeline, StableVideoDiffusionPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, TextToVideoZeroSDXLPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, WuerstchenPriorPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * else: from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_sentencepiece_objects import * else: from .pipelines import KolorsImg2ImgPipeline, KolorsPAGPipeline, KolorsPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * else: from .pipelines import OnnxStableDiffusionImg2ImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unets.unet_2d_condition_flax import FlaxUNet2DConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import 
FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxEulerDiscreteScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * else: from .pipelines import FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImg2ImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, FlaxStableDiffusionXLPipeline try: if not is_note_seq_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * else: from .pipelines import MidiProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__, extra_objects={'__version__': __version__}) # File: diffusers-main/src/diffusers/callbacks.py from typing import Any, Dict, List from .configuration_utils import ConfigMixin, register_to_config from .utils import CONFIG_NAME class PipelineCallback(ConfigMixin): config_name = CONFIG_NAME @register_to_config def __init__(self, cutoff_step_ratio=1.0, cutoff_step_index=None): super().__init__() if cutoff_step_ratio is None and cutoff_step_index is None or (cutoff_step_ratio is not None and cutoff_step_index is not None): raise ValueError('Either cutoff_step_ratio or cutoff_step_index should be provided, not both or none.') if cutoff_step_ratio is not None and (not isinstance(cutoff_step_ratio, float) or not 0.0 <= cutoff_step_ratio <= 1.0): raise ValueError('cutoff_step_ratio must be a float between 0.0 and 1.0.') @property def tensor_inputs(self) -> List[str]: raise NotImplementedError(f'You need to set the attribute `tensor_inputs` for {self.__class__}') def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> Dict[str, Any]: raise NotImplementedError(f'You need to implement the method `callback_fn` for {self.__class__}') def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: return self.callback_fn(pipeline, step_index, timestep, callback_kwargs) class MultiPipelineCallbacks: def __init__(self, callbacks: List[PipelineCallback]): self.callbacks = callbacks @property def tensor_inputs(self) -> List[str]: return [input for callback in self.callbacks for input in callback.tensor_inputs] def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: for callback in self.callbacks: callback_kwargs = callback(pipeline, step_index, timestep, callback_kwargs) return callback_kwargs class SDCFGCutoffCallback(PipelineCallback): tensor_inputs = ['prompt_embeds'] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: cutoff_step_ratio = self.config.cutoff_step_ratio cutoff_step_index = self.config.cutoff_step_index cutoff_step = cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) if step_index == cutoff_step: prompt_embeds = callback_kwargs[self.tensor_inputs[0]] prompt_embeds = prompt_embeds[-1:] pipeline._guidance_scale = 0.0 callback_kwargs[self.tensor_inputs[0]] = prompt_embeds return callback_kwargs class SDXLCFGCutoffCallback(PipelineCallback): tensor_inputs = ['prompt_embeds', 'add_text_embeds', 'add_time_ids'] def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: 
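        # Added comments: once `step_index` reaches the configured cutoff step (given either as an
        # absolute index or as a ratio of `pipeline.num_timesteps`), this callback truncates the
        # batched SDXL conditioning tensors to their final (conditional) entry and zeroes the
        # guidance scale, effectively disabling classifier-free guidance for the remaining steps.
        #
        # A minimal usage sketch (an assumption for illustration; it presumes an SDXL pipeline
        # exposing the standard `callback_on_step_end` arguments):
        #   callback = SDXLCFGCutoffCallback(cutoff_step_ratio=0.4)
        #   pipe(prompt, callback_on_step_end=callback,
        #        callback_on_step_end_tensor_inputs=callback.tensor_inputs)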
        cutoff_step_ratio = self.config.cutoff_step_ratio
        cutoff_step_index = self.config.cutoff_step_index
        cutoff_step = cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio)
        if step_index == cutoff_step:
            prompt_embeds = callback_kwargs[self.tensor_inputs[0]]
            prompt_embeds = prompt_embeds[-1:]
            add_text_embeds = callback_kwargs[self.tensor_inputs[1]]
            add_text_embeds = add_text_embeds[-1:]
            add_time_ids = callback_kwargs[self.tensor_inputs[2]]
            add_time_ids = add_time_ids[-1:]
            pipeline._guidance_scale = 0.0
            callback_kwargs[self.tensor_inputs[0]] = prompt_embeds
            callback_kwargs[self.tensor_inputs[1]] = add_text_embeds
            callback_kwargs[self.tensor_inputs[2]] = add_time_ids
        return callback_kwargs

class IPAdapterScaleCutoffCallback(PipelineCallback):
    tensor_inputs = []

    def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]:
        cutoff_step_ratio = self.config.cutoff_step_ratio
        cutoff_step_index = self.config.cutoff_step_index
        cutoff_step = cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio)
        if step_index == cutoff_step:
            # Turn off the IP-Adapter contribution for the remaining denoising steps.
            pipeline.set_ip_adapter_scale(0.0)
        return callback_kwargs

# File: diffusers-main/src/diffusers/commands/__init__.py
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseDiffusersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

# File: diffusers-main/src/diffusers/commands/diffusers_cli.py
from argparse import ArgumentParser
from .env import EnvironmentCommand
from .fp16_safetensors import FP16SafetensorsCommand

def main():
    # CLI entry point: registers the `env` and `fp16_safetensors` subcommands and dispatches
    # to whichever command factory the parsed arguments select.
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')
    EnvironmentCommand.register_subcommand(commands_parser)
    FP16SafetensorsCommand.register_subcommand(commands_parser)
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    service = args.func(args)
    service.run()

if __name__ == '__main__':
    main()

# File: diffusers-main/src/diffusers/commands/env.py
import platform
import subprocess
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_bitsandbytes_available, is_flax_available, is_google_colab, is_peft_available, is_safetensors_available, is_torch_available, is_transformers_available, is_xformers_available
from . 
import BaseDiffusersCLICommand def info_command_factory(_): return EnvironmentCommand() class EnvironmentCommand(BaseDiffusersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser) -> None: download_parser = parser.add_parser('env') download_parser.set_defaults(func=info_command_factory) def run(self) -> dict: hub_version = huggingface_hub.__version__ safetensors_version = 'not installed' if is_safetensors_available(): import safetensors safetensors_version = safetensors.__version__ pt_version = 'not installed' pt_cuda_available = 'NA' if is_torch_available(): import torch pt_version = torch.__version__ pt_cuda_available = torch.cuda.is_available() flax_version = 'not installed' jax_version = 'not installed' jaxlib_version = 'not installed' jax_backend = 'NA' if is_flax_available(): import flax import jax import jaxlib flax_version = flax.__version__ jax_version = jax.__version__ jaxlib_version = jaxlib.__version__ jax_backend = jax.lib.xla_bridge.get_backend().platform transformers_version = 'not installed' if is_transformers_available(): import transformers transformers_version = transformers.__version__ accelerate_version = 'not installed' if is_accelerate_available(): import accelerate accelerate_version = accelerate.__version__ peft_version = 'not installed' if is_peft_available(): import peft peft_version = peft.__version__ bitsandbytes_version = 'not installed' if is_bitsandbytes_available(): import bitsandbytes bitsandbytes_version = bitsandbytes.__version__ xformers_version = 'not installed' if is_xformers_available(): import xformers xformers_version = xformers.__version__ platform_info = platform.platform() is_google_colab_str = 'Yes' if is_google_colab() else 'No' accelerator = 'NA' if platform.system() in {'Linux', 'Windows'}: try: sp = subprocess.Popen(['nvidia-smi', '--query-gpu=gpu_name,memory.total', '--format=csv,noheader'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out_str, _) = sp.communicate() out_str = out_str.decode('utf-8') if len(out_str) > 0: accelerator = out_str.strip() except FileNotFoundError: pass elif platform.system() == 'Darwin': try: sp = subprocess.Popen(['system_profiler', 'SPDisplaysDataType'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out_str, _) = sp.communicate() out_str = out_str.decode('utf-8') start = out_str.find('Chipset Model:') if start != -1: start += len('Chipset Model:') end = out_str.find('\n', start) accelerator = out_str[start:end].strip() start = out_str.find('VRAM (Total):') if start != -1: start += len('VRAM (Total):') end = out_str.find('\n', start) accelerator += ' VRAM: ' + out_str[start:end].strip() except FileNotFoundError: pass else: print('It seems you are running an unusual OS. 
Could you fill in the accelerator manually?') info = {'🤗 Diffusers version': version, 'Platform': platform_info, 'Running on Google Colab?': is_google_colab_str, 'Python version': platform.python_version(), 'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})', 'Flax version (CPU?/GPU?/TPU?)': f'{flax_version} ({jax_backend})', 'Jax version': jax_version, 'JaxLib version': jaxlib_version, 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'PEFT version': peft_version, 'Bitsandbytes version': bitsandbytes_version, 'Safetensors version': safetensors_version, 'xFormers version': xformers_version, 'Accelerator': accelerator, 'Using GPU in script?': '', 'Using distributed or parallel set-up in script?': ''} print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n') print(self.format_dict(info)) return info @staticmethod def format_dict(d: dict) -> str: return '\n'.join([f'- {prop}: {val}' for (prop, val) in d.items()]) + '\n' # File: diffusers-main/src/diffusers/commands/fp16_safetensors.py """""" import glob import json import warnings from argparse import ArgumentParser, Namespace from importlib import import_module import huggingface_hub import torch from huggingface_hub import hf_hub_download from packaging import version from ..utils import logging from . import BaseDiffusersCLICommand def conversion_command_factory(args: Namespace): if args.use_auth_token: warnings.warn('The `--use_auth_token` flag is deprecated and will be removed in a future version. Authentication is now handled automatically if user is logged in.') return FP16SafetensorsCommand(args.ckpt_id, args.fp16, args.use_safetensors) class FP16SafetensorsCommand(BaseDiffusersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): conversion_parser = parser.add_parser('fp16_safetensors') conversion_parser.add_argument('--ckpt_id', type=str, help="Repo id of the checkpoints on which to run the conversion. Example: 'openai/shap-e'.") conversion_parser.add_argument('--fp16', action='store_true', help='If serializing the variables in FP16 precision.') conversion_parser.add_argument('--use_safetensors', action='store_true', help='If serializing in the safetensors format.') conversion_parser.add_argument('--use_auth_token', action='store_true', help='When working with checkpoints having private visibility. When used `huggingface-cli login` needs to be run beforehand.') conversion_parser.set_defaults(func=conversion_command_factory) def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool): self.logger = logging.get_logger('diffusers-cli/fp16_safetensors') self.ckpt_id = ckpt_id self.local_ckpt_dir = f'/tmp/{ckpt_id}' self.fp16 = fp16 self.use_safetensors = use_safetensors if not self.use_safetensors and (not self.fp16): raise NotImplementedError('When `use_safetensors` and `fp16` both are False, then this command is of no use.') def run(self): if version.parse(huggingface_hub.__version__) < version.parse('0.9.0'): raise ImportError('The huggingface_hub version must be >= 0.9.0 to use this command. 
Please update your huggingface_hub installation.') else: from huggingface_hub import create_commit from huggingface_hub._commit_api import CommitOperationAdd model_index = hf_hub_download(repo_id=self.ckpt_id, filename='model_index.json') with open(model_index, 'r') as f: pipeline_class_name = json.load(f)['_class_name'] pipeline_class = getattr(import_module('diffusers'), pipeline_class_name) self.logger.info(f'Pipeline class imported: {pipeline_class_name}.') pipeline = pipeline_class.from_pretrained(self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32) pipeline.save_pretrained(self.local_ckpt_dir, safe_serialization=True if self.use_safetensors else False, variant='fp16' if self.fp16 else None) self.logger.info(f'Pipeline locally saved to {self.local_ckpt_dir}.') if self.fp16: modified_paths = glob.glob(f'{self.local_ckpt_dir}/*/*.fp16.*') elif self.use_safetensors: modified_paths = glob.glob(f'{self.local_ckpt_dir}/*/*.safetensors') commit_message = f'Serialize variables with FP16: {self.fp16} and safetensors: {self.use_safetensors}.' operations = [] for path in modified_paths: operations.append(CommitOperationAdd(path_in_repo='/'.join(path.split('/')[4:]), path_or_fileobj=path)) commit_description = "Variables converted by the [`diffusers`' `fp16_safetensors` CLI](https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/fp16_safetensors.py)." hub_pr_url = create_commit(repo_id=self.ckpt_id, operations=operations, commit_message=commit_message, commit_description=commit_description, repo_type='model', create_pr=True).pr_url self.logger.info(f'PR created here: {hub_pr_url}.') # File: diffusers-main/src/diffusers/configuration_utils.py """""" import dataclasses import functools import importlib import inspect import json import os import re from collections import OrderedDict from pathlib import Path from typing import Any, Dict, Tuple, Union import numpy as np from huggingface_hub import create_repo, hf_hub_download from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, validate_hf_hub_args from requests import HTTPError from . 
import __version__ from .utils import HUGGINGFACE_CO_RESOLVE_ENDPOINT, DummyObject, deprecate, extract_commit_hash, http_user_agent, logging logger = logging.get_logger(__name__) _re_configuration_file = re.compile('config\\.(.*)\\.json') class FrozenDict(OrderedDict): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) for (key, value) in self.items(): setattr(self, key, value) self.__frozen = True def __delitem__(self, *args, **kwargs): raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.') def setdefault(self, *args, **kwargs): raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.') def pop(self, *args, **kwargs): raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.') def update(self, *args, **kwargs): raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.') def __setattr__(self, name, value): if hasattr(self, '__frozen') and self.__frozen: raise Exception(f'You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.') super().__setattr__(name, value) def __setitem__(self, name, value): if hasattr(self, '__frozen') and self.__frozen: raise Exception(f'You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.') super().__setitem__(name, value) class ConfigMixin: config_name = None ignore_for_config = [] has_compatibles = False _deprecated_kwargs = [] def register_to_config(self, **kwargs): if self.config_name is None: raise NotImplementedError(f'Make sure that {self.__class__} has defined a class name `config_name`') kwargs.pop('kwargs', None) if not hasattr(self, '_internal_dict'): internal_dict = kwargs else: previous_dict = dict(self._internal_dict) internal_dict = {**self._internal_dict, **kwargs} logger.debug(f'Updating config from {previous_dict} to {internal_dict}') self._internal_dict = FrozenDict(internal_dict) def __getattr__(self, name: str) -> Any: is_in_config = '_internal_dict' in self.__dict__ and hasattr(self.__dict__['_internal_dict'], name) is_attribute = name in self.__dict__ if is_in_config and (not is_attribute): deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'." 
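# Added comment: warn about the deprecated direct attribute access, then fall back to the
# value stored in the internal config dict so old-style access keeps working for now.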
deprecate('direct config name access', '1.0.0', deprecation_message, standard_warn=False) return self._internal_dict[name] raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs): if os.path.isfile(save_directory): raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file') os.makedirs(save_directory, exist_ok=True) output_config_file = os.path.join(save_directory, self.config_name) self.to_json_file(output_config_file) logger.info(f'Configuration saved in {output_config_file}') if push_to_hub: commit_message = kwargs.pop('commit_message', None) private = kwargs.pop('private', False) create_pr = kwargs.pop('create_pr', False) token = kwargs.pop('token', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id self._upload_folder(save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr) @classmethod def from_config(cls, config: Union[FrozenDict, Dict[str, Any]]=None, return_unused_kwargs=False, **kwargs): if 'pretrained_model_name_or_path' in kwargs: config = kwargs.pop('pretrained_model_name_or_path') if config is None: raise ValueError('Please make sure to provide a config as the first positional argument.') if not isinstance(config, dict): deprecation_message = 'It is deprecated to pass a pretrained model name or path to `from_config`.' if 'Scheduler' in cls.__name__: deprecation_message += f'If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead. Otherwise, please make sure to pass a configuration dictionary instead. This functionality will be removed in v1.0.0.' elif 'Model' in cls.__name__: deprecation_message += f'If you were trying to load a model, please use {cls}.load_config(...) followed by {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary instead. This functionality will be removed in v1.0.0.' deprecate('config-passed-as-path', '1.0.0', deprecation_message, standard_warn=False) (config, kwargs) = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs) (init_dict, unused_kwargs, hidden_dict) = cls.extract_init_dict(config, **kwargs) if 'dtype' in unused_kwargs: init_dict['dtype'] = unused_kwargs.pop('dtype') for deprecated_kwarg in cls._deprecated_kwargs: if deprecated_kwarg in unused_kwargs: init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg) model = cls(**init_dict) if '_class_name' in hidden_dict: hidden_dict['_class_name'] = cls.__name__ model.register_to_config(**hidden_dict) unused_kwargs = {**unused_kwargs, **hidden_dict} if return_unused_kwargs: return (model, unused_kwargs) else: return model @classmethod def get_config_dict(cls, *args, **kwargs): deprecation_message = f' The function get_config_dict is deprecated. Please use {cls}.load_config instead. 
This function will be removed in version v1.0.0' deprecate('get_config_dict', '1.0.0', deprecation_message, standard_warn=False) return cls.load_config(*args, **kwargs) @classmethod @validate_hf_hub_args def load_config(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return_unused_kwargs=False, return_commit_hash=False, **kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]: cache_dir = kwargs.pop('cache_dir', None) local_dir = kwargs.pop('local_dir', None) local_dir_use_symlinks = kwargs.pop('local_dir_use_symlinks', 'auto') force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) token = kwargs.pop('token', None) local_files_only = kwargs.pop('local_files_only', False) revision = kwargs.pop('revision', None) _ = kwargs.pop('mirror', None) subfolder = kwargs.pop('subfolder', None) user_agent = kwargs.pop('user_agent', {}) user_agent = {**user_agent, 'file_type': 'config'} user_agent = http_user_agent(user_agent) pretrained_model_name_or_path = str(pretrained_model_name_or_path) if cls.config_name is None: raise ValueError('`self.config_name` is not defined. Note that one should not load a config from `ConfigMixin`. Please make sure to define `config_name` in a class inheriting from `ConfigMixin`') if os.path.isfile(pretrained_model_name_or_path): config_file = pretrained_model_name_or_path elif os.path.isdir(pretrained_model_name_or_path): if subfolder is not None and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)): config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)): config_file = os.path.join(pretrained_model_name_or_path, cls.config_name) else: raise EnvironmentError(f'Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.') else: try: config_file = hf_hub_download(pretrained_model_name_or_path, filename=cls.config_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision, local_dir=local_dir, local_dir_use_symlinks=local_dir_use_symlinks) except RepositoryNotFoundError: raise EnvironmentError(f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo with `token` or log in with `huggingface-cli login`.") except RevisionNotFoundError: raise EnvironmentError(f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this model name. 
Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.") except EntryNotFoundError: raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.') except HTTPError as err: raise EnvironmentError(f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}') except ValueError: raise EnvironmentError(f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a {cls.config_name} file.\nCheckout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.") except EnvironmentError: raise EnvironmentError(f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a {cls.config_name} file") try: config_dict = cls._dict_from_json_file(config_file) commit_hash = extract_commit_hash(config_file) except (json.JSONDecodeError, UnicodeDecodeError): raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.") if not (return_unused_kwargs or return_commit_hash): return config_dict outputs = (config_dict,) if return_unused_kwargs: outputs += (kwargs,) if return_commit_hash: outputs += (commit_hash,) return outputs @staticmethod def _get_init_keys(input_class): return set(dict(inspect.signature(input_class.__init__).parameters).keys()) @classmethod def extract_init_dict(cls, config_dict, **kwargs): used_defaults = config_dict.get('_use_default_values', []) config_dict = {k: v for (k, v) in config_dict.items() if k not in used_defaults and k != '_use_default_values'} original_dict = dict(config_dict.items()) expected_keys = cls._get_init_keys(cls) expected_keys.remove('self') if 'kwargs' in expected_keys: expected_keys.remove('kwargs') if hasattr(cls, '_flax_internal_args'): for arg in cls._flax_internal_args: expected_keys.remove(arg) if len(cls.ignore_for_config) > 0: expected_keys = expected_keys - set(cls.ignore_for_config) diffusers_library = importlib.import_module(__name__.split('.')[0]) if cls.has_compatibles: compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)] else: compatible_classes = [] expected_keys_comp_cls = set() for c in compatible_classes: expected_keys_c = cls._get_init_keys(c) expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c) expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls) config_dict = {k: v for (k, v) in config_dict.items() if k not in expected_keys_comp_cls} orig_cls_name = config_dict.pop('_class_name', cls.__name__) if isinstance(orig_cls_name, str) and orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name): orig_cls = getattr(diffusers_library, orig_cls_name) unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys config_dict = {k: v for (k, v) in config_dict.items() if k not in unexpected_keys_from_orig} elif not isinstance(orig_cls_name, str) and (not isinstance(orig_cls_name, (list, tuple))): raise ValueError('Make sure that the `_class_name` is of type string or list of string (for custom pipelines).') config_dict = {k: v 
for (k, v) in config_dict.items() if not k.startswith('_')} init_dict = {} for key in expected_keys: if key in kwargs and key in config_dict: config_dict[key] = kwargs.pop(key) if key in kwargs: init_dict[key] = kwargs.pop(key) elif key in config_dict: init_dict[key] = config_dict.pop(key) if len(config_dict) > 0: logger.warning(f'The config attributes {config_dict} were passed to {cls.__name__}, but are not expected and will be ignored. Please verify your {cls.config_name} configuration file.') passed_keys = set(init_dict.keys()) if len(expected_keys - passed_keys) > 0: logger.info(f'{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.') unused_kwargs = {**config_dict, **kwargs} hidden_config_dict = {k: v for (k, v) in original_dict.items() if k not in init_dict} return (init_dict, unused_kwargs, hidden_config_dict) @classmethod def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): with open(json_file, 'r', encoding='utf-8') as reader: text = reader.read() return json.loads(text) def __repr__(self): return f'{self.__class__.__name__} {self.to_json_string()}' @property def config(self) -> Dict[str, Any]: return self._internal_dict def to_json_string(self) -> str: config_dict = self._internal_dict if hasattr(self, '_internal_dict') else {} config_dict['_class_name'] = self.__class__.__name__ config_dict['_diffusers_version'] = __version__ def to_json_saveable(value): if isinstance(value, np.ndarray): value = value.tolist() elif isinstance(value, Path): value = value.as_posix() return value config_dict = {k: to_json_saveable(v) for (k, v) in config_dict.items()} config_dict.pop('_ignore_files', None) config_dict.pop('_use_default_values', None) return json.dumps(config_dict, indent=2, sort_keys=True) + '\n' def to_json_file(self, json_file_path: Union[str, os.PathLike]): with open(json_file_path, 'w', encoding='utf-8') as writer: writer.write(self.to_json_string()) def register_to_config(init): @functools.wraps(init) def inner_init(self, *args, **kwargs): init_kwargs = {k: v for (k, v) in kwargs.items() if not k.startswith('_')} config_init_kwargs = {k: v for (k, v) in kwargs.items() if k.startswith('_')} if not isinstance(self, ConfigMixin): raise RuntimeError(f'`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does not inherit from `ConfigMixin`.') ignore = getattr(self, 'ignore_for_config', []) new_kwargs = {} signature = inspect.signature(init) parameters = {name: p.default for (i, (name, p)) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore} for (arg, name) in zip(args, parameters.keys()): new_kwargs[name] = arg new_kwargs.update({k: init_kwargs.get(k, default) for (k, default) in parameters.items() if k not in ignore and k not in new_kwargs}) if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0: new_kwargs['_use_default_values'] = list(set(new_kwargs.keys()) - set(init_kwargs)) new_kwargs = {**config_init_kwargs, **new_kwargs} getattr(self, 'register_to_config')(**new_kwargs) init(self, *args, **init_kwargs) return inner_init def flax_register_to_config(cls): original_init = cls.__init__ @functools.wraps(original_init) def init(self, *args, **kwargs): if not isinstance(self, ConfigMixin): raise RuntimeError(f'`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does not inherit from `ConfigMixin`.') init_kwargs = dict(kwargs.items()) fields = dataclasses.fields(self) default_kwargs = {} for field in fields: if field.name 
in self._flax_internal_args: continue if type(field.default) == dataclasses._MISSING_TYPE: default_kwargs[field.name] = None else: default_kwargs[field.name] = getattr(self, field.name) new_kwargs = {**default_kwargs, **init_kwargs} if 'dtype' in new_kwargs: new_kwargs.pop('dtype') for (i, arg) in enumerate(args): name = fields[i].name new_kwargs[name] = arg if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0: new_kwargs['_use_default_values'] = list(set(new_kwargs.keys()) - set(init_kwargs)) getattr(self, 'register_to_config')(**new_kwargs) original_init(self, *args, **kwargs) cls.__init__ = init return cls class LegacyConfigMixin(ConfigMixin): @classmethod def from_config(cls, config: Union[FrozenDict, Dict[str, Any]]=None, return_unused_kwargs=False, **kwargs): from .models.model_loading_utils import _fetch_remapped_cls_from_config remapped_class = _fetch_remapped_cls_from_config(config, cls) return remapped_class.from_config(config, return_unused_kwargs, **kwargs) # File: diffusers-main/src/diffusers/dependency_versions_check.py from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core pkgs_to_check_at_runtime = 'python requests filelock numpy'.split() for pkg in pkgs_to_check_at_runtime: if pkg in deps: require_version_core(deps[pkg]) else: raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def dep_version_check(pkg, hint=None): require_version(deps[pkg], hint) # File: diffusers-main/src/diffusers/dependency_versions_table.py deps = {'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.31.0', 'compel': 'compel==0.1.8', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.23.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark>=0.2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.4.1', 'jaxlib': 'jaxlib>=0.4.1', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'parameterized': 'parameterized', 'peft': 'peft>=0.6.0', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ruff': 'ruff==0.1.5', 'safetensors': 'safetensors>=0.3.1', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'GitPython': 'GitPython<3.1.19', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.41.2', 'urllib3': 'urllib3<=2.0.0', 'black': 'black'} # File: diffusers-main/src/diffusers/experimental/rl/value_guided_sampling.py import numpy as np import torch import tqdm from ...models.unets.unet_1d import UNet1DModel from ...pipelines import DiffusionPipeline from ...utils.dummy_pt_objects import DDPMScheduler from ...utils.torch_utils import randn_tensor class ValueGuidedRLPipeline(DiffusionPipeline): def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env): super().__init__() self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env) self.data = env.get_dataset() self.means = {} for key in self.data.keys(): try: self.means[key] = self.data[key].mean() except: pass self.stds = {} for key in self.data.keys(): try: self.stds[key] = 
self.data[key].std() except: pass self.state_dim = env.observation_space.shape[0] self.action_dim = env.action_space.shape[0] def normalize(self, x_in, key): return (x_in - self.means[key]) / self.stds[key] def de_normalize(self, x_in, key): return x_in * self.stds[key] + self.means[key] def to_torch(self, x_in): if isinstance(x_in, dict): return {k: self.to_torch(v) for (k, v) in x_in.items()} elif torch.is_tensor(x_in): return x_in.to(self.unet.device) return torch.tensor(x_in, device=self.unet.device) def reset_x0(self, x_in, cond, act_dim): for (key, val) in cond.items(): x_in[:, key, act_dim:] = val.clone() return x_in def run_diffusion(self, x, conditions, n_guide_steps, scale): batch_size = x.shape[0] y = None for i in tqdm.tqdm(self.scheduler.timesteps): timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) for _ in range(n_guide_steps): with torch.enable_grad(): x.requires_grad_() y = self.value_function(x.permute(0, 2, 1), timesteps).sample grad = torch.autograd.grad([y.sum()], [x])[0] posterior_variance = self.scheduler._get_variance(i) model_std = torch.exp(0.5 * posterior_variance) grad = model_std * grad grad[timesteps < 2] = 0 x = x.detach() x = x + scale * grad x = self.reset_x0(x, conditions, self.action_dim) prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) x = self.scheduler.step(prev_x, i, x)['prev_sample'] x = self.reset_x0(x, conditions, self.action_dim) x = self.to_torch(x) return (x, y) def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): obs = self.normalize(obs, 'observations') obs = obs[None].repeat(batch_size, axis=0) conditions = {0: self.to_torch(obs)} shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) x1 = randn_tensor(shape, device=self.unet.device) x = self.reset_x0(x1, conditions, self.action_dim) x = self.to_torch(x) (x, y) = self.run_diffusion(x, conditions, n_guide_steps, scale) sorted_idx = y.argsort(0, descending=True).squeeze() sorted_values = x[sorted_idx] actions = sorted_values[:, :, :self.action_dim] actions = actions.detach().cpu().numpy() denorm_actions = self.de_normalize(actions, key='actions') if y is not None: selected_index = 0 else: selected_index = np.random.randint(0, batch_size) denorm_actions = denorm_actions[selected_index, 0] return denorm_actions # File: diffusers-main/src/diffusers/image_processor.py import math import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from PIL import Image, ImageFilter, ImageOps from .configuration_utils import ConfigMixin, register_to_config from .utils import CONFIG_NAME, PIL_INTERPOLATION, deprecate PipelineImageInput = Union[PIL.Image.Image, np.ndarray, torch.Tensor, List[PIL.Image.Image], List[np.ndarray], List[torch.Tensor]] PipelineDepthInput = PipelineImageInput def is_valid_image(image): return isinstance(image, PIL.Image.Image) or (isinstance(image, (np.ndarray, torch.Tensor)) and image.ndim in (2, 3)) def is_valid_image_imagelist(images): if isinstance(images, (np.ndarray, torch.Tensor)) and images.ndim == 4: return True elif is_valid_image(images): return True elif isinstance(images, list): return all((is_valid_image(image) for image in images)) return False class VaeImageProcessor(ConfigMixin): config_name = CONFIG_NAME @register_to_config def __init__(self, do_resize: bool=True, vae_scale_factor: int=8, vae_latent_channels: int=4, resample: str='lanczos', do_normalize: bool=True, 
do_binarize: bool=False, do_convert_rgb: bool=False, do_convert_grayscale: bool=False): super().__init__() if do_convert_rgb and do_convert_grayscale: raise ValueError('`do_convert_rgb` and `do_convert_grayscale` can not both be set to `True`, if you intended to convert the image into RGB format, please set `do_convert_grayscale = False`.', ' if you intended to convert the image into grayscale format, please set `do_convert_rgb = False`') @staticmethod def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]: if images.ndim == 3: images = images[None, ...] images = (images * 255).round().astype('uint8') if images.shape[-1] == 1: pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images] else: pil_images = [Image.fromarray(image) for image in images] return pil_images @staticmethod def pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray: if not isinstance(images, list): images = [images] images = [np.array(image).astype(np.float32) / 255.0 for image in images] images = np.stack(images, axis=0) return images @staticmethod def numpy_to_pt(images: np.ndarray) -> torch.Tensor: if images.ndim == 3: images = images[..., None] images = torch.from_numpy(images.transpose(0, 3, 1, 2)) return images @staticmethod def pt_to_numpy(images: torch.Tensor) -> np.ndarray: images = images.cpu().permute(0, 2, 3, 1).float().numpy() return images @staticmethod def normalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: return 2.0 * images - 1.0 @staticmethod def denormalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: return (images / 2 + 0.5).clamp(0, 1) @staticmethod def convert_to_rgb(image: PIL.Image.Image) -> PIL.Image.Image: image = image.convert('RGB') return image @staticmethod def convert_to_grayscale(image: PIL.Image.Image) -> PIL.Image.Image: image = image.convert('L') return image @staticmethod def blur(image: PIL.Image.Image, blur_factor: int=4) -> PIL.Image.Image: image = image.filter(ImageFilter.GaussianBlur(blur_factor)) return image @staticmethod def get_crop_region(mask_image: PIL.Image.Image, width: int, height: int, pad=0): mask_image = mask_image.convert('L') mask = np.array(mask_image) (h, w) = mask.shape crop_left = 0 for i in range(w): if not (mask[:, i] == 0).all(): break crop_left += 1 crop_right = 0 for i in reversed(range(w)): if not (mask[:, i] == 0).all(): break crop_right += 1 crop_top = 0 for i in range(h): if not (mask[i] == 0).all(): break crop_top += 1 crop_bottom = 0 for i in reversed(range(h)): if not (mask[i] == 0).all(): break crop_bottom += 1 (x1, y1, x2, y2) = (int(max(crop_left - pad, 0)), int(max(crop_top - pad, 0)), int(min(w - crop_right + pad, w)), int(min(h - crop_bottom + pad, h))) ratio_crop_region = (x2 - x1) / (y2 - y1) ratio_processing = width / height if ratio_crop_region > ratio_processing: desired_height = (x2 - x1) / ratio_processing desired_height_diff = int(desired_height - (y2 - y1)) y1 -= desired_height_diff // 2 y2 += desired_height_diff - desired_height_diff // 2 if y2 >= mask_image.height: diff = y2 - mask_image.height y2 -= diff y1 -= diff if y1 < 0: y2 -= y1 y1 -= y1 if y2 >= mask_image.height: y2 = mask_image.height else: desired_width = (y2 - y1) * ratio_processing desired_width_diff = int(desired_width - (x2 - x1)) x1 -= desired_width_diff // 2 x2 += desired_width_diff - desired_width_diff // 2 if x2 >= mask_image.width: diff = x2 - mask_image.width x2 -= diff x1 -= diff if x1 < 0: x2 -= x1 x1 -= x1 if x2 >= mask_image.width: x2 = 
mask_image.width return (x1, y1, x2, y2) def _resize_and_fill(self, image: PIL.Image.Image, width: int, height: int) -> PIL.Image.Image: ratio = width / height src_ratio = image.width / image.height src_w = width if ratio < src_ratio else image.width * height // image.height src_h = height if ratio >= src_ratio else image.height * width // image.width resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION['lanczos']) res = Image.new('RGB', (width, height)) res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) if ratio < src_ratio: fill_height = height // 2 - src_h // 2 if fill_height > 0: res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0)) res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h)) elif ratio > src_ratio: fill_width = width // 2 - src_w // 2 if fill_width > 0: res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0)) res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0)) return res def _resize_and_crop(self, image: PIL.Image.Image, width: int, height: int) -> PIL.Image.Image: ratio = width / height src_ratio = image.width / image.height src_w = width if ratio > src_ratio else image.width * height // image.height src_h = height if ratio <= src_ratio else image.height * width // image.width resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION['lanczos']) res = Image.new('RGB', (width, height)) res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) return res def resize(self, image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], height: int, width: int, resize_mode: str='default') -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]: if resize_mode != 'default' and (not isinstance(image, PIL.Image.Image)): raise ValueError(f'Only PIL image input is supported for resize_mode {resize_mode}') if isinstance(image, PIL.Image.Image): if resize_mode == 'default': image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample]) elif resize_mode == 'fill': image = self._resize_and_fill(image, width, height) elif resize_mode == 'crop': image = self._resize_and_crop(image, width, height) else: raise ValueError(f'resize_mode {resize_mode} is not supported') elif isinstance(image, torch.Tensor): image = torch.nn.functional.interpolate(image, size=(height, width)) elif isinstance(image, np.ndarray): image = self.numpy_to_pt(image) image = torch.nn.functional.interpolate(image, size=(height, width)) image = self.pt_to_numpy(image) return image def binarize(self, image: PIL.Image.Image) -> PIL.Image.Image: image[image < 0.5] = 0 image[image >= 0.5] = 1 return image def get_default_height_width(self, image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], height: Optional[int]=None, width: Optional[int]=None) -> Tuple[int, int]: if height is None: if isinstance(image, PIL.Image.Image): height = image.height elif isinstance(image, torch.Tensor): height = image.shape[2] else: height = image.shape[1] if width is None: if isinstance(image, PIL.Image.Image): width = image.width elif isinstance(image, torch.Tensor): width = image.shape[3] else: width = image.shape[2] (width, height) = (x - x % self.config.vae_scale_factor for x in (width, height)) return (height, width) def preprocess(self, image: PipelineImageInput, height: Optional[int]=None, width: Optional[int]=None, resize_mode: str='default', crops_coords: 
Optional[Tuple[int, int, int, int]]=None) -> torch.Tensor: supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor) if self.config.do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and (image.ndim == 3): if isinstance(image, torch.Tensor): image = image.unsqueeze(1) elif image.shape[-1] == 1: image = np.expand_dims(image, axis=0) else: image = np.expand_dims(image, axis=-1) if isinstance(image, list) and isinstance(image[0], np.ndarray) and (image[0].ndim == 4): warnings.warn('Passing `image` as a list of 4d np.ndarray is deprecated. Please concatenate the list along the batch dimension and pass it as a single 4d np.ndarray', FutureWarning) image = np.concatenate(image, axis=0) if isinstance(image, list) and isinstance(image[0], torch.Tensor) and (image[0].ndim == 4): warnings.warn('Passing `image` as a list of 4d torch.Tensor is deprecated. Please concatenate the list along the batch dimension and pass it as a single 4d torch.Tensor', FutureWarning) image = torch.cat(image, axis=0) if not is_valid_image_imagelist(image): raise ValueError(f"Input is in incorrect format. Currently, we only support {', '.join((str(x) for x in supported_formats))}") if not isinstance(image, list): image = [image] if isinstance(image[0], PIL.Image.Image): if crops_coords is not None: image = [i.crop(crops_coords) for i in image] if self.config.do_resize: (height, width) = self.get_default_height_width(image[0], height, width) image = [self.resize(i, height, width, resize_mode=resize_mode) for i in image] if self.config.do_convert_rgb: image = [self.convert_to_rgb(i) for i in image] elif self.config.do_convert_grayscale: image = [self.convert_to_grayscale(i) for i in image] image = self.pil_to_numpy(image) image = self.numpy_to_pt(image) elif isinstance(image[0], np.ndarray): image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) image = self.numpy_to_pt(image) (height, width) = self.get_default_height_width(image, height, width) if self.config.do_resize: image = self.resize(image, height, width) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) if self.config.do_convert_grayscale and image.ndim == 3: image = image.unsqueeze(1) channel = image.shape[1] if channel == self.config.vae_latent_channels: return image (height, width) = self.get_default_height_width(image, height, width) if self.config.do_resize: image = self.resize(image, height, width) do_normalize = self.config.do_normalize if do_normalize and image.min() < 0: warnings.warn(f'Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]', FutureWarning) do_normalize = False if do_normalize: image = self.normalize(image) if self.config.do_binarize: image = self.binarize(image) return image def postprocess(self, image: torch.Tensor, output_type: str='pil', do_denormalize: Optional[List[bool]]=None) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]: if not isinstance(image, torch.Tensor): raise ValueError(f'Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor') if output_type not in ['latent', 'pt', 'np', 'pil']: deprecation_message = f'the output_type {output_type} is outdated and has been set to `np`. 
Please make sure to set it to one of these instead: `pil`, `np`, `pt`, `latent`' deprecate('Unsupported output_type', '1.0.0', deprecation_message, standard_warn=False) output_type = 'np' if output_type == 'latent': return image if do_denormalize is None: do_denormalize = [self.config.do_normalize] * image.shape[0] image = torch.stack([self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]) if output_type == 'pt': return image image = self.pt_to_numpy(image) if output_type == 'np': return image if output_type == 'pil': return self.numpy_to_pil(image) def apply_overlay(self, mask: PIL.Image.Image, init_image: PIL.Image.Image, image: PIL.Image.Image, crop_coords: Optional[Tuple[int, int, int, int]]=None) -> PIL.Image.Image: (width, height) = (image.width, image.height) init_image = self.resize(init_image, width=width, height=height) mask = self.resize(mask, width=width, height=height) init_image_masked = PIL.Image.new('RGBa', (width, height)) init_image_masked.paste(init_image.convert('RGBA').convert('RGBa'), mask=ImageOps.invert(mask.convert('L'))) init_image_masked = init_image_masked.convert('RGBA') if crop_coords is not None: (x, y, x2, y2) = crop_coords w = x2 - x h = y2 - y base_image = PIL.Image.new('RGBA', (width, height)) image = self.resize(image, height=h, width=w, resize_mode='crop') base_image.paste(image, (x, y)) image = base_image.convert('RGB') image = image.convert('RGBA') image.alpha_composite(init_image_masked) image = image.convert('RGB') return image class VaeImageProcessorLDM3D(VaeImageProcessor): config_name = CONFIG_NAME @register_to_config def __init__(self, do_resize: bool=True, vae_scale_factor: int=8, resample: str='lanczos', do_normalize: bool=True): super().__init__() @staticmethod def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]: if images.ndim == 3: images = images[None, ...] images = (images * 255).round().astype('uint8') if images.shape[-1] == 1: pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images] else: pil_images = [Image.fromarray(image[:, :, :3]) for image in images] return pil_images @staticmethod def depth_pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray: if not isinstance(images, list): images = [images] images = [np.array(image).astype(np.float32) / (2 ** 16 - 1) for image in images] images = np.stack(images, axis=0) return images @staticmethod def rgblike_to_depthmap(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: return image[:, :, 1] * 2 ** 8 + image[:, :, 2] def numpy_to_depth(self, images: np.ndarray) -> List[PIL.Image.Image]: if images.ndim == 3: images = images[None, ...] images_depth = images[:, :, :, 3:] if images.shape[-1] == 6: images_depth = (images_depth * 255).round().astype('uint8') pil_images = [Image.fromarray(self.rgblike_to_depthmap(image_depth), mode='I;16') for image_depth in images_depth] elif images.shape[-1] == 4: images_depth = (images_depth * 65535.0).astype(np.uint16) pil_images = [Image.fromarray(image_depth, mode='I;16') for image_depth in images_depth] else: raise Exception('Not supported') return pil_images def postprocess(self, image: torch.Tensor, output_type: str='pil', do_denormalize: Optional[List[bool]]=None) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]: if not isinstance(image, torch.Tensor): raise ValueError(f'Input for postprocessing is in incorrect format: {type(image)}. 
We only support pytorch tensor') if output_type not in ['latent', 'pt', 'np', 'pil']: deprecation_message = f'the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: `pil`, `np`, `pt`, `latent`' deprecate('Unsupported output_type', '1.0.0', deprecation_message, standard_warn=False) output_type = 'np' if do_denormalize is None: do_denormalize = [self.config.do_normalize] * image.shape[0] image = torch.stack([self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]) image = self.pt_to_numpy(image) if output_type == 'np': if image.shape[-1] == 6: image_depth = np.stack([self.rgblike_to_depthmap(im[:, :, 3:]) for im in image], axis=0) else: image_depth = image[:, :, :, 3:] return (image[:, :, :, :3], image_depth) if output_type == 'pil': return (self.numpy_to_pil(image), self.numpy_to_depth(image)) else: raise Exception(f'This type {output_type} is not supported') def preprocess(self, rgb: Union[torch.Tensor, PIL.Image.Image, np.ndarray], depth: Union[torch.Tensor, PIL.Image.Image, np.ndarray], height: Optional[int]=None, width: Optional[int]=None, target_res: Optional[int]=None) -> torch.Tensor: supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor) if self.config.do_convert_grayscale and isinstance(rgb, (torch.Tensor, np.ndarray)) and (rgb.ndim == 3): raise Exception('This is not yet supported') if isinstance(rgb, supported_formats): rgb = [rgb] depth = [depth] elif not (isinstance(rgb, list) and all((isinstance(i, supported_formats) for i in rgb))): raise ValueError(f"Input is in incorrect format: {[type(i) for i in rgb]}. Currently, we only support {', '.join(supported_formats)}") if isinstance(rgb[0], PIL.Image.Image): if self.config.do_convert_rgb: raise Exception('This is not yet supported') if self.config.do_resize or target_res: (height, width) = self.get_default_height_width(rgb[0], height, width) if not target_res else target_res rgb = [self.resize(i, height, width) for i in rgb] depth = [self.resize(i, height, width) for i in depth] rgb = self.pil_to_numpy(rgb) rgb = self.numpy_to_pt(rgb) depth = self.depth_pil_to_numpy(depth) depth = self.numpy_to_pt(depth) elif isinstance(rgb[0], np.ndarray): rgb = np.concatenate(rgb, axis=0) if rgb[0].ndim == 4 else np.stack(rgb, axis=0) rgb = self.numpy_to_pt(rgb) (height, width) = self.get_default_height_width(rgb, height, width) if self.config.do_resize: rgb = self.resize(rgb, height, width) depth = np.concatenate(depth, axis=0) if rgb[0].ndim == 4 else np.stack(depth, axis=0) depth = self.numpy_to_pt(depth) (height, width) = self.get_default_height_width(depth, height, width) if self.config.do_resize: depth = self.resize(depth, height, width) elif isinstance(rgb[0], torch.Tensor): raise Exception('This is not yet supported') do_normalize = self.config.do_normalize if rgb.min() < 0 and do_normalize: warnings.warn(f'Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] when passing as pytorch tensor or numpy Array. 
You passed `image` with value range [{rgb.min()},{rgb.max()}]', FutureWarning) do_normalize = False if do_normalize: rgb = self.normalize(rgb) depth = self.normalize(depth) if self.config.do_binarize: rgb = self.binarize(rgb) depth = self.binarize(depth) return (rgb, depth) class IPAdapterMaskProcessor(VaeImageProcessor): config_name = CONFIG_NAME @register_to_config def __init__(self, do_resize: bool=True, vae_scale_factor: int=8, resample: str='lanczos', do_normalize: bool=False, do_binarize: bool=True, do_convert_grayscale: bool=True): super().__init__(do_resize=do_resize, vae_scale_factor=vae_scale_factor, resample=resample, do_normalize=do_normalize, do_binarize=do_binarize, do_convert_grayscale=do_convert_grayscale) @staticmethod def downsample(mask: torch.Tensor, batch_size: int, num_queries: int, value_embed_dim: int): o_h = mask.shape[1] o_w = mask.shape[2] ratio = o_w / o_h mask_h = int(math.sqrt(num_queries / ratio)) mask_h = int(mask_h) + int(num_queries % int(mask_h) != 0) mask_w = num_queries // mask_h mask_downsample = F.interpolate(mask.unsqueeze(0), size=(mask_h, mask_w), mode='bicubic').squeeze(0) if mask_downsample.shape[0] < batch_size: mask_downsample = mask_downsample.repeat(batch_size, 1, 1) mask_downsample = mask_downsample.view(mask_downsample.shape[0], -1) downsampled_area = mask_h * mask_w if downsampled_area < num_queries: warnings.warn('The aspect ratio of the mask does not match the aspect ratio of the output image. Please update your masks or adjust the output size for optimal performance.', UserWarning) mask_downsample = F.pad(mask_downsample, (0, num_queries - mask_downsample.shape[1]), value=0.0) if downsampled_area > num_queries: warnings.warn('The aspect ratio of the mask does not match the aspect ratio of the output image. 
Please update your masks or adjust the output size for optimal performance.', UserWarning) mask_downsample = mask_downsample[:, :num_queries] mask_downsample = mask_downsample.view(mask_downsample.shape[0], mask_downsample.shape[1], 1).repeat(1, 1, value_embed_dim) return mask_downsample class PixArtImageProcessor(VaeImageProcessor): @register_to_config def __init__(self, do_resize: bool=True, vae_scale_factor: int=8, resample: str='lanczos', do_normalize: bool=True, do_binarize: bool=False, do_convert_grayscale: bool=False): super().__init__(do_resize=do_resize, vae_scale_factor=vae_scale_factor, resample=resample, do_normalize=do_normalize, do_binarize=do_binarize, do_convert_grayscale=do_convert_grayscale) @staticmethod def classify_height_width_bin(height: int, width: int, ratios: dict) -> Tuple[int, int]: ar = float(height / width) closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar)) default_hw = ratios[closest_ratio] return (int(default_hw[0]), int(default_hw[1])) @staticmethod def resize_and_crop_tensor(samples: torch.Tensor, new_width: int, new_height: int) -> torch.Tensor: (orig_height, orig_width) = (samples.shape[2], samples.shape[3]) if orig_height != new_height or orig_width != new_width: ratio = max(new_height / orig_height, new_width / orig_width) resized_width = int(orig_width * ratio) resized_height = int(orig_height * ratio) samples = F.interpolate(samples, size=(resized_height, resized_width), mode='bilinear', align_corners=False) start_x = (resized_width - new_width) // 2 end_x = start_x + new_width start_y = (resized_height - new_height) // 2 end_y = start_y + new_height samples = samples[:, :, start_y:end_y, start_x:end_x] return samples # File: diffusers-main/src/diffusers/loaders/__init__.py from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available def text_encoder_lora_state_dict(text_encoder): deprecate('text_encoder_load_state_dict in `models`', '0.27.0', '`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.') state_dict = {} for (name, module) in text_encoder_attn_modules(text_encoder): for (k, v) in module.q_proj.lora_linear_layer.state_dict().items(): state_dict[f'{name}.q_proj.lora_linear_layer.{k}'] = v for (k, v) in module.k_proj.lora_linear_layer.state_dict().items(): state_dict[f'{name}.k_proj.lora_linear_layer.{k}'] = v for (k, v) in module.v_proj.lora_linear_layer.state_dict().items(): state_dict[f'{name}.v_proj.lora_linear_layer.{k}'] = v for (k, v) in module.out_proj.lora_linear_layer.state_dict().items(): state_dict[f'{name}.out_proj.lora_linear_layer.{k}'] = v return state_dict if is_transformers_available(): def text_encoder_attn_modules(text_encoder): deprecate('text_encoder_attn_modules in `models`', '0.27.0', '`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. 
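# --- Usage sketch (illustrative, not part of the library source): a typical round trip
# through the `VaeImageProcessor` defined above. `preprocess` turns a PIL image (or numpy
# array / torch tensor) into a normalized NCHW tensor whose height and width are snapped
# down to multiples of `vae_scale_factor`; `postprocess` maps a tensor back to PIL images.
import PIL.Image
from diffusers.image_processor import VaeImageProcessor

processor = VaeImageProcessor(vae_scale_factor=8)
pil_image = PIL.Image.new("RGB", (513, 517))               # odd size on purpose
tensor = processor.preprocess(pil_image)                   # shape (1, 3, 512, 512), values in [-1, 1]
images = processor.postprocess(tensor, output_type="pil")  # list with one PIL.Image.Image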
See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.') from transformers import CLIPTextModel, CLIPTextModelWithProjection attn_modules = [] if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): for (i, layer) in enumerate(text_encoder.text_model.encoder.layers): name = f'text_model.encoder.layers.{i}.self_attn' mod = layer.self_attn attn_modules.append((name, mod)) else: raise ValueError(f'do not know how to get attention modules for: {text_encoder.__class__.__name__}') return attn_modules _import_structure = {} if is_torch_available(): _import_structure['single_file_model'] = ['FromOriginalModelMixin'] _import_structure['unet'] = ['UNet2DConditionLoadersMixin'] _import_structure['utils'] = ['AttnProcsLayers'] if is_transformers_available(): _import_structure['single_file'] = ['FromSingleFileMixin'] _import_structure['lora_pipeline'] = ['AmusedLoraLoaderMixin', 'StableDiffusionLoraLoaderMixin', 'SD3LoraLoaderMixin', 'StableDiffusionXLLoraLoaderMixin', 'LoraLoaderMixin', 'FluxLoraLoaderMixin'] _import_structure['textual_inversion'] = ['TextualInversionLoaderMixin'] _import_structure['ip_adapter'] = ['IPAdapterMixin'] _import_structure['peft'] = ['PeftAdapterMixin'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if is_torch_available(): from .single_file_model import FromOriginalModelMixin from .unet import UNet2DConditionLoadersMixin from .utils import AttnProcsLayers if is_transformers_available(): from .ip_adapter import IPAdapterMixin from .lora_pipeline import AmusedLoraLoaderMixin, FluxLoraLoaderMixin, LoraLoaderMixin, SD3LoraLoaderMixin, StableDiffusionLoraLoaderMixin, StableDiffusionXLLoraLoaderMixin from .single_file import FromSingleFileMixin from .textual_inversion import TextualInversionLoaderMixin from .peft import PeftAdapterMixin else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/loaders/ip_adapter.py from pathlib import Path from typing import Dict, List, Optional, Union import torch import torch.nn.functional as F from huggingface_hub.utils import validate_hf_hub_args from safetensors import safe_open from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict from ..utils import USE_PEFT_BACKEND, _get_model_file, is_accelerate_available, is_torch_version, is_transformers_available, logging from .unet_loader_utils import _maybe_expand_lora_scales if is_transformers_available(): from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ..models.attention_processor import AttnProcessor, AttnProcessor2_0, IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0 logger = logging.get_logger(__name__) class IPAdapterMixin: @validate_hf_hub_args def load_ip_adapter(self, pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]], subfolder: Union[str, List[str]], weight_name: Union[str, List[str]], image_encoder_folder: Optional[str]='image_encoder', **kwargs): if not isinstance(weight_name, list): weight_name = [weight_name] if not isinstance(pretrained_model_name_or_path_or_dict, list): pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict] if len(pretrained_model_name_or_path_or_dict) == 1: pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name) if not isinstance(subfolder, list): subfolder = [subfolder] if len(subfolder) == 1: subfolder = subfolder * len(weight_name) if len(weight_name) 
!= len(pretrained_model_name_or_path_or_dict): raise ValueError('`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.') if len(weight_name) != len(subfolder): raise ValueError('`weight_name` and `subfolder` must have the same length.') cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) low_cpu_mem_usage = kwargs.pop('low_cpu_mem_usage', _LOW_CPU_MEM_USAGE_DEFAULT) if low_cpu_mem_usage and (not is_accelerate_available()): low_cpu_mem_usage = False logger.warning('Cannot initialize model with low cpu memory usage because `accelerate` was not found in the environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip install accelerate\n```\n.') if low_cpu_mem_usage is True and (not is_torch_version('>=', '1.9.0')): raise NotImplementedError('Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set `low_cpu_mem_usage=False`.') user_agent = {'file_type': 'attn_procs_weights', 'framework': 'pytorch'} state_dicts = [] for (pretrained_model_name_or_path_or_dict, weight_name, subfolder) in zip(pretrained_model_name_or_path_or_dict, weight_name, subfolder): if not isinstance(pretrained_model_name_or_path_or_dict, dict): model_file = _get_model_file(pretrained_model_name_or_path_or_dict, weights_name=weight_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent) if weight_name.endswith('.safetensors'): state_dict = {'image_proj': {}, 'ip_adapter': {}} with safe_open(model_file, framework='pt', device='cpu') as f: for key in f.keys(): if key.startswith('image_proj.'): state_dict['image_proj'][key.replace('image_proj.', '')] = f.get_tensor(key) elif key.startswith('ip_adapter.'): state_dict['ip_adapter'][key.replace('ip_adapter.', '')] = f.get_tensor(key) else: state_dict = load_state_dict(model_file) else: state_dict = pretrained_model_name_or_path_or_dict keys = list(state_dict.keys()) if keys != ['image_proj', 'ip_adapter']: raise ValueError('Required keys (`image_proj` and `ip_adapter`) are missing from the state dict.') state_dicts.append(state_dict) if hasattr(self, 'image_encoder') and getattr(self, 'image_encoder', None) is None: if image_encoder_folder is not None: if not isinstance(pretrained_model_name_or_path_or_dict, dict): logger.info(f'loading image_encoder from {pretrained_model_name_or_path_or_dict}') if image_encoder_folder.count('/') == 0: image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix() else: image_encoder_subfolder = Path(image_encoder_folder).as_posix() image_encoder = CLIPVisionModelWithProjection.from_pretrained(pretrained_model_name_or_path_or_dict, subfolder=image_encoder_subfolder, low_cpu_mem_usage=low_cpu_mem_usage, cache_dir=cache_dir, local_files_only=local_files_only).to(self.device, dtype=self.dtype) self.register_modules(image_encoder=image_encoder) else: raise ValueError('`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict.') else: logger.warning('image_encoder is not loaded since `image_encoder_folder=None` passed. 
You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter. Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead.') if hasattr(self, 'feature_extractor') and getattr(self, 'feature_extractor', None) is None: default_clip_size = 224 clip_image_size = self.image_encoder.config.image_size if self.image_encoder is not None else default_clip_size feature_extractor = CLIPImageProcessor(size=clip_image_size, crop_size=clip_image_size) self.register_modules(feature_extractor=feature_extractor) unet = getattr(self, self.unet_name) if not hasattr(self, 'unet') else self.unet unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage) extra_loras = unet._load_ip_adapter_loras(state_dicts) if extra_loras != {}: if not USE_PEFT_BACKEND: logger.warning('PEFT backend is required to load these weights.') else: peft_config = getattr(unet, 'peft_config', {}) for (k, lora) in extra_loras.items(): if f'faceid_{k}' not in peft_config: self.load_lora_weights(lora, adapter_name=f'faceid_{k}') self.set_adapters([f'faceid_{k}'], adapter_weights=[1.0]) def set_ip_adapter_scale(self, scale): unet = getattr(self, self.unet_name) if not hasattr(self, 'unet') else self.unet if not isinstance(scale, list): scale = [scale] scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0) for (attn_name, attn_processor) in unet.attn_processors.items(): if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)): if len(scale_configs) != len(attn_processor.scale): raise ValueError(f'Cannot assign {len(scale_configs)} scale_configs to {len(attn_processor.scale)} IP-Adapter.') elif len(scale_configs) == 1: scale_configs = scale_configs * len(attn_processor.scale) for (i, scale_config) in enumerate(scale_configs): if isinstance(scale_config, dict): for (k, s) in scale_config.items(): if attn_name.startswith(k): attn_processor.scale[i] = s else: attn_processor.scale[i] = scale_config def unload_ip_adapter(self): if hasattr(self, 'image_encoder') and getattr(self, 'image_encoder', None) is not None: self.image_encoder = None self.register_to_config(image_encoder=[None, None]) if not hasattr(self, 'safety_checker'): if hasattr(self, 'feature_extractor') and getattr(self, 'feature_extractor', None) is not None: self.feature_extractor = None self.register_to_config(feature_extractor=[None, None]) self.unet.encoder_hid_proj = None self.unet.config.encoder_hid_dim_type = None if hasattr(self.unet, 'text_encoder_hid_proj') and self.unet.text_encoder_hid_proj is not None: self.unet.encoder_hid_proj = self.unet.text_encoder_hid_proj self.unet.text_encoder_hid_proj = None self.unet.config.encoder_hid_dim_type = 'text_proj' attn_procs = {} for (name, value) in self.unet.attn_processors.items(): attn_processor_class = AttnProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') else AttnProcessor() attn_procs[name] = attn_processor_class if isinstance(value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)) else value.__class__() self.unet.set_attn_processor(attn_procs) # File: diffusers-main/src/diffusers/loaders/lora_base.py import copy import inspect import os from pathlib import Path from typing import Callable, Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from huggingface_hub import model_info from huggingface_hub.constants import HF_HUB_OFFLINE from ..models.modeling_utils import ModelMixin, load_state_dict from ..utils import USE_PEFT_BACKEND, _get_model_file, delete_adapter_layers, 
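# --- Usage sketch (illustrative, not part of the library source): how the `IPAdapterMixin`
# above is used through a pipeline that inherits it. The Hub repository, subfolder and
# weight names below are the commonly used public IP-Adapter artifacts and may need to be
# adapted to your setup.
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe.set_ip_adapter_scale(0.6)  # accepts a float, a list, or per-block dicts
# ... run the pipeline with `ip_adapter_image=...`, then detach the adapter again:
pipe.unload_ip_adapter()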
deprecate, is_accelerate_available, is_peft_available, is_transformers_available, logging, recurse_remove_peft_layers, set_adapter_layers, set_weights_and_activate_adapters if is_transformers_available(): from transformers import PreTrainedModel if is_peft_available(): from peft.tuners.tuners_utils import BaseTunerLayer if is_accelerate_available(): from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module logger = logging.get_logger(__name__) def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None): merge_kwargs = {'safe_merge': safe_fusing} for module in text_encoder.modules(): if isinstance(module, BaseTunerLayer): if lora_scale != 1.0: module.scale_layer(lora_scale) supported_merge_kwargs = list(inspect.signature(module.merge).parameters) if 'adapter_names' in supported_merge_kwargs: merge_kwargs['adapter_names'] = adapter_names elif 'adapter_names' not in supported_merge_kwargs and adapter_names is not None: raise ValueError('The `adapter_names` argument is not supported with your PEFT version. Please upgrade to the latest version of PEFT. `pip install -U peft`') module.merge(**merge_kwargs) def unfuse_text_encoder_lora(text_encoder): for module in text_encoder.modules(): if isinstance(module, BaseTunerLayer): module.unmerge() def set_adapters_for_text_encoder(adapter_names: Union[List[str], str], text_encoder: Optional['PreTrainedModel']=None, text_encoder_weights: Optional[Union[float, List[float], List[None]]]=None): if text_encoder is None: raise ValueError('The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead.') def process_weights(adapter_names, weights): if not isinstance(weights, list): weights = [weights] * len(adapter_names) if len(adapter_names) != len(weights): raise ValueError(f'Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}') weights = [w if w is not None else 1.0 for w in weights] return weights adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names text_encoder_weights = process_weights(adapter_names, text_encoder_weights) set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights) def disable_lora_for_text_encoder(text_encoder: Optional['PreTrainedModel']=None): if text_encoder is None: raise ValueError('Text Encoder not found.') set_adapter_layers(text_encoder, enabled=False) def enable_lora_for_text_encoder(text_encoder: Optional['PreTrainedModel']=None): if text_encoder is None: raise ValueError('Text Encoder not found.') set_adapter_layers(text_encoder, enabled=True) def _remove_text_encoder_monkey_patch(text_encoder): recurse_remove_peft_layers(text_encoder) if getattr(text_encoder, 'peft_config', None) is not None: del text_encoder.peft_config text_encoder._hf_peft_config_loaded = None class LoraBaseMixin: _lora_loadable_modules = [] num_fused_loras = 0 def load_lora_weights(self, **kwargs): raise NotImplementedError('`load_lora_weights()` is not implemented.') @classmethod def save_lora_weights(cls, **kwargs): raise NotImplementedError('`save_lora_weights()` not implemented.') @classmethod def lora_state_dict(cls, **kwargs): raise NotImplementedError('`lora_state_dict()` is not implemented.') @classmethod def _optionally_disable_offloading(cls, _pipeline): is_model_cpu_offload = False is_sequential_cpu_offload = False if _pipeline is not None and _pipeline.hf_device_map is None: for (_, component) in _pipeline.components.items(): 
if isinstance(component, nn.Module) and hasattr(component, '_hf_hook'): if not is_model_cpu_offload: is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload) if not is_sequential_cpu_offload: is_sequential_cpu_offload = isinstance(component._hf_hook, AlignDevicesHook) or (hasattr(component._hf_hook, 'hooks') and isinstance(component._hf_hook.hooks[0], AlignDevicesHook)) logger.info('Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again.') remove_hook_from_module(component, recurse=is_sequential_cpu_offload) return (is_model_cpu_offload, is_sequential_cpu_offload) @classmethod def _fetch_state_dict(cls, pretrained_model_name_or_path_or_dict, weight_name, use_safetensors, local_files_only, cache_dir, force_download, proxies, token, revision, subfolder, user_agent, allow_pickle): from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE model_file = None if not isinstance(pretrained_model_name_or_path_or_dict, dict): if use_safetensors and weight_name is None or (weight_name is not None and weight_name.endswith('.safetensors')): try: if weight_name is None: weight_name = cls._best_guess_weight_name(pretrained_model_name_or_path_or_dict, file_extension='.safetensors', local_files_only=local_files_only) model_file = _get_model_file(pretrained_model_name_or_path_or_dict, weights_name=weight_name or LORA_WEIGHT_NAME_SAFE, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent) state_dict = safetensors.torch.load_file(model_file, device='cpu') except (IOError, safetensors.SafetensorError) as e: if not allow_pickle: raise e model_file = None pass if model_file is None: if weight_name is None: weight_name = cls._best_guess_weight_name(pretrained_model_name_or_path_or_dict, file_extension='.bin', local_files_only=local_files_only) model_file = _get_model_file(pretrained_model_name_or_path_or_dict, weights_name=weight_name or LORA_WEIGHT_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent) state_dict = load_state_dict(model_file) else: state_dict = pretrained_model_name_or_path_or_dict return state_dict @classmethod def _best_guess_weight_name(cls, pretrained_model_name_or_path_or_dict, file_extension='.safetensors', local_files_only=False): from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE if local_files_only or HF_HUB_OFFLINE: raise ValueError('When using the offline mode, you must specify a `weight_name`.') targeted_files = [] if os.path.isfile(pretrained_model_name_or_path_or_dict): return elif os.path.isdir(pretrained_model_name_or_path_or_dict): targeted_files = [f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)] else: files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)] if len(targeted_files) == 0: return unallowed_substrings = {'scheduler', 'optimizer', 'checkpoint'} targeted_files = list(filter(lambda x: all((substring not in x for substring in unallowed_substrings)), targeted_files)) if any((f.endswith(LORA_WEIGHT_NAME) for f in targeted_files)): targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), 
targeted_files)) elif any((f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files)): targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files)) if len(targeted_files) > 1: raise ValueError(f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one `.safetensors` or `.bin` file in {pretrained_model_name_or_path_or_dict}.") weight_name = targeted_files[0] return weight_name def unload_lora_weights(self): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.unload_lora() elif issubclass(model.__class__, PreTrainedModel): _remove_text_encoder_monkey_patch(model) def fuse_lora(self, components: List[str]=[], lora_scale: float=1.0, safe_fusing: bool=False, adapter_names: Optional[List[str]]=None, **kwargs): if 'fuse_unet' in kwargs: depr_message = 'Passing `fuse_unet` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_unet` will be removed in a future version.' deprecate('fuse_unet', '1.0.0', depr_message) if 'fuse_transformer' in kwargs: depr_message = 'Passing `fuse_transformer` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_transformer` will be removed in a future version.' deprecate('fuse_transformer', '1.0.0', depr_message) if 'fuse_text_encoder' in kwargs: depr_message = 'Passing `fuse_text_encoder` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_text_encoder` will be removed in a future version.' deprecate('fuse_text_encoder', '1.0.0', depr_message) if len(components) == 0: raise ValueError('`components` cannot be an empty list.') for fuse_component in components: if fuse_component not in self._lora_loadable_modules: raise ValueError(f'{fuse_component} is not found in self._lora_loadable_modules={self._lora_loadable_modules!r}.') model = getattr(self, fuse_component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) if issubclass(model.__class__, PreTrainedModel): fuse_text_encoder_lora(model, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) self.num_fused_loras += 1 def unfuse_lora(self, components: List[str]=[], **kwargs): if 'unfuse_unet' in kwargs: depr_message = 'Passing `unfuse_unet` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_unet` will be removed in a future version.' deprecate('unfuse_unet', '1.0.0', depr_message) if 'unfuse_transformer' in kwargs: depr_message = 'Passing `unfuse_transformer` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_transformer` will be removed in a future version.' deprecate('unfuse_transformer', '1.0.0', depr_message) if 'unfuse_text_encoder' in kwargs: depr_message = 'Passing `unfuse_text_encoder` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_text_encoder` will be removed in a future version.' 
deprecate('unfuse_text_encoder', '1.0.0', depr_message) if len(components) == 0: raise ValueError('`components` cannot be an empty list.') for fuse_component in components: if fuse_component not in self._lora_loadable_modules: raise ValueError(f'{fuse_component} is not found in self._lora_loadable_modules={self._lora_loadable_modules!r}.') model = getattr(self, fuse_component, None) if model is not None: if issubclass(model.__class__, (ModelMixin, PreTrainedModel)): for module in model.modules(): if isinstance(module, BaseTunerLayer): module.unmerge() self.num_fused_loras -= 1 def set_adapters(self, adapter_names: Union[List[str], str], adapter_weights: Optional[Union[float, Dict, List[float], List[Dict]]]=None): adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names adapter_weights = copy.deepcopy(adapter_weights) if not isinstance(adapter_weights, list): adapter_weights = [adapter_weights] * len(adapter_names) if len(adapter_names) != len(adapter_weights): raise ValueError(f'Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(adapter_weights)}') list_adapters = self.get_list_adapters() all_adapters = {adapter for adapters in list_adapters.values() for adapter in adapters} invert_list_adapters = {adapter: [part for (part, adapters) in list_adapters.items() if adapter in adapters] for adapter in all_adapters} _component_adapter_weights = {} for component in self._lora_loadable_modules: model = getattr(self, component) for (adapter_name, weights) in zip(adapter_names, adapter_weights): if isinstance(weights, dict): component_adapter_weights = weights.pop(component, None) if component_adapter_weights is not None and (not hasattr(self, component)): logger.warning(f'Lora weight dict contains {component} weights but will be ignored because pipeline does not have {component}.') if component_adapter_weights is not None and component not in invert_list_adapters[adapter_name]: logger.warning(f"Lora weight dict for adapter '{adapter_name}' contains {component}, but this will be ignored because {adapter_name} does not contain weights for {component}. Valid parts for {adapter_name} are: {invert_list_adapters[adapter_name]}.") else: component_adapter_weights = weights _component_adapter_weights.setdefault(component, []) _component_adapter_weights[component].append(component_adapter_weights) if issubclass(model.__class__, ModelMixin): model.set_adapters(adapter_names, _component_adapter_weights[component]) elif issubclass(model.__class__, PreTrainedModel): set_adapters_for_text_encoder(adapter_names, model, _component_adapter_weights[component]) def disable_lora(self): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.disable_lora() elif issubclass(model.__class__, PreTrainedModel): disable_lora_for_text_encoder(model) def enable_lora(self): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.enable_lora() elif issubclass(model.__class__, PreTrainedModel): enable_lora_for_text_encoder(model) def delete_adapters(self, adapter_names: Union[List[str], str]): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') if 
isinstance(adapter_names, str): adapter_names = [adapter_names] for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.delete_adapters(adapter_names) elif issubclass(model.__class__, PreTrainedModel): for adapter_name in adapter_names: delete_adapter_layers(model, adapter_name) def get_active_adapters(self) -> List[str]: if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`') active_adapters = [] for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None and issubclass(model.__class__, ModelMixin): for module in model.modules(): if isinstance(module, BaseTunerLayer): active_adapters = module.active_adapters break return active_adapters def get_list_adapters(self) -> Dict[str, List[str]]: if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`') set_adapters = {} for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None and issubclass(model.__class__, (ModelMixin, PreTrainedModel)) and hasattr(model, 'peft_config'): set_adapters[component] = list(model.peft_config.keys()) return set_adapters def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None: if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: for module in model.modules(): if isinstance(module, BaseTunerLayer): for adapter_name in adapter_names: module.lora_A[adapter_name].to(device) module.lora_B[adapter_name].to(device) if hasattr(module, 'lora_magnitude_vector') and module.lora_magnitude_vector is not None: if adapter_name in module.lora_magnitude_vector: module.lora_magnitude_vector[adapter_name] = module.lora_magnitude_vector[adapter_name].to(device) @staticmethod def pack_weights(layers, prefix): layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers layers_state_dict = {f'{prefix}.{module_name}': param for (module_name, param) in layers_weights.items()} return layers_state_dict @staticmethod def write_lora_layers(state_dict: Dict[str, torch.Tensor], save_directory: str, is_main_process: bool, weight_name: str, save_function: Callable, safe_serialization: bool): from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return if save_function is None: if safe_serialization: def save_function(weights, filename): return safetensors.torch.save_file(weights, filename, metadata={'format': 'pt'}) else: save_function = torch.save os.makedirs(save_directory, exist_ok=True) if weight_name is None: if safe_serialization: weight_name = LORA_WEIGHT_NAME_SAFE else: weight_name = LORA_WEIGHT_NAME save_path = Path(save_directory, weight_name).as_posix() save_function(state_dict, save_path) logger.info(f'Model weights saved in {save_path}') @property def lora_scale(self) -> float: return self._lora_scale if hasattr(self, '_lora_scale') else 1.0 # File: diffusers-main/src/diffusers/loaders/lora_conversion_utils.py import re import torch from ..utils import is_peft_version, logging logger = logging.get_logger(__name__) def 
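# --- Usage sketch (illustrative, not part of the library source): the adapter-management
# API that `LoraBaseMixin` above provides to every LoRA-capable pipeline. The base model is
# a public SDXL checkpoint; the LoRA repository and adapter name are placeholders.
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("some-user/some-sdxl-lora", adapter_name="style")  # placeholder repo
pipe.set_adapters(["style"], adapter_weights=[0.8])       # scale the adapter's contribution
print(pipe.get_active_adapters())                         # e.g. ['style']
pipe.fuse_lora(components=["unet", "text_encoder"], lora_scale=0.8)  # merge LoRA into base weights
pipe.unfuse_lora(components=["unet", "text_encoder"])                # undo the merge
pipe.unload_lora_weights()                                           # drop the adapter entirely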
_maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter='_', block_slice_pos=5): all_keys = list(state_dict.keys()) sgm_patterns = ['input_blocks', 'middle_block', 'output_blocks'] is_in_sgm_format = False for key in all_keys: if any((p in key for p in sgm_patterns)): is_in_sgm_format = True break if not is_in_sgm_format: return state_dict new_state_dict = {} inner_block_map = ['resnets', 'attentions', 'upsamplers'] (input_block_ids, middle_block_ids, output_block_ids) = (set(), set(), set()) for layer in all_keys: if 'text' in layer: new_state_dict[layer] = state_dict.pop(layer) else: layer_id = int(layer.split(delimiter)[:block_slice_pos][-1]) if sgm_patterns[0] in layer: input_block_ids.add(layer_id) elif sgm_patterns[1] in layer: middle_block_ids.add(layer_id) elif sgm_patterns[2] in layer: output_block_ids.add(layer_id) else: raise ValueError(f'Checkpoint not supported because layer {layer} not supported.') input_blocks = {layer_id: [key for key in state_dict if f'input_blocks{delimiter}{layer_id}' in key] for layer_id in input_block_ids} middle_blocks = {layer_id: [key for key in state_dict if f'middle_block{delimiter}{layer_id}' in key] for layer_id in middle_block_ids} output_blocks = {layer_id: [key for key in state_dict if f'output_blocks{delimiter}{layer_id}' in key] for layer_id in output_block_ids} for i in input_block_ids: block_id = (i - 1) // (unet_config.layers_per_block + 1) layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1) for key in input_blocks[i]: inner_block_id = int(key.split(delimiter)[block_slice_pos]) inner_block_key = inner_block_map[inner_block_id] if 'op' not in key else 'downsamplers' inner_layers_in_block = str(layer_in_block_id) if 'op' not in key else '0' new_key = delimiter.join(key.split(delimiter)[:block_slice_pos - 1] + [str(block_id), inner_block_key, inner_layers_in_block] + key.split(delimiter)[block_slice_pos + 1:]) new_state_dict[new_key] = state_dict.pop(key) for i in middle_block_ids: key_part = None if i == 0: key_part = [inner_block_map[0], '0'] elif i == 1: key_part = [inner_block_map[1], '0'] elif i == 2: key_part = [inner_block_map[0], '1'] else: raise ValueError(f'Invalid middle block id {i}.') for key in middle_blocks[i]: new_key = delimiter.join(key.split(delimiter)[:block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:]) new_state_dict[new_key] = state_dict.pop(key) for i in output_block_ids: block_id = i // (unet_config.layers_per_block + 1) layer_in_block_id = i % (unet_config.layers_per_block + 1) for key in output_blocks[i]: inner_block_id = int(key.split(delimiter)[block_slice_pos]) inner_block_key = inner_block_map[inner_block_id] inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else '0' new_key = delimiter.join(key.split(delimiter)[:block_slice_pos - 1] + [str(block_id), inner_block_key, inner_layers_in_block] + key.split(delimiter)[block_slice_pos + 1:]) new_state_dict[new_key] = state_dict.pop(key) if len(state_dict) > 0: raise ValueError('At this point all state dict entries have to be converted.') return new_state_dict def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name='unet', text_encoder_name='text_encoder'): unet_state_dict = {} te_state_dict = {} te2_state_dict = {} network_alphas = {} dora_present_in_unet = any(('dora_scale' in k and 'lora_unet_' in k for k in state_dict)) dora_present_in_te = any(('dora_scale' in k and ('lora_te_' in k or 'lora_te1_' in k) for k in state_dict)) dora_present_in_te2 = any(('dora_scale' in k and 'lora_te2_' 
in k for k in state_dict)) if dora_present_in_unet or dora_present_in_te or dora_present_in_te2: if is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`.') all_lora_keys = list(state_dict.keys()) for key in all_lora_keys: if not key.endswith('lora_down.weight'): continue lora_name = key.split('.')[0] lora_name_up = lora_name + '.lora_up.weight' lora_name_alpha = lora_name + '.alpha' if lora_name.startswith('lora_unet_'): diffusers_name = _convert_unet_lora_key(key) unet_state_dict[diffusers_name] = state_dict.pop(key) unet_state_dict[diffusers_name.replace('.down.', '.up.')] = state_dict.pop(lora_name_up) if dora_present_in_unet: dora_scale_key_to_replace = '_lora.down.' if '_lora.down.' in diffusers_name else '.lora.down.' unet_state_dict[diffusers_name.replace(dora_scale_key_to_replace, '.lora_magnitude_vector.')] = state_dict.pop(key.replace('lora_down.weight', 'dora_scale')) elif lora_name.startswith(('lora_te_', 'lora_te1_', 'lora_te2_')): diffusers_name = _convert_text_encoder_lora_key(key, lora_name) if lora_name.startswith(('lora_te_', 'lora_te1_')): te_state_dict[diffusers_name] = state_dict.pop(key) te_state_dict[diffusers_name.replace('.down.', '.up.')] = state_dict.pop(lora_name_up) else: te2_state_dict[diffusers_name] = state_dict.pop(key) te2_state_dict[diffusers_name.replace('.down.', '.up.')] = state_dict.pop(lora_name_up) if dora_present_in_te or dora_present_in_te2: dora_scale_key_to_replace_te = '_lora.down.' if '_lora.down.' in diffusers_name else '.lora_linear_layer.' if lora_name.startswith(('lora_te_', 'lora_te1_')): te_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, '.lora_magnitude_vector.')] = state_dict.pop(key.replace('lora_down.weight', 'dora_scale')) elif lora_name.startswith('lora_te2_'): te2_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, '.lora_magnitude_vector.')] = state_dict.pop(key.replace('lora_down.weight', 'dora_scale')) if lora_name_alpha in state_dict: alpha = state_dict.pop(lora_name_alpha).item() network_alphas.update(_get_alpha_name(lora_name_alpha, diffusers_name, alpha)) if len(state_dict) > 0: raise ValueError(f"The following keys have not been correctly renamed: \n\n {', '.join(state_dict.keys())}") logger.info('Non-diffusers checkpoint detected.') unet_state_dict = {f'{unet_name}.{module_name}': params for (module_name, params) in unet_state_dict.items()} te_state_dict = {f'{text_encoder_name}.{module_name}': params for (module_name, params) in te_state_dict.items()} te2_state_dict = {f'text_encoder_2.{module_name}': params for (module_name, params) in te2_state_dict.items()} if len(te2_state_dict) > 0 else None if te2_state_dict is not None: te_state_dict.update(te2_state_dict) new_state_dict = {**unet_state_dict, **te_state_dict} return (new_state_dict, network_alphas) def _convert_unet_lora_key(key): diffusers_name = key.replace('lora_unet_', '').replace('_', '.') diffusers_name = diffusers_name.replace('input.blocks', 'down_blocks') diffusers_name = diffusers_name.replace('down.blocks', 'down_blocks') diffusers_name = diffusers_name.replace('middle.block', 'mid_block') diffusers_name = diffusers_name.replace('mid.block', 'mid_block') diffusers_name = diffusers_name.replace('output.blocks', 'up_blocks') diffusers_name = diffusers_name.replace('up.blocks', 'up_blocks') diffusers_name = diffusers_name.replace('transformer.blocks', 'transformer_blocks') diffusers_name = diffusers_name.replace('to.q.lora', 
'to_q_lora') diffusers_name = diffusers_name.replace('to.k.lora', 'to_k_lora') diffusers_name = diffusers_name.replace('to.v.lora', 'to_v_lora') diffusers_name = diffusers_name.replace('to.out.0.lora', 'to_out_lora') diffusers_name = diffusers_name.replace('proj.in', 'proj_in') diffusers_name = diffusers_name.replace('proj.out', 'proj_out') diffusers_name = diffusers_name.replace('emb.layers', 'time_emb_proj') if 'emb' in diffusers_name and 'time.emb.proj' not in diffusers_name: pattern = '\\.\\d+(?=\\D*$)' diffusers_name = re.sub(pattern, '', diffusers_name, count=1) if '.in.' in diffusers_name: diffusers_name = diffusers_name.replace('in.layers.2', 'conv1') if '.out.' in diffusers_name: diffusers_name = diffusers_name.replace('out.layers.3', 'conv2') if 'downsamplers' in diffusers_name or 'upsamplers' in diffusers_name: diffusers_name = diffusers_name.replace('op', 'conv') if 'skip' in diffusers_name: diffusers_name = diffusers_name.replace('skip.connection', 'conv_shortcut') if 'time.emb.proj' in diffusers_name: diffusers_name = diffusers_name.replace('time.emb.proj', 'time_emb_proj') if 'conv.shortcut' in diffusers_name: diffusers_name = diffusers_name.replace('conv.shortcut', 'conv_shortcut') if 'transformer_blocks' in diffusers_name: if 'attn1' in diffusers_name or 'attn2' in diffusers_name: diffusers_name = diffusers_name.replace('attn1', 'attn1.processor') diffusers_name = diffusers_name.replace('attn2', 'attn2.processor') elif 'ff' in diffusers_name: pass elif any((key in diffusers_name for key in ('proj_in', 'proj_out'))): pass else: pass return diffusers_name def _convert_text_encoder_lora_key(key, lora_name): if lora_name.startswith(('lora_te_', 'lora_te1_')): key_to_replace = 'lora_te_' if lora_name.startswith('lora_te_') else 'lora_te1_' else: key_to_replace = 'lora_te2_' diffusers_name = key.replace(key_to_replace, '').replace('_', '.') diffusers_name = diffusers_name.replace('text.model', 'text_model') diffusers_name = diffusers_name.replace('self.attn', 'self_attn') diffusers_name = diffusers_name.replace('q.proj.lora', 'to_q_lora') diffusers_name = diffusers_name.replace('k.proj.lora', 'to_k_lora') diffusers_name = diffusers_name.replace('v.proj.lora', 'to_v_lora') diffusers_name = diffusers_name.replace('out.proj.lora', 'to_out_lora') diffusers_name = diffusers_name.replace('text.projection', 'text_projection') if 'self_attn' in diffusers_name or 'text_projection' in diffusers_name: pass elif 'mlp' in diffusers_name: diffusers_name = diffusers_name.replace('.lora.', '.lora_linear_layer.') return diffusers_name def _get_alpha_name(lora_name_alpha, diffusers_name, alpha): if lora_name_alpha.startswith('lora_unet_'): prefix = 'unet.' elif lora_name_alpha.startswith(('lora_te_', 'lora_te1_')): prefix = 'text_encoder.' else: prefix = 'text_encoder_2.' 
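# NOTE (illustrative comment, not part of the library source): the conversion helpers above map
# Kohya/sd-scripts key names onto diffusers module paths. A hedged, worked example of what
# _convert_unet_lora_key is expected to produce for a typical attention key:
#
#   >>> _convert_unet_lora_key('lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q.lora_down.weight')
#   'down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight'
#
# The `new_name` built below prefixes the converted name with 'unet.' / 'text_encoder.' /
# 'text_encoder_2.' (as chosen above), truncates it at the '.lora.' marker, and appends '.alpha',
# so each alpha entry in `network_alphas` sits next to the converted LoRA weight it scales.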
new_name = prefix + diffusers_name.split('.lora.')[0] + '.alpha' return {new_name: alpha} def _convert_kohya_flux_lora_to_diffusers(state_dict): def _convert_to_ai_toolkit(sds_sd, ait_sd, sds_key, ait_key): if sds_key + '.lora_down.weight' not in sds_sd: return down_weight = sds_sd.pop(sds_key + '.lora_down.weight') rank = down_weight.shape[0] alpha = sds_sd.pop(sds_key + '.alpha').item() scale = alpha / rank scale_down = scale scale_up = 1.0 while scale_down * 2 < scale_up: scale_down *= 2 scale_up /= 2 ait_sd[ait_key + '.lora_A.weight'] = down_weight * scale_down ait_sd[ait_key + '.lora_B.weight'] = sds_sd.pop(sds_key + '.lora_up.weight') * scale_up def _convert_to_ai_toolkit_cat(sds_sd, ait_sd, sds_key, ait_keys, dims=None): if sds_key + '.lora_down.weight' not in sds_sd: return down_weight = sds_sd.pop(sds_key + '.lora_down.weight') up_weight = sds_sd.pop(sds_key + '.lora_up.weight') sd_lora_rank = down_weight.shape[0] alpha = sds_sd.pop(sds_key + '.alpha') scale = alpha / sd_lora_rank scale_down = scale scale_up = 1.0 while scale_down * 2 < scale_up: scale_down *= 2 scale_up /= 2 down_weight = down_weight * scale_down up_weight = up_weight * scale_up num_splits = len(ait_keys) if dims is None: dims = [up_weight.shape[0] // num_splits] * num_splits else: assert sum(dims) == up_weight.shape[0] is_sparse = False if sd_lora_rank % num_splits == 0: ait_rank = sd_lora_rank // num_splits is_sparse = True i = 0 for j in range(len(dims)): for k in range(len(dims)): if j == k: continue is_sparse = is_sparse and torch.all(up_weight[i:i + dims[j], k * ait_rank:(k + 1) * ait_rank] == 0) i += dims[j] if is_sparse: logger.info(f'weight is sparse: {sds_key}') ait_down_keys = [k + '.lora_A.weight' for k in ait_keys] ait_up_keys = [k + '.lora_B.weight' for k in ait_keys] if not is_sparse: ait_sd.update({k: down_weight for k in ait_down_keys}) ait_sd.update({k: v for (k, v) in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))}) else: ait_sd.update({k: v for (k, v) in zip(ait_down_keys, torch.chunk(down_weight, num_splits, dim=0))}) i = 0 for j in range(len(dims)): ait_sd[ait_up_keys[j]] = up_weight[i:i + dims[j], j * ait_rank:(j + 1) * ait_rank].contiguous() i += dims[j] def _convert_sd_scripts_to_ai_toolkit(sds_sd): ait_sd = {} for i in range(19): _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_img_attn_proj', f'transformer.transformer_blocks.{i}.attn.to_out.0') _convert_to_ai_toolkit_cat(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_img_attn_qkv', [f'transformer.transformer_blocks.{i}.attn.to_q', f'transformer.transformer_blocks.{i}.attn.to_k', f'transformer.transformer_blocks.{i}.attn.to_v']) _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_img_mlp_0', f'transformer.transformer_blocks.{i}.ff.net.0.proj') _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_img_mlp_2', f'transformer.transformer_blocks.{i}.ff.net.2') _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_img_mod_lin', f'transformer.transformer_blocks.{i}.norm1.linear') _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_txt_attn_proj', f'transformer.transformer_blocks.{i}.attn.to_add_out') _convert_to_ai_toolkit_cat(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_txt_attn_qkv', [f'transformer.transformer_blocks.{i}.attn.add_q_proj', f'transformer.transformer_blocks.{i}.attn.add_k_proj', f'transformer.transformer_blocks.{i}.attn.add_v_proj']) _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_txt_mlp_0', 
f'transformer.transformer_blocks.{i}.ff_context.net.0.proj') _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_txt_mlp_2', f'transformer.transformer_blocks.{i}.ff_context.net.2') _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_double_blocks_{i}_txt_mod_lin', f'transformer.transformer_blocks.{i}.norm1_context.linear') for i in range(38): _convert_to_ai_toolkit_cat(sds_sd, ait_sd, f'lora_unet_single_blocks_{i}_linear1', [f'transformer.single_transformer_blocks.{i}.attn.to_q', f'transformer.single_transformer_blocks.{i}.attn.to_k', f'transformer.single_transformer_blocks.{i}.attn.to_v', f'transformer.single_transformer_blocks.{i}.proj_mlp'], dims=[3072, 3072, 3072, 12288]) _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_single_blocks_{i}_linear2', f'transformer.single_transformer_blocks.{i}.proj_out') _convert_to_ai_toolkit(sds_sd, ait_sd, f'lora_unet_single_blocks_{i}_modulation_lin', f'transformer.single_transformer_blocks.{i}.norm.linear') if len(sds_sd) > 0: logger.warning(f'Unsupported keys for ai-toolkit: {sds_sd.keys()}') return ait_sd return _convert_sd_scripts_to_ai_toolkit(state_dict) def _convert_xlabs_flux_lora_to_diffusers(old_state_dict): new_state_dict = {} orig_keys = list(old_state_dict.keys()) def handle_qkv(sds_sd, ait_sd, sds_key, ait_keys, dims=None): down_weight = sds_sd.pop(sds_key) up_weight = sds_sd.pop(sds_key.replace('.down.weight', '.up.weight')) num_splits = len(ait_keys) if dims is None: dims = [up_weight.shape[0] // num_splits] * num_splits else: assert sum(dims) == up_weight.shape[0] ait_down_keys = [k + '.lora_A.weight' for k in ait_keys] ait_up_keys = [k + '.lora_B.weight' for k in ait_keys] ait_sd.update({k: down_weight for k in ait_down_keys}) ait_sd.update({k: v for (k, v) in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))}) for old_key in orig_keys: if old_key.startswith(('diffusion_model.double_blocks', 'double_blocks')): block_num = re.search('double_blocks\\.(\\d+)', old_key).group(1) new_key = f'transformer.transformer_blocks.{block_num}' if 'processor.proj_lora1' in old_key: new_key += '.attn.to_out.0' elif 'processor.proj_lora2' in old_key: new_key += '.attn.to_add_out' elif 'processor.qkv_lora2' in old_key and 'up' not in old_key: handle_qkv(old_state_dict, new_state_dict, old_key, [f'transformer.transformer_blocks.{block_num}.attn.add_q_proj', f'transformer.transformer_blocks.{block_num}.attn.add_k_proj', f'transformer.transformer_blocks.{block_num}.attn.add_v_proj']) elif 'processor.qkv_lora1' in old_key and 'up' not in old_key: handle_qkv(old_state_dict, new_state_dict, old_key, [f'transformer.transformer_blocks.{block_num}.attn.to_q', f'transformer.transformer_blocks.{block_num}.attn.to_k', f'transformer.transformer_blocks.{block_num}.attn.to_v']) if 'down' in old_key: new_key += '.lora_A.weight' elif 'up' in old_key: new_key += '.lora_B.weight' elif old_key.startswith(('diffusion_model.single_blocks', 'single_blocks')): block_num = re.search('single_blocks\\.(\\d+)', old_key).group(1) new_key = f'transformer.single_transformer_blocks.{block_num}' if 'proj_lora1' in old_key or 'proj_lora2' in old_key: new_key += '.proj_out' elif 'qkv_lora1' in old_key or 'qkv_lora2' in old_key: new_key += '.norm.linear' if 'down' in old_key: new_key += '.lora_A.weight' elif 'up' in old_key: new_key += '.lora_B.weight' else: new_key = old_key if 'qkv' not in old_key: new_state_dict[new_key] = old_state_dict.pop(old_key) if len(old_state_dict) > 0: raise ValueError(f'`old_state_dict` should be empty at this point but has: 
{list(old_state_dict.keys())}.') return new_state_dict # File: diffusers-main/src/diffusers/loaders/lora_pipeline.py import os from typing import Callable, Dict, List, Optional, Union import torch from huggingface_hub.utils import validate_hf_hub_args from ..utils import USE_PEFT_BACKEND, convert_state_dict_to_diffusers, convert_state_dict_to_peft, convert_unet_state_dict_to_peft, deprecate, get_adapter_name, get_peft_kwargs, is_peft_version, is_transformers_available, logging, scale_lora_layers from .lora_base import LoraBaseMixin from .lora_conversion_utils import _convert_kohya_flux_lora_to_diffusers, _convert_non_diffusers_lora_to_diffusers, _convert_xlabs_flux_lora_to_diffusers, _maybe_map_sgm_blocks_to_diffusers if is_transformers_available(): from ..models.lora import text_encoder_attn_modules, text_encoder_mlp_modules logger = logging.get_logger(__name__) TEXT_ENCODER_NAME = 'text_encoder' UNET_NAME = 'unet' TRANSFORMER_NAME = 'transformer' LORA_WEIGHT_NAME = 'pytorch_lora_weights.bin' LORA_WEIGHT_NAME_SAFE = 'pytorch_lora_weights.safetensors' class StableDiffusionLoraLoaderMixin(LoraBaseMixin): _lora_loadable_modules = ['unet', 'text_encoder'] unet_name = UNET_NAME text_encoder_name = TEXT_ENCODER_NAME def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() (state_dict, network_alphas) = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all(('lora' in key or 'dora_scale' in key for key in state_dict.keys())) if not is_correct_format: raise ValueError('Invalid LoRA checkpoint.') self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=getattr(self, self.unet_name) if not hasattr(self, 'unet') else self.unet, adapter_name=adapter_name, _pipeline=self) self.load_lora_into_text_encoder(state_dict, network_alphas=network_alphas, text_encoder=getattr(self, self.text_encoder_name) if not hasattr(self, 'text_encoder') else self.text_encoder, lora_scale=self.lora_scale, adapter_name=adapter_name, _pipeline=self) @classmethod @validate_hf_hub_args def lora_state_dict(cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) subfolder = kwargs.pop('subfolder', None) weight_name = kwargs.pop('weight_name', None) unet_config = kwargs.pop('unet_config', None) use_safetensors = kwargs.pop('use_safetensors', None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {'file_type': 'attn_procs_weights', 'framework': 'pytorch'} state_dict = cls._fetch_state_dict(pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle) network_alphas = None if all((k.startswith('lora_te_') or k.startswith('lora_unet_') or 
k.startswith('lora_te1_') or k.startswith('lora_te2_') for k in state_dict.keys())): if unet_config is not None: state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config) (state_dict, network_alphas) = _convert_non_diffusers_lora_to_diffusers(state_dict) return (state_dict, network_alphas) @classmethod def load_lora_into_unet(cls, state_dict, network_alphas, unet, adapter_name=None, _pipeline=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') keys = list(state_dict.keys()) only_text_encoder = all((key.startswith(cls.text_encoder_name) for key in keys)) if not only_text_encoder: logger.info(f'Loading {cls.unet_name}.') unet.load_attn_procs(state_dict, network_alphas=network_alphas, adapter_name=adapter_name, _pipeline=_pipeline) @classmethod def load_lora_into_text_encoder(cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') from peft import LoraConfig keys = list(state_dict.keys()) prefix = cls.text_encoder_name if prefix is None else prefix if any((cls.text_encoder_name in key for key in keys)): text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split('.')[0] == prefix] text_encoder_lora_state_dict = {k.replace(f'{prefix}.', ''): v for (k, v) in state_dict.items() if k in text_encoder_keys} if len(text_encoder_lora_state_dict) > 0: logger.info(f'Loading {prefix}.') rank = {} text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) for (name, _) in text_encoder_attn_modules(text_encoder): for module in ('out_proj', 'q_proj', 'k_proj', 'v_proj'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] for (name, _) in text_encoder_mlp_modules(text_encoder): for module in ('fc1', 'fc2'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] if network_alphas is not None: alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split('.')[0] == prefix] network_alphas = {k.replace(f'{prefix}.', ''): v for (k, v) in network_alphas.items() if k in alpha_keys} lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora']: if is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. 
Please upgrade your installation of `peft`.') elif is_peft_version('<', '0.9.0'): lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(text_encoder) (is_model_cpu_offload, is_sequential_cpu_offload) = cls._optionally_disable_offloading(_pipeline) text_encoder.load_adapter(adapter_name=adapter_name, adapter_state_dict=text_encoder_lora_state_dict, peft_config=lora_config) scale_lora_layers(text_encoder, weight=lora_scale) text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() @classmethod def save_lora_weights(cls, save_directory: Union[str, os.PathLike], unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]]=None, text_encoder_lora_layers: Dict[str, torch.nn.Module]=None, is_main_process: bool=True, weight_name: str=None, save_function: Callable=None, safe_serialization: bool=True): state_dict = {} if not (unet_lora_layers or text_encoder_lora_layers): raise ValueError('You must pass at least one of `unet_lora_layers` and `text_encoder_lora_layers`.') if unet_lora_layers: state_dict.update(cls.pack_weights(unet_lora_layers, cls.unet_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) cls.write_lora_layers(state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization) def fuse_lora(self, components: List[str]=['unet', 'text_encoder'], lora_scale: float=1.0, safe_fusing: bool=False, adapter_names: Optional[List[str]]=None, **kwargs): super().fuse_lora(components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) def unfuse_lora(self, components: List[str]=['unet', 'text_encoder'], **kwargs): super().unfuse_lora(components=components) class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin): _lora_loadable_modules = ['unet', 'text_encoder', 'text_encoder_2'] unet_name = UNET_NAME text_encoder_name = TEXT_ENCODER_NAME def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name: Optional[str]=None, **kwargs): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() (state_dict, network_alphas) = self.lora_state_dict(pretrained_model_name_or_path_or_dict, unet_config=self.unet.config, **kwargs) is_correct_format = all(('lora' in key or 'dora_scale' in key for key in state_dict.keys())) if not is_correct_format: raise ValueError('Invalid LoRA checkpoint.') self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet, adapter_name=adapter_name, _pipeline=self) text_encoder_state_dict = {k: v for (k, v) in state_dict.items() if 'text_encoder.' in k} if len(text_encoder_state_dict) > 0: self.load_lora_into_text_encoder(text_encoder_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder, prefix='text_encoder', lora_scale=self.lora_scale, adapter_name=adapter_name, _pipeline=self) text_encoder_2_state_dict = {k: v for (k, v) in state_dict.items() if 'text_encoder_2.' 
in k} if len(text_encoder_2_state_dict) > 0: self.load_lora_into_text_encoder(text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder_2, prefix='text_encoder_2', lora_scale=self.lora_scale, adapter_name=adapter_name, _pipeline=self) @classmethod @validate_hf_hub_args def lora_state_dict(cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) subfolder = kwargs.pop('subfolder', None) weight_name = kwargs.pop('weight_name', None) unet_config = kwargs.pop('unet_config', None) use_safetensors = kwargs.pop('use_safetensors', None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {'file_type': 'attn_procs_weights', 'framework': 'pytorch'} state_dict = cls._fetch_state_dict(pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle) network_alphas = None if all((k.startswith('lora_te_') or k.startswith('lora_unet_') or k.startswith('lora_te1_') or k.startswith('lora_te2_') for k in state_dict.keys())): if unet_config is not None: state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config) (state_dict, network_alphas) = _convert_non_diffusers_lora_to_diffusers(state_dict) return (state_dict, network_alphas) @classmethod def load_lora_into_unet(cls, state_dict, network_alphas, unet, adapter_name=None, _pipeline=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') keys = list(state_dict.keys()) only_text_encoder = all((key.startswith(cls.text_encoder_name) for key in keys)) if not only_text_encoder: logger.info(f'Loading {cls.unet_name}.') unet.load_attn_procs(state_dict, network_alphas=network_alphas, adapter_name=adapter_name, _pipeline=_pipeline) @classmethod def load_lora_into_text_encoder(cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') from peft import LoraConfig keys = list(state_dict.keys()) prefix = cls.text_encoder_name if prefix is None else prefix if any((cls.text_encoder_name in key for key in keys)): text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split('.')[0] == prefix] text_encoder_lora_state_dict = {k.replace(f'{prefix}.', ''): v for (k, v) in state_dict.items() if k in text_encoder_keys} if len(text_encoder_lora_state_dict) > 0: logger.info(f'Loading {prefix}.') rank = {} text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) for (name, _) in text_encoder_attn_modules(text_encoder): for module in ('out_proj', 'q_proj', 'k_proj', 'v_proj'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] for (name, _) in text_encoder_mlp_modules(text_encoder): for module in ('fc1', 
'fc2'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] if network_alphas is not None: alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split('.')[0] == prefix] network_alphas = {k.replace(f'{prefix}.', ''): v for (k, v) in network_alphas.items() if k in alpha_keys} lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora']: if is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`.') elif is_peft_version('<', '0.9.0'): lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(text_encoder) (is_model_cpu_offload, is_sequential_cpu_offload) = cls._optionally_disable_offloading(_pipeline) text_encoder.load_adapter(adapter_name=adapter_name, adapter_state_dict=text_encoder_lora_state_dict, peft_config=lora_config) scale_lora_layers(text_encoder, weight=lora_scale) text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() @classmethod def save_lora_weights(cls, save_directory: Union[str, os.PathLike], unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]]=None, text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]]=None, text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]]=None, is_main_process: bool=True, weight_name: str=None, save_function: Callable=None, safe_serialization: bool=True): state_dict = {} if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers): raise ValueError('You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`.') if unet_lora_layers: state_dict.update(cls.pack_weights(unet_lora_layers, 'unet')) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, 'text_encoder')) if text_encoder_2_lora_layers: state_dict.update(cls.pack_weights(text_encoder_2_lora_layers, 'text_encoder_2')) cls.write_lora_layers(state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization) def fuse_lora(self, components: List[str]=['unet', 'text_encoder', 'text_encoder_2'], lora_scale: float=1.0, safe_fusing: bool=False, adapter_names: Optional[List[str]]=None, **kwargs): super().fuse_lora(components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) def unfuse_lora(self, components: List[str]=['unet', 'text_encoder', 'text_encoder_2'], **kwargs): super().unfuse_lora(components=components) class SD3LoraLoaderMixin(LoraBaseMixin): _lora_loadable_modules = ['transformer', 'text_encoder', 'text_encoder_2'] transformer_name = TRANSFORMER_NAME text_encoder_name = TEXT_ENCODER_NAME @classmethod @validate_hf_hub_args def lora_state_dict(cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) 
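# NOTE (illustrative comment, not part of the library source): the kwargs popped in this method
# are the usual Hugging Face Hub download options (cache_dir, force_download, proxies,
# local_files_only, token, revision, subfolder, weight_name, use_safetensors). A minimal, hedged
# usage sketch for the SD3 loader; the LoRA repository id below is hypothetical:
#
#   >>> import torch
#   >>> from diffusers import StableDiffusion3Pipeline
#   >>> pipe = StableDiffusion3Pipeline.from_pretrained(
#   ...     "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
#   ... )
#   >>> state_dict = pipe.lora_state_dict("some-user/some-sd3-lora")  # inspect the raw LoRA keys
#   >>> pipe.load_lora_weights("some-user/some-sd3-lora", adapter_name="style")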
token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) subfolder = kwargs.pop('subfolder', None) weight_name = kwargs.pop('weight_name', None) use_safetensors = kwargs.pop('use_safetensors', None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {'file_type': 'attn_procs_weights', 'framework': 'pytorch'} state_dict = cls._fetch_state_dict(pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle) return state_dict def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() state_dict = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) is_correct_format = all(('lora' in key or 'dora_scale' in key for key in state_dict.keys())) if not is_correct_format: raise ValueError('Invalid LoRA checkpoint.') self.load_lora_into_transformer(state_dict, transformer=getattr(self, self.transformer_name) if not hasattr(self, 'transformer') else self.transformer, adapter_name=adapter_name, _pipeline=self) text_encoder_state_dict = {k: v for (k, v) in state_dict.items() if 'text_encoder.' in k} if len(text_encoder_state_dict) > 0: self.load_lora_into_text_encoder(text_encoder_state_dict, network_alphas=None, text_encoder=self.text_encoder, prefix='text_encoder', lora_scale=self.lora_scale, adapter_name=adapter_name, _pipeline=self) text_encoder_2_state_dict = {k: v for (k, v) in state_dict.items() if 'text_encoder_2.' in k} if len(text_encoder_2_state_dict) > 0: self.load_lora_into_text_encoder(text_encoder_2_state_dict, network_alphas=None, text_encoder=self.text_encoder_2, prefix='text_encoder_2', lora_scale=self.lora_scale, adapter_name=adapter_name, _pipeline=self) @classmethod def load_lora_into_transformer(cls, state_dict, transformer, adapter_name=None, _pipeline=None): from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict keys = list(state_dict.keys()) transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)] state_dict = {k.replace(f'{cls.transformer_name}.', ''): v for (k, v) in state_dict.items() if k in transformer_keys} if len(state_dict.keys()) > 0: first_key = next(iter(state_dict.keys())) if 'lora_A' not in first_key: state_dict = convert_unet_state_dict_to_peft(state_dict) if adapter_name in getattr(transformer, 'peft_config', {}): raise ValueError(f'Adapter name {adapter_name} already in use in the transformer - please select a new adapter name.') rank = {} for (key, val) in state_dict.items(): if 'lora_B' in key: rank[key] = val.shape[1] lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=None, peft_state_dict=state_dict) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora'] and is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. 
Please upgrade your installation of `peft`.') else: lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(transformer) (is_model_cpu_offload, is_sequential_cpu_offload) = cls._optionally_disable_offloading(_pipeline) inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name) incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name) if incompatible_keys is not None: unexpected_keys = getattr(incompatible_keys, 'unexpected_keys', None) if unexpected_keys: logger.warning(f'Loading adapter weights from state_dict led to unexpected keys not found in the model: {unexpected_keys}. ') if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() @classmethod def load_lora_into_text_encoder(cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') from peft import LoraConfig keys = list(state_dict.keys()) prefix = cls.text_encoder_name if prefix is None else prefix if any((cls.text_encoder_name in key for key in keys)): text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split('.')[0] == prefix] text_encoder_lora_state_dict = {k.replace(f'{prefix}.', ''): v for (k, v) in state_dict.items() if k in text_encoder_keys} if len(text_encoder_lora_state_dict) > 0: logger.info(f'Loading {prefix}.') rank = {} text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) for (name, _) in text_encoder_attn_modules(text_encoder): for module in ('out_proj', 'q_proj', 'k_proj', 'v_proj'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] for (name, _) in text_encoder_mlp_modules(text_encoder): for module in ('fc1', 'fc2'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] if network_alphas is not None: alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split('.')[0] == prefix] network_alphas = {k.replace(f'{prefix}.', ''): v for (k, v) in network_alphas.items() if k in alpha_keys} lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora']: if is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. 
Please upgrade your installation of `peft`.') elif is_peft_version('<', '0.9.0'): lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(text_encoder) (is_model_cpu_offload, is_sequential_cpu_offload) = cls._optionally_disable_offloading(_pipeline) text_encoder.load_adapter(adapter_name=adapter_name, adapter_state_dict=text_encoder_lora_state_dict, peft_config=lora_config) scale_lora_layers(text_encoder, weight=lora_scale) text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() @classmethod def save_lora_weights(cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, torch.nn.Module]=None, text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]]=None, text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]]=None, is_main_process: bool=True, weight_name: str=None, save_function: Callable=None, safe_serialization: bool=True): state_dict = {} if not (transformer_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers): raise ValueError('You must pass at least one of `transformer_lora_layers`, `text_encoder_lora_layers`, `text_encoder_2_lora_layers`.') if transformer_lora_layers: state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, 'text_encoder')) if text_encoder_2_lora_layers: state_dict.update(cls.pack_weights(text_encoder_2_lora_layers, 'text_encoder_2')) cls.write_lora_layers(state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization) def fuse_lora(self, components: List[str]=['transformer', 'text_encoder', 'text_encoder_2'], lora_scale: float=1.0, safe_fusing: bool=False, adapter_names: Optional[List[str]]=None, **kwargs): super().fuse_lora(components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) def unfuse_lora(self, components: List[str]=['transformer', 'text_encoder', 'text_encoder_2'], **kwargs): super().unfuse_lora(components=components) class FluxLoraLoaderMixin(LoraBaseMixin): _lora_loadable_modules = ['transformer', 'text_encoder'] transformer_name = TRANSFORMER_NAME text_encoder_name = TEXT_ENCODER_NAME @classmethod @validate_hf_hub_args def lora_state_dict(cls, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], return_alphas: bool=False, **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) subfolder = kwargs.pop('subfolder', None) weight_name = kwargs.pop('weight_name', None) use_safetensors = kwargs.pop('use_safetensors', None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {'file_type': 'attn_procs_weights', 'framework': 'pytorch'} state_dict = cls._fetch_state_dict(pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, weight_name=weight_name, use_safetensors=use_safetensors, local_files_only=local_files_only, cache_dir=cache_dir, force_download=force_download, 
proxies=proxies, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, allow_pickle=allow_pickle) is_kohya = any(('.lora_down.weight' in k for k in state_dict)) if is_kohya: state_dict = _convert_kohya_flux_lora_to_diffusers(state_dict) return (state_dict, None) if return_alphas else state_dict is_xlabs = any(('processor' in k for k in state_dict)) if is_xlabs: state_dict = _convert_xlabs_flux_lora_to_diffusers(state_dict) return (state_dict, None) if return_alphas else state_dict keys = list(state_dict.keys()) network_alphas = {} for k in keys: if 'alpha' in k: alpha_value = state_dict.get(k) if (torch.is_tensor(alpha_value) and torch.is_floating_point(alpha_value)) or isinstance(alpha_value, float): network_alphas[k] = state_dict.pop(k) else: raise ValueError(f'The alpha key ({k}) seems to be incorrect. If you think this error is unexpected, please open an issue.') if return_alphas: return (state_dict, network_alphas) else: return state_dict def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') if isinstance(pretrained_model_name_or_path_or_dict, dict): pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() (state_dict, network_alphas) = self.lora_state_dict(pretrained_model_name_or_path_or_dict, return_alphas=True, **kwargs) is_correct_format = all(('lora' in key or 'dora_scale' in key for key in state_dict.keys())) if not is_correct_format: raise ValueError('Invalid LoRA checkpoint.') self.load_lora_into_transformer(state_dict, network_alphas=network_alphas, transformer=getattr(self, self.transformer_name) if not hasattr(self, 'transformer') else self.transformer, adapter_name=adapter_name, _pipeline=self) text_encoder_state_dict = {k: v for (k, v) in state_dict.items() if 'text_encoder.' 
in k} if len(text_encoder_state_dict) > 0: self.load_lora_into_text_encoder(text_encoder_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder, prefix='text_encoder', lora_scale=self.lora_scale, adapter_name=adapter_name, _pipeline=self) @classmethod def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, adapter_name=None, _pipeline=None): from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict keys = list(state_dict.keys()) transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)] state_dict = {k.replace(f'{cls.transformer_name}.', ''): v for (k, v) in state_dict.items() if k in transformer_keys} if len(state_dict.keys()) > 0: first_key = next(iter(state_dict.keys())) if 'lora_A' not in first_key: state_dict = convert_unet_state_dict_to_peft(state_dict) if adapter_name in getattr(transformer, 'peft_config', {}): raise ValueError(f'Adapter name {adapter_name} already in use in the transformer - please select a new adapter name.') rank = {} for (key, val) in state_dict.items(): if 'lora_B' in key: rank[key] = val.shape[1] if network_alphas is not None and len(network_alphas) >= 1: prefix = cls.transformer_name alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split('.')[0] == prefix] network_alphas = {k.replace(f'{prefix}.', ''): v for (k, v) in network_alphas.items() if k in alpha_keys} lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=network_alphas, peft_state_dict=state_dict) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora'] and is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`.') else: lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(transformer) (is_model_cpu_offload, is_sequential_cpu_offload) = cls._optionally_disable_offloading(_pipeline) inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name) incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name) if incompatible_keys is not None: unexpected_keys = getattr(incompatible_keys, 'unexpected_keys', None) if unexpected_keys: logger.warning(f'Loading adapter weights from state_dict led to unexpected keys not found in the model: {unexpected_keys}. 
') if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() @classmethod def load_lora_into_text_encoder(cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') from peft import LoraConfig keys = list(state_dict.keys()) prefix = cls.text_encoder_name if prefix is None else prefix if any((cls.text_encoder_name in key for key in keys)): text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split('.')[0] == prefix] text_encoder_lora_state_dict = {k.replace(f'{prefix}.', ''): v for (k, v) in state_dict.items() if k in text_encoder_keys} if len(text_encoder_lora_state_dict) > 0: logger.info(f'Loading {prefix}.') rank = {} text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) for (name, _) in text_encoder_attn_modules(text_encoder): for module in ('out_proj', 'q_proj', 'k_proj', 'v_proj'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] for (name, _) in text_encoder_mlp_modules(text_encoder): for module in ('fc1', 'fc2'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] if network_alphas is not None: alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split('.')[0] == prefix] network_alphas = {k.replace(f'{prefix}.', ''): v for (k, v) in network_alphas.items() if k in alpha_keys} lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora']: if is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. 
Please upgrade your installation of `peft`.') elif is_peft_version('<', '0.9.0'): lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(text_encoder) (is_model_cpu_offload, is_sequential_cpu_offload) = cls._optionally_disable_offloading(_pipeline) text_encoder.load_adapter(adapter_name=adapter_name, adapter_state_dict=text_encoder_lora_state_dict, peft_config=lora_config) scale_lora_layers(text_encoder, weight=lora_scale) text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() @classmethod def save_lora_weights(cls, save_directory: Union[str, os.PathLike], transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]]=None, text_encoder_lora_layers: Dict[str, torch.nn.Module]=None, is_main_process: bool=True, weight_name: str=None, save_function: Callable=None, safe_serialization: bool=True): state_dict = {} if not (transformer_lora_layers or text_encoder_lora_layers): raise ValueError('You must pass at least one of `transformer_lora_layers` and `text_encoder_lora_layers`.') if transformer_lora_layers: state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) cls.write_lora_layers(state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization) def fuse_lora(self, components: List[str]=['transformer', 'text_encoder'], lora_scale: float=1.0, safe_fusing: bool=False, adapter_names: Optional[List[str]]=None, **kwargs): super().fuse_lora(components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) def unfuse_lora(self, components: List[str]=['transformer', 'text_encoder'], **kwargs): super().unfuse_lora(components=components) class AmusedLoraLoaderMixin(StableDiffusionLoraLoaderMixin): _lora_loadable_modules = ['transformer', 'text_encoder'] transformer_name = TRANSFORMER_NAME text_encoder_name = TEXT_ENCODER_NAME @classmethod def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, adapter_name=None, _pipeline=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict keys = list(state_dict.keys()) transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)] state_dict = {k.replace(f'{cls.transformer_name}.', ''): v for (k, v) in state_dict.items() if k in transformer_keys} if network_alphas is not None: alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.transformer_name)] network_alphas = {k.replace(f'{cls.transformer_name}.', ''): v for (k, v) in network_alphas.items() if k in alpha_keys} if len(state_dict.keys()) > 0: if adapter_name in getattr(transformer, 'peft_config', {}): raise ValueError(f'Adapter name {adapter_name} already in use in the transformer - please select a new adapter name.') rank = {} for (key, val) in state_dict.items(): if 'lora_B' in key: rank[key] = val.shape[1] lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora'] and is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at 
least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`.') else: lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(transformer) (is_model_cpu_offload, is_sequential_cpu_offload) = cls._optionally_disable_offloading(_pipeline) inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name) incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name) if incompatible_keys is not None: unexpected_keys = getattr(incompatible_keys, 'unexpected_keys', None) if unexpected_keys: logger.warning(f'Loading adapter weights from state_dict led to unexpected keys not found in the model: {unexpected_keys}. ') if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() @classmethod def load_lora_into_text_encoder(cls, state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, adapter_name=None, _pipeline=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') from peft import LoraConfig keys = list(state_dict.keys()) prefix = cls.text_encoder_name if prefix is None else prefix if any((cls.text_encoder_name in key for key in keys)): text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split('.')[0] == prefix] text_encoder_lora_state_dict = {k.replace(f'{prefix}.', ''): v for (k, v) in state_dict.items() if k in text_encoder_keys} if len(text_encoder_lora_state_dict) > 0: logger.info(f'Loading {prefix}.') rank = {} text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) for (name, _) in text_encoder_attn_modules(text_encoder): for module in ('out_proj', 'q_proj', 'k_proj', 'v_proj'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] for (name, _) in text_encoder_mlp_modules(text_encoder): for module in ('fc1', 'fc2'): rank_key = f'{name}.{module}.lora_B.weight' if rank_key not in text_encoder_lora_state_dict: continue rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] if network_alphas is not None: alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split('.')[0] == prefix] network_alphas = {k.replace(f'{prefix}.', ''): v for (k, v) in network_alphas.items() if k in alpha_keys} lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora']: if is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. 
Please upgrade your installation of `peft`.') elif is_peft_version('<', '0.9.0'): lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(text_encoder) (is_model_cpu_offload, is_sequential_cpu_offload) = cls._optionally_disable_offloading(_pipeline) text_encoder.load_adapter(adapter_name=adapter_name, adapter_state_dict=text_encoder_lora_state_dict, peft_config=lora_config) scale_lora_layers(text_encoder, weight=lora_scale) text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() @classmethod def save_lora_weights(cls, save_directory: Union[str, os.PathLike], text_encoder_lora_layers: Dict[str, torch.nn.Module]=None, transformer_lora_layers: Dict[str, torch.nn.Module]=None, is_main_process: bool=True, weight_name: str=None, save_function: Callable=None, safe_serialization: bool=True): state_dict = {} if not (transformer_lora_layers or text_encoder_lora_layers): raise ValueError('You must pass at least one of `transformer_lora_layers` or `text_encoder_lora_layers`.') if transformer_lora_layers: state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) if text_encoder_lora_layers: state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) cls.write_lora_layers(state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization) class LoraLoaderMixin(StableDiffusionLoraLoaderMixin): def __init__(self, *args, **kwargs): deprecation_message = 'LoraLoaderMixin is deprecated and will be removed in a future version. Please use `StableDiffusionLoraLoaderMixin` instead.' 
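# NOTE (illustrative comment, not part of the library source): the `deprecate` call below only
# emits a warning; the class still behaves like `StableDiffusionLoraLoaderMixin`, which pipelines
# such as StableDiffusionPipeline already inherit. A minimal, hedged migration sketch; the LoRA
# repository id below is hypothetical:
#
#   >>> from diffusers import StableDiffusionPipeline
#   >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   >>> pipe.load_lora_weights("some-user/some-sd15-lora", adapter_name="style")
#   >>> pipe.fuse_lora(lora_scale=0.8)  # optionally merge the adapter into the base weights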
deprecate('LoraLoaderMixin', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) # File: diffusers-main/src/diffusers/loaders/peft.py import inspect from functools import partial from typing import Dict, List, Optional, Union from ..utils import MIN_PEFT_VERSION, USE_PEFT_BACKEND, check_peft_version, delete_adapter_layers, is_peft_available, set_adapter_layers, set_weights_and_activate_adapters from .unet_loader_utils import _maybe_expand_lora_scales _SET_ADAPTER_SCALE_FN_MAPPING = {'UNet2DConditionModel': _maybe_expand_lora_scales, 'UNetMotionModel': _maybe_expand_lora_scales, 'SD3Transformer2DModel': lambda model_cls, weights: weights, 'FluxTransformer2DModel': lambda model_cls, weights: weights} class PeftAdapterMixin: _hf_peft_config_loaded = False def set_adapters(self, adapter_names: Union[List[str], str], weights: Optional[Union[float, Dict, List[float], List[Dict], List[None]]]=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for `set_adapters()`.') adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names if not isinstance(weights, list): weights = [weights] * len(adapter_names) if len(adapter_names) != len(weights): raise ValueError(f'Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}.') weights = [w if w is not None else 1.0 for w in weights] scale_expansion_fn = _SET_ADAPTER_SCALE_FN_MAPPING[self.__class__.__name__] weights = scale_expansion_fn(self, weights) set_weights_and_activate_adapters(self, adapter_names, weights) def add_adapter(self, adapter_config, adapter_name: str='default') -> None: check_peft_version(min_version=MIN_PEFT_VERSION) if not is_peft_available(): raise ImportError('PEFT is not available. Please install PEFT to use this function: `pip install peft`.') from peft import PeftConfig, inject_adapter_in_model if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True elif adapter_name in self.peft_config: raise ValueError(f'Adapter with name {adapter_name} already exists. Please use a different name.') if not isinstance(adapter_config, PeftConfig): raise ValueError(f'adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.') adapter_config.base_model_name_or_path = None inject_adapter_in_model(adapter_config, self, adapter_name) self.set_adapter(adapter_name) def set_adapter(self, adapter_name: Union[str, List[str]]) -> None: check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. Please load an adapter first.') if isinstance(adapter_name, str): adapter_name = [adapter_name] missing = set(adapter_name) - set(self.peft_config) if len(missing) > 0: raise ValueError(f"The following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s). Currently loaded adapters are: {list(self.peft_config.keys())}") from peft.tuners.tuners_utils import BaseTunerLayer _adapters_has_been_set = False for (_, module) in self.named_modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, 'set_adapter'): module.set_adapter(adapter_name) elif not hasattr(module, 'set_adapter') and len(adapter_name) != 1: raise ValueError('You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT. 
`pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`') else: module.active_adapter = adapter_name _adapters_has_been_set = True if not _adapters_has_been_set: raise ValueError('Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters.') def disable_adapters(self) -> None: check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. Please load an adapter first.') from peft.tuners.tuners_utils import BaseTunerLayer for (_, module) in self.named_modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, 'enable_adapters'): module.enable_adapters(enabled=False) else: module.disable_adapters = True def enable_adapters(self) -> None: check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. Please load an adapter first.') from peft.tuners.tuners_utils import BaseTunerLayer for (_, module) in self.named_modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, 'enable_adapters'): module.enable_adapters(enabled=True) else: module.disable_adapters = False def active_adapters(self) -> List[str]: check_peft_version(min_version=MIN_PEFT_VERSION) if not is_peft_available(): raise ImportError('PEFT is not available. Please install PEFT to use this function: `pip install peft`.') if not self._hf_peft_config_loaded: raise ValueError('No adapter loaded. Please load an adapter first.') from peft.tuners.tuners_utils import BaseTunerLayer for (_, module) in self.named_modules(): if isinstance(module, BaseTunerLayer): return module.active_adapter def fuse_lora(self, lora_scale=1.0, safe_fusing=False, adapter_names=None): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for `fuse_lora()`.') self.lora_scale = lora_scale self._safe_fusing = safe_fusing self.apply(partial(self._fuse_lora_apply, adapter_names=adapter_names)) def _fuse_lora_apply(self, module, adapter_names=None): from peft.tuners.tuners_utils import BaseTunerLayer merge_kwargs = {'safe_merge': self._safe_fusing} if isinstance(module, BaseTunerLayer): if self.lora_scale != 1.0: module.scale_layer(self.lora_scale) supported_merge_kwargs = list(inspect.signature(module.merge).parameters) if 'adapter_names' in supported_merge_kwargs: merge_kwargs['adapter_names'] = adapter_names elif 'adapter_names' not in supported_merge_kwargs and adapter_names is not None: raise ValueError('The `adapter_names` argument is not supported with your PEFT version. Please upgrade to the latest version of PEFT. 
`pip install -U peft`') module.merge(**merge_kwargs) def unfuse_lora(self): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for `unfuse_lora()`.') self.apply(self._unfuse_lora_apply) def _unfuse_lora_apply(self, module): from peft.tuners.tuners_utils import BaseTunerLayer if isinstance(module, BaseTunerLayer): module.unmerge() def unload_lora(self): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for `unload_lora()`.') from ..utils import recurse_remove_peft_layers recurse_remove_peft_layers(self) if hasattr(self, 'peft_config'): del self.peft_config def disable_lora(self): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') set_adapter_layers(self, enabled=False) def enable_lora(self): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') set_adapter_layers(self, enabled=True) def delete_adapters(self, adapter_names: Union[List[str], str]): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') if isinstance(adapter_names, str): adapter_names = [adapter_names] for adapter_name in adapter_names: delete_adapter_layers(self, adapter_name) if hasattr(self, 'peft_config'): self.peft_config.pop(adapter_name, None) # File: diffusers-main/src/diffusers/loaders/single_file.py import importlib import inspect import os import torch from huggingface_hub import snapshot_download from huggingface_hub.utils import LocalEntryNotFoundError, validate_hf_hub_args from packaging import version from ..utils import deprecate, is_transformers_available, logging from .single_file_utils import SingleFileComponentError, _is_legacy_scheduler_kwargs, _is_model_weights_in_cached_folder, _legacy_load_clip_tokenizer, _legacy_load_safety_checker, _legacy_load_scheduler, create_diffusers_clip_model_from_ldm, create_diffusers_t5_model_from_checkpoint, fetch_diffusers_config, fetch_original_config, is_clip_model_in_single_file, is_t5_in_single_file, load_single_file_checkpoint logger = logging.get_logger(__name__) SINGLE_FILE_OPTIONAL_COMPONENTS = ['safety_checker'] if is_transformers_available(): import transformers from transformers import PreTrainedModel, PreTrainedTokenizer def load_single_file_sub_model(library_name, class_name, name, checkpoint, pipelines, is_pipeline_module, cached_model_config_path, original_config=None, local_files_only=False, torch_dtype=None, is_legacy_loading=False, **kwargs): if is_pipeline_module: pipeline_module = getattr(pipelines, library_name) class_obj = getattr(pipeline_module, class_name) else: library = importlib.import_module(library_name) class_obj = getattr(library, class_name) if is_transformers_available(): transformers_version = version.parse(version.parse(transformers.__version__).base_version) else: transformers_version = 'N/A' is_transformers_model = is_transformers_available() and issubclass(class_obj, PreTrainedModel) and (transformers_version >= version.parse('4.20.0')) is_tokenizer = is_transformers_available() and issubclass(class_obj, PreTrainedTokenizer) and (transformers_version >= version.parse('4.20.0')) diffusers_module = importlib.import_module(__name__.split('.')[0]) is_diffusers_single_file_model = issubclass(class_obj, diffusers_module.FromOriginalModelMixin) is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) is_diffusers_scheduler = issubclass(class_obj, diffusers_module.SchedulerMixin) if is_diffusers_single_file_model: load_method = getattr(class_obj, 'from_single_file') if original_config: 
cached_model_config_path = None loaded_sub_model = load_method(pretrained_model_link_or_path_or_dict=checkpoint, original_config=original_config, config=cached_model_config_path, subfolder=name, torch_dtype=torch_dtype, local_files_only=local_files_only, **kwargs) elif is_transformers_model and is_clip_model_in_single_file(class_obj, checkpoint): loaded_sub_model = create_diffusers_clip_model_from_ldm(class_obj, checkpoint=checkpoint, config=cached_model_config_path, subfolder=name, torch_dtype=torch_dtype, local_files_only=local_files_only, is_legacy_loading=is_legacy_loading) elif is_transformers_model and is_t5_in_single_file(checkpoint): loaded_sub_model = create_diffusers_t5_model_from_checkpoint(class_obj, checkpoint=checkpoint, config=cached_model_config_path, subfolder=name, torch_dtype=torch_dtype, local_files_only=local_files_only) elif is_tokenizer and is_legacy_loading: loaded_sub_model = _legacy_load_clip_tokenizer(class_obj, checkpoint=checkpoint, config=cached_model_config_path, local_files_only=local_files_only) elif is_diffusers_scheduler and (is_legacy_loading or _is_legacy_scheduler_kwargs(kwargs)): loaded_sub_model = _legacy_load_scheduler(class_obj, checkpoint=checkpoint, component_name=name, original_config=original_config, **kwargs) else: if not hasattr(class_obj, 'from_pretrained'): raise ValueError(f'The component {class_obj.__name__} cannot be loaded as it does not seem to have a supported loading method.') loading_kwargs = {} loading_kwargs.update({'pretrained_model_name_or_path': cached_model_config_path, 'subfolder': name, 'local_files_only': local_files_only}) if issubclass(class_obj, torch.nn.Module): loading_kwargs.update({'torch_dtype': torch_dtype}) if is_diffusers_model or is_transformers_model: if not _is_model_weights_in_cached_folder(cached_model_config_path, name): raise SingleFileComponentError(f'Failed to load {class_name}. 
Weights for this component appear to be missing in the checkpoint.') load_method = getattr(class_obj, 'from_pretrained') loaded_sub_model = load_method(**loading_kwargs) return loaded_sub_model def _map_component_types_to_config_dict(component_types): diffusers_module = importlib.import_module(__name__.split('.')[0]) config_dict = {} component_types.pop('self', None) if is_transformers_available(): transformers_version = version.parse(version.parse(transformers.__version__).base_version) else: transformers_version = 'N/A' for (component_name, component_value) in component_types.items(): is_diffusers_model = issubclass(component_value[0], diffusers_module.ModelMixin) is_scheduler_enum = component_value[0].__name__ == 'KarrasDiffusionSchedulers' is_scheduler = issubclass(component_value[0], diffusers_module.SchedulerMixin) is_transformers_model = is_transformers_available() and issubclass(component_value[0], PreTrainedModel) and (transformers_version >= version.parse('4.20.0')) is_transformers_tokenizer = is_transformers_available() and issubclass(component_value[0], PreTrainedTokenizer) and (transformers_version >= version.parse('4.20.0')) if is_diffusers_model and component_name not in SINGLE_FILE_OPTIONAL_COMPONENTS: config_dict[component_name] = ['diffusers', component_value[0].__name__] elif is_scheduler_enum or is_scheduler: if is_scheduler_enum: config_dict[component_name] = ['diffusers', 'DDIMScheduler'] elif is_scheduler: config_dict[component_name] = ['diffusers', component_value[0].__name__] elif (is_transformers_model or is_transformers_tokenizer) and component_name not in SINGLE_FILE_OPTIONAL_COMPONENTS: config_dict[component_name] = ['transformers', component_value[0].__name__] else: config_dict[component_name] = [None, None] return config_dict def _infer_pipeline_config_dict(pipeline_class): parameters = inspect.signature(pipeline_class.__init__).parameters required_parameters = {k: v for (k, v) in parameters.items() if v.default == inspect._empty} component_types = pipeline_class._get_signature_types() component_types = {k: v for (k, v) in component_types.items() if k in required_parameters} config_dict = _map_component_types_to_config_dict(component_types) return config_dict def _download_diffusers_model_config_from_hub(pretrained_model_name_or_path, cache_dir, revision, proxies, force_download=None, local_files_only=None, token=None): allow_patterns = ['**/*.json', '*.json', '*.txt', '**/*.txt', '**/*.model'] cached_model_path = snapshot_download(pretrained_model_name_or_path, cache_dir=cache_dir, revision=revision, proxies=proxies, force_download=force_download, local_files_only=local_files_only, token=token, allow_patterns=allow_patterns) return cached_model_path class FromSingleFileMixin: @classmethod @validate_hf_hub_args def from_single_file(cls, pretrained_model_link_or_path, **kwargs): original_config_file = kwargs.pop('original_config_file', None) config = kwargs.pop('config', None) original_config = kwargs.pop('original_config', None) if original_config_file is not None: deprecation_message = '`original_config_file` argument is deprecated and will be removed in future versions.please use the `original_config` argument instead.' 
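# --- Illustrative usage sketch (not part of the original module) ---
# `from_single_file` resolves a .ckpt/.safetensors checkpoint (local path or
# Hub URL), infers a matching diffusers config repo when `config` is not
# passed, and then loads each pipeline component from the flat state dict via
# `load_single_file_sub_model`. A typical call, with a placeholder checkpoint
# URL, might look like:
#
#     import torch
#     from diffusers import StableDiffusionPipeline
#
#     pipe = StableDiffusionPipeline.from_single_file(
#         "https://huggingface.co/<repo>/blob/main/<weights>.safetensors",
#         torch_dtype=torch.float16,
#     )
#
# Passing `config="<local-or-hub-diffusers-repo>"` skips the config inference
# step, and `local_files_only=True` restricts resolution to the local cache.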
deprecate('original_config_file', '1.0.0', deprecation_message) original_config = original_config_file force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) token = kwargs.pop('token', None) cache_dir = kwargs.pop('cache_dir', None) local_files_only = kwargs.pop('local_files_only', False) revision = kwargs.pop('revision', None) torch_dtype = kwargs.pop('torch_dtype', None) is_legacy_loading = False scaling_factor = kwargs.get('scaling_factor', None) if scaling_factor is not None: deprecation_message = 'Passing the `scaling_factor` argument to `from_single_file is deprecated and will be ignored in future versions.' deprecate('scaling_factor', '1.0.0', deprecation_message) if original_config is not None: original_config = fetch_original_config(original_config, local_files_only=local_files_only) from ..pipelines.pipeline_utils import _get_pipeline_class pipeline_class = _get_pipeline_class(cls, config=None) checkpoint = load_single_file_checkpoint(pretrained_model_link_or_path, force_download=force_download, proxies=proxies, token=token, cache_dir=cache_dir, local_files_only=local_files_only, revision=revision) if config is None: config = fetch_diffusers_config(checkpoint) default_pretrained_model_config_name = config['pretrained_model_name_or_path'] else: default_pretrained_model_config_name = config if not os.path.isdir(default_pretrained_model_config_name): if default_pretrained_model_config_name.count('/') > 1: raise ValueError(f'The provided config "{config}" is neither a valid local path nor a valid repo id. Please check the parameter.') try: cached_model_config_path = _download_diffusers_model_config_from_hub(default_pretrained_model_config_name, cache_dir=cache_dir, revision=revision, proxies=proxies, force_download=force_download, local_files_only=local_files_only, token=token) config_dict = pipeline_class.load_config(cached_model_config_path) except LocalEntryNotFoundError: if original_config is None: logger.warning('`local_files_only` is True but no local configs were found for this checkpoint.\nAttempting to download the necessary config files for this pipeline.\n') cached_model_config_path = _download_diffusers_model_config_from_hub(default_pretrained_model_config_name, cache_dir=cache_dir, revision=revision, proxies=proxies, force_download=force_download, local_files_only=False, token=token) config_dict = pipeline_class.load_config(cached_model_config_path) else: logger.warning('Detected legacy `from_single_file` loading behavior. Attempting to create the pipeline based on inferred components.\nThis may lead to errors if the model components are not correctly inferred. \nTo avoid this warning, please explicity pass the `config` argument to `from_single_file` with a path to a local diffusers model repo \ne.g. 
`from_single_file(, config=) \nor run `from_single_file` with `local_files_only=False` first to update the local cache directory with the necessary config files.\n') is_legacy_loading = True cached_model_config_path = None config_dict = _infer_pipeline_config_dict(pipeline_class) config_dict['_class_name'] = pipeline_class.__name__ else: cached_model_config_path = default_pretrained_model_config_name config_dict = pipeline_class.load_config(cached_model_config_path) config_dict.pop('_ignore_files', None) (expected_modules, optional_kwargs) = pipeline_class._get_signature_keys(cls) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} (init_dict, unused_kwargs, _) = pipeline_class.extract_init_dict(config_dict, **kwargs) init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict} init_kwargs = {**init_kwargs, **passed_pipe_kwargs} from diffusers import pipelines def load_module(name, value): if value[0] is None: return False if name in passed_class_obj and passed_class_obj[name] is None: return False if name in SINGLE_FILE_OPTIONAL_COMPONENTS: return False return True init_dict = {k: v for (k, v) in init_dict.items() if load_module(k, v)} for (name, (library_name, class_name)) in logging.tqdm(sorted(init_dict.items()), desc='Loading pipeline components...'): loaded_sub_model = None is_pipeline_module = hasattr(pipelines, library_name) if name in passed_class_obj: loaded_sub_model = passed_class_obj[name] else: try: loaded_sub_model = load_single_file_sub_model(library_name=library_name, class_name=class_name, name=name, checkpoint=checkpoint, is_pipeline_module=is_pipeline_module, cached_model_config_path=cached_model_config_path, pipelines=pipelines, torch_dtype=torch_dtype, original_config=original_config, local_files_only=local_files_only, is_legacy_loading=is_legacy_loading, **kwargs) except SingleFileComponentError as e: raise SingleFileComponentError(f"{e.message}\nPlease load the component before passing it in as an argument to `from_single_file`.\n\n{name} = {class_name}.from_pretrained('...')\npipe = {pipeline_class.__name__}.from_single_file(, {name}={name})\n\n") init_kwargs[name] = loaded_sub_model missing_modules = set(expected_modules) - set(init_kwargs.keys()) passed_modules = list(passed_class_obj.keys()) optional_modules = pipeline_class._optional_components if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules): for module in missing_modules: init_kwargs[module] = passed_class_obj.get(module, None) elif len(missing_modules) > 0: passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs raise ValueError(f'Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed.') load_safety_checker = kwargs.pop('load_safety_checker', None) if load_safety_checker is not None: deprecation_message = 'Please pass instances of `StableDiffusionSafetyChecker` and `AutoImageProcessor`using the `safety_checker` and `feature_extractor` arguments in `from_single_file`' deprecate('load_safety_checker', '1.0.0', deprecation_message) safety_checker_components = _legacy_load_safety_checker(local_files_only, torch_dtype) init_kwargs.update(safety_checker_components) pipe = pipeline_class(**init_kwargs) return pipe # File: diffusers-main/src/diffusers/loaders/single_file_model.py import importlib import inspect import re from contextlib import nullcontext from typing import 
Optional from huggingface_hub.utils import validate_hf_hub_args from ..utils import deprecate, is_accelerate_available, logging from .single_file_utils import SingleFileComponentError, convert_animatediff_checkpoint_to_diffusers, convert_controlnet_checkpoint, convert_flux_transformer_checkpoint_to_diffusers, convert_ldm_unet_checkpoint, convert_ldm_vae_checkpoint, convert_sd3_transformer_checkpoint_to_diffusers, convert_stable_cascade_unet_single_file_to_diffusers, create_controlnet_diffusers_config_from_ldm, create_unet_diffusers_config_from_ldm, create_vae_diffusers_config_from_ldm, fetch_diffusers_config, fetch_original_config, load_single_file_checkpoint logger = logging.get_logger(__name__) if is_accelerate_available(): from accelerate import init_empty_weights from ..models.modeling_utils import load_model_dict_into_meta SINGLE_FILE_LOADABLE_CLASSES = {'StableCascadeUNet': {'checkpoint_mapping_fn': convert_stable_cascade_unet_single_file_to_diffusers}, 'UNet2DConditionModel': {'checkpoint_mapping_fn': convert_ldm_unet_checkpoint, 'config_mapping_fn': create_unet_diffusers_config_from_ldm, 'default_subfolder': 'unet', 'legacy_kwargs': {'num_in_channels': 'in_channels'}}, 'AutoencoderKL': {'checkpoint_mapping_fn': convert_ldm_vae_checkpoint, 'config_mapping_fn': create_vae_diffusers_config_from_ldm, 'default_subfolder': 'vae'}, 'ControlNetModel': {'checkpoint_mapping_fn': convert_controlnet_checkpoint, 'config_mapping_fn': create_controlnet_diffusers_config_from_ldm}, 'SD3Transformer2DModel': {'checkpoint_mapping_fn': convert_sd3_transformer_checkpoint_to_diffusers, 'default_subfolder': 'transformer'}, 'MotionAdapter': {'checkpoint_mapping_fn': convert_animatediff_checkpoint_to_diffusers}, 'SparseControlNetModel': {'checkpoint_mapping_fn': convert_animatediff_checkpoint_to_diffusers}, 'FluxTransformer2DModel': {'checkpoint_mapping_fn': convert_flux_transformer_checkpoint_to_diffusers, 'default_subfolder': 'transformer'}} def _get_single_file_loadable_mapping_class(cls): diffusers_module = importlib.import_module(__name__.split('.')[0]) for loadable_class_str in SINGLE_FILE_LOADABLE_CLASSES: loadable_class = getattr(diffusers_module, loadable_class_str) if issubclass(cls, loadable_class): return loadable_class_str return None def _get_mapping_function_kwargs(mapping_fn, **kwargs): parameters = inspect.signature(mapping_fn).parameters mapping_kwargs = {} for parameter in parameters: if parameter in kwargs: mapping_kwargs[parameter] = kwargs[parameter] return mapping_kwargs class FromOriginalModelMixin: @classmethod @validate_hf_hub_args def from_single_file(cls, pretrained_model_link_or_path_or_dict: Optional[str]=None, **kwargs): mapping_class_name = _get_single_file_loadable_mapping_class(cls) if mapping_class_name is None: raise ValueError(f"FromOriginalModelMixin is currently only compatible with {', '.join(SINGLE_FILE_LOADABLE_CLASSES.keys())}") pretrained_model_link_or_path = kwargs.get('pretrained_model_link_or_path', None) if pretrained_model_link_or_path is not None: deprecation_message = 'Please use `pretrained_model_link_or_path_or_dict` argument instead for model classes' deprecate('pretrained_model_link_or_path', '1.0.0', deprecation_message) pretrained_model_link_or_path_or_dict = pretrained_model_link_or_path config = kwargs.pop('config', None) original_config = kwargs.pop('original_config', None) if config is not None and original_config is not None: raise ValueError('`from_single_file` cannot accept both `config` and `original_config` arguments. 
Please provide only one of these arguments') force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) token = kwargs.pop('token', None) cache_dir = kwargs.pop('cache_dir', None) local_files_only = kwargs.pop('local_files_only', None) subfolder = kwargs.pop('subfolder', None) revision = kwargs.pop('revision', None) torch_dtype = kwargs.pop('torch_dtype', None) if isinstance(pretrained_model_link_or_path_or_dict, dict): checkpoint = pretrained_model_link_or_path_or_dict else: checkpoint = load_single_file_checkpoint(pretrained_model_link_or_path_or_dict, force_download=force_download, proxies=proxies, token=token, cache_dir=cache_dir, local_files_only=local_files_only, revision=revision) mapping_functions = SINGLE_FILE_LOADABLE_CLASSES[mapping_class_name] checkpoint_mapping_fn = mapping_functions['checkpoint_mapping_fn'] if original_config: if 'config_mapping_fn' in mapping_functions: config_mapping_fn = mapping_functions['config_mapping_fn'] else: config_mapping_fn = None if config_mapping_fn is None: raise ValueError(f'`original_config` has been provided for {mapping_class_name} but no mapping functionwas found to convert the original config to a Diffusers config in`diffusers.loaders.single_file_utils`') if isinstance(original_config, str): original_config = fetch_original_config(original_config, local_files_only=local_files_only) config_mapping_kwargs = _get_mapping_function_kwargs(config_mapping_fn, **kwargs) diffusers_model_config = config_mapping_fn(original_config=original_config, checkpoint=checkpoint, **config_mapping_kwargs) else: if config: if isinstance(config, str): default_pretrained_model_config_name = config else: raise ValueError('Invalid `config` argument. Please provide a string representing a repo idor path to a local Diffusers model repo.') else: config = fetch_diffusers_config(checkpoint) default_pretrained_model_config_name = config['pretrained_model_name_or_path'] if 'default_subfolder' in mapping_functions: subfolder = mapping_functions['default_subfolder'] subfolder = subfolder or config.pop('subfolder', None) diffusers_model_config = cls.load_config(pretrained_model_name_or_path=default_pretrained_model_config_name, subfolder=subfolder, local_files_only=local_files_only) (expected_kwargs, optional_kwargs) = cls._get_signature_keys(cls) if 'legacy_kwargs' in mapping_functions: legacy_kwargs = mapping_functions['legacy_kwargs'] for (legacy_key, new_key) in legacy_kwargs.items(): if legacy_key in kwargs: kwargs[new_key] = kwargs.pop(legacy_key) model_kwargs = {k: kwargs.get(k) for k in kwargs if k in expected_kwargs or k in optional_kwargs} diffusers_model_config.update(model_kwargs) checkpoint_mapping_kwargs = _get_mapping_function_kwargs(checkpoint_mapping_fn, **kwargs) diffusers_format_checkpoint = checkpoint_mapping_fn(config=diffusers_model_config, checkpoint=checkpoint, **checkpoint_mapping_kwargs) if not diffusers_format_checkpoint: raise SingleFileComponentError(f'Failed to load {mapping_class_name}. 
Weights for this component appear to be missing in the checkpoint.') ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): model = cls.from_config(diffusers_model_config) if is_accelerate_available(): unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype) else: (_, unexpected_keys) = model.load_state_dict(diffusers_format_checkpoint, strict=False) if model._keys_to_ignore_on_load_unexpected is not None: for pat in model._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning(f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}") if torch_dtype is not None: model.to(torch_dtype) model.eval() return model # File: diffusers-main/src/diffusers/loaders/single_file_utils.py """""" import os import re from contextlib import nullcontext from io import BytesIO from urllib.parse import urlparse import requests import torch import yaml from ..models.modeling_utils import load_state_dict from ..schedulers import DDIMScheduler, DPMSolverMultistepScheduler, EDMDPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler from ..utils import SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, deprecate, is_accelerate_available, is_transformers_available, logging from ..utils.hub_utils import _get_model_file if is_transformers_available(): from transformers import AutoImageProcessor if is_accelerate_available(): from accelerate import init_empty_weights from ..models.modeling_utils import load_model_dict_into_meta logger = logging.get_logger(__name__) CHECKPOINT_KEY_NAMES = {'v2': 'model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight', 'xl_base': 'conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias', 'xl_refiner': 'conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias', 'upscale': 'model.diffusion_model.input_blocks.10.0.skip_connection.bias', 'controlnet': 'control_model.time_embed.0.weight', 'playground-v2-5': 'edm_mean', 'inpainting': 'model.diffusion_model.input_blocks.0.0.weight', 'clip': 'cond_stage_model.transformer.text_model.embeddings.position_embedding.weight', 'clip_sdxl': 'conditioner.embedders.0.transformer.text_model.embeddings.position_embedding.weight', 'clip_sd3': 'text_encoders.clip_l.transformer.text_model.embeddings.position_embedding.weight', 'open_clip': 'cond_stage_model.model.token_embedding.weight', 'open_clip_sdxl': 'conditioner.embedders.1.model.positional_embedding', 'open_clip_sdxl_refiner': 'conditioner.embedders.0.model.text_projection', 'open_clip_sd3': 'text_encoders.clip_g.transformer.text_model.embeddings.position_embedding.weight', 'stable_cascade_stage_b': 'down_blocks.1.0.channelwise.0.weight', 'stable_cascade_stage_c': 'clip_txt_mapper.weight', 'sd3': 'model.diffusion_model.joint_blocks.0.context_block.adaLN_modulation.1.bias', 'animatediff': 'down_blocks.0.motion_modules.0.temporal_transformer.transformer_blocks.0.attention_blocks.0.pos_encoder.pe', 'animatediff_v2': 'mid_block.motion_modules.0.temporal_transformer.norm.bias', 'animatediff_sdxl_beta': 'up_blocks.2.motion_modules.0.temporal_transformer.norm.weight', 'animatediff_scribble': 'controlnet_cond_embedding.conv_in.weight', 'animatediff_rgb': 'controlnet_cond_embedding.weight', 'flux': ['double_blocks.0.img_attn.norm.key_norm.scale', 
'model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale']} DIFFUSERS_DEFAULT_PIPELINE_PATHS = {'xl_base': {'pretrained_model_name_or_path': 'stabilityai/stable-diffusion-xl-base-1.0'}, 'xl_refiner': {'pretrained_model_name_or_path': 'stabilityai/stable-diffusion-xl-refiner-1.0'}, 'xl_inpaint': {'pretrained_model_name_or_path': 'diffusers/stable-diffusion-xl-1.0-inpainting-0.1'}, 'playground-v2-5': {'pretrained_model_name_or_path': 'playgroundai/playground-v2.5-1024px-aesthetic'}, 'upscale': {'pretrained_model_name_or_path': 'stabilityai/stable-diffusion-x4-upscaler'}, 'inpainting': {'pretrained_model_name_or_path': 'Lykon/dreamshaper-8-inpainting'}, 'inpainting_v2': {'pretrained_model_name_or_path': 'stabilityai/stable-diffusion-2-inpainting'}, 'controlnet': {'pretrained_model_name_or_path': 'lllyasviel/control_v11p_sd15_canny'}, 'v2': {'pretrained_model_name_or_path': 'stabilityai/stable-diffusion-2-1'}, 'v1': {'pretrained_model_name_or_path': 'Lykon/dreamshaper-8'}, 'stable_cascade_stage_b': {'pretrained_model_name_or_path': 'stabilityai/stable-cascade', 'subfolder': 'decoder'}, 'stable_cascade_stage_b_lite': {'pretrained_model_name_or_path': 'stabilityai/stable-cascade', 'subfolder': 'decoder_lite'}, 'stable_cascade_stage_c': {'pretrained_model_name_or_path': 'stabilityai/stable-cascade-prior', 'subfolder': 'prior'}, 'stable_cascade_stage_c_lite': {'pretrained_model_name_or_path': 'stabilityai/stable-cascade-prior', 'subfolder': 'prior_lite'}, 'sd3': {'pretrained_model_name_or_path': 'stabilityai/stable-diffusion-3-medium-diffusers'}, 'animatediff_v1': {'pretrained_model_name_or_path': 'guoyww/animatediff-motion-adapter-v1-5'}, 'animatediff_v2': {'pretrained_model_name_or_path': 'guoyww/animatediff-motion-adapter-v1-5-2'}, 'animatediff_v3': {'pretrained_model_name_or_path': 'guoyww/animatediff-motion-adapter-v1-5-3'}, 'animatediff_sdxl_beta': {'pretrained_model_name_or_path': 'guoyww/animatediff-motion-adapter-sdxl-beta'}, 'animatediff_scribble': {'pretrained_model_name_or_path': 'guoyww/animatediff-sparsectrl-scribble'}, 'animatediff_rgb': {'pretrained_model_name_or_path': 'guoyww/animatediff-sparsectrl-rgb'}, 'flux-dev': {'pretrained_model_name_or_path': 'black-forest-labs/FLUX.1-dev'}, 'flux-schnell': {'pretrained_model_name_or_path': 'black-forest-labs/FLUX.1-schnell'}} DIFFUSERS_TO_LDM_DEFAULT_IMAGE_SIZE_MAP = {'xl_base': 1024, 'xl_refiner': 1024, 'xl_inpaint': 1024, 'playground-v2-5': 1024, 'upscale': 512, 'inpainting': 512, 'inpainting_v2': 512, 'controlnet': 512, 'v2': 768, 'v1': 512} DIFFUSERS_TO_LDM_MAPPING = {'unet': {'layers': {'time_embedding.linear_1.weight': 'time_embed.0.weight', 'time_embedding.linear_1.bias': 'time_embed.0.bias', 'time_embedding.linear_2.weight': 'time_embed.2.weight', 'time_embedding.linear_2.bias': 'time_embed.2.bias', 'conv_in.weight': 'input_blocks.0.0.weight', 'conv_in.bias': 'input_blocks.0.0.bias', 'conv_norm_out.weight': 'out.0.weight', 'conv_norm_out.bias': 'out.0.bias', 'conv_out.weight': 'out.2.weight', 'conv_out.bias': 'out.2.bias'}, 'class_embed_type': {'class_embedding.linear_1.weight': 'label_emb.0.0.weight', 'class_embedding.linear_1.bias': 'label_emb.0.0.bias', 'class_embedding.linear_2.weight': 'label_emb.0.2.weight', 'class_embedding.linear_2.bias': 'label_emb.0.2.bias'}, 'addition_embed_type': {'add_embedding.linear_1.weight': 'label_emb.0.0.weight', 'add_embedding.linear_1.bias': 'label_emb.0.0.bias', 'add_embedding.linear_2.weight': 'label_emb.0.2.weight', 'add_embedding.linear_2.bias': 'label_emb.0.2.bias'}}, 
'controlnet': {'layers': {'time_embedding.linear_1.weight': 'time_embed.0.weight', 'time_embedding.linear_1.bias': 'time_embed.0.bias', 'time_embedding.linear_2.weight': 'time_embed.2.weight', 'time_embedding.linear_2.bias': 'time_embed.2.bias', 'conv_in.weight': 'input_blocks.0.0.weight', 'conv_in.bias': 'input_blocks.0.0.bias', 'controlnet_cond_embedding.conv_in.weight': 'input_hint_block.0.weight', 'controlnet_cond_embedding.conv_in.bias': 'input_hint_block.0.bias', 'controlnet_cond_embedding.conv_out.weight': 'input_hint_block.14.weight', 'controlnet_cond_embedding.conv_out.bias': 'input_hint_block.14.bias'}, 'class_embed_type': {'class_embedding.linear_1.weight': 'label_emb.0.0.weight', 'class_embedding.linear_1.bias': 'label_emb.0.0.bias', 'class_embedding.linear_2.weight': 'label_emb.0.2.weight', 'class_embedding.linear_2.bias': 'label_emb.0.2.bias'}, 'addition_embed_type': {'add_embedding.linear_1.weight': 'label_emb.0.0.weight', 'add_embedding.linear_1.bias': 'label_emb.0.0.bias', 'add_embedding.linear_2.weight': 'label_emb.0.2.weight', 'add_embedding.linear_2.bias': 'label_emb.0.2.bias'}}, 'vae': {'encoder.conv_in.weight': 'encoder.conv_in.weight', 'encoder.conv_in.bias': 'encoder.conv_in.bias', 'encoder.conv_out.weight': 'encoder.conv_out.weight', 'encoder.conv_out.bias': 'encoder.conv_out.bias', 'encoder.conv_norm_out.weight': 'encoder.norm_out.weight', 'encoder.conv_norm_out.bias': 'encoder.norm_out.bias', 'decoder.conv_in.weight': 'decoder.conv_in.weight', 'decoder.conv_in.bias': 'decoder.conv_in.bias', 'decoder.conv_out.weight': 'decoder.conv_out.weight', 'decoder.conv_out.bias': 'decoder.conv_out.bias', 'decoder.conv_norm_out.weight': 'decoder.norm_out.weight', 'decoder.conv_norm_out.bias': 'decoder.norm_out.bias', 'quant_conv.weight': 'quant_conv.weight', 'quant_conv.bias': 'quant_conv.bias', 'post_quant_conv.weight': 'post_quant_conv.weight', 'post_quant_conv.bias': 'post_quant_conv.bias'}, 'openclip': {'layers': {'text_model.embeddings.position_embedding.weight': 'positional_embedding', 'text_model.embeddings.token_embedding.weight': 'token_embedding.weight', 'text_model.final_layer_norm.weight': 'ln_final.weight', 'text_model.final_layer_norm.bias': 'ln_final.bias', 'text_projection.weight': 'text_projection'}, 'transformer': {'text_model.encoder.layers.': 'resblocks.', 'layer_norm1': 'ln_1', 'layer_norm2': 'ln_2', '.fc1.': '.c_fc.', '.fc2.': '.c_proj.', '.self_attn': '.attn', 'transformer.text_model.final_layer_norm.': 'ln_final.', 'transformer.text_model.embeddings.token_embedding.weight': 'token_embedding.weight', 'transformer.text_model.embeddings.position_embedding.weight': 'positional_embedding'}}} SD_2_TEXT_ENCODER_KEYS_TO_IGNORE = ['cond_stage_model.model.transformer.resblocks.23.attn.in_proj_bias', 'cond_stage_model.model.transformer.resblocks.23.attn.in_proj_weight', 'cond_stage_model.model.transformer.resblocks.23.attn.out_proj.bias', 'cond_stage_model.model.transformer.resblocks.23.attn.out_proj.weight', 'cond_stage_model.model.transformer.resblocks.23.ln_1.bias', 'cond_stage_model.model.transformer.resblocks.23.ln_1.weight', 'cond_stage_model.model.transformer.resblocks.23.ln_2.bias', 'cond_stage_model.model.transformer.resblocks.23.ln_2.weight', 'cond_stage_model.model.transformer.resblocks.23.mlp.c_fc.bias', 'cond_stage_model.model.transformer.resblocks.23.mlp.c_fc.weight', 'cond_stage_model.model.transformer.resblocks.23.mlp.c_proj.bias', 'cond_stage_model.model.transformer.resblocks.23.mlp.c_proj.weight', 'cond_stage_model.model.text_projection'] 
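# --- Illustrative sketch (not part of the original module) ---
# The CHECKPOINT_KEY_NAMES table above is what lets single-file loading work
# without a user-supplied config: the presence (and sometimes the shape) of a
# single marker key in the flat checkpoint identifies the model family, and
# DIFFUSERS_DEFAULT_PIPELINE_PATHS supplies the repo whose config is reused.
# The toy helper below mimics that lookup for a handful of cases; the name
# `_toy_infer_repo` is hypothetical, and the real logic lives in
# `infer_diffusers_model_type` / `fetch_diffusers_config` defined later in
# this file.
def _toy_infer_repo(state_dict):
    # Order matters: more specific markers are checked before the generic
    # SD 1.x fallback, mirroring the real implementation.
    if CHECKPOINT_KEY_NAMES['sd3'] in state_dict:
        model_type = 'sd3'
    elif CHECKPOINT_KEY_NAMES['xl_base'] in state_dict:
        model_type = 'xl_base'
    elif CHECKPOINT_KEY_NAMES['v2'] in state_dict and state_dict[CHECKPOINT_KEY_NAMES['v2']].shape[-1] == 1024:
        model_type = 'v2'
    else:
        model_type = 'v1'
    return DIFFUSERS_DEFAULT_PIPELINE_PATHS[model_type]['pretrained_model_name_or_path']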
SCHEDULER_DEFAULT_CONFIG = {'beta_schedule': 'scaled_linear', 'beta_start': 0.00085, 'beta_end': 0.012, 'interpolation_type': 'linear', 'num_train_timesteps': 1000, 'prediction_type': 'epsilon', 'sample_max_value': 1.0, 'set_alpha_to_one': False, 'skip_prk_steps': True, 'steps_offset': 1, 'timestep_spacing': 'leading'} LDM_VAE_KEYS = ['first_stage_model.', 'vae.'] LDM_VAE_DEFAULT_SCALING_FACTOR = 0.18215 PLAYGROUND_VAE_SCALING_FACTOR = 0.5 LDM_UNET_KEY = 'model.diffusion_model.' LDM_CONTROLNET_KEY = 'control_model.' LDM_CLIP_PREFIX_TO_REMOVE = ['cond_stage_model.transformer.', 'conditioner.embedders.0.transformer.'] LDM_OPEN_CLIP_TEXT_PROJECTION_DIM = 1024 SCHEDULER_LEGACY_KWARGS = ['prediction_type', 'scheduler_type'] VALID_URL_PREFIXES = ['https://huggingface.co/', 'huggingface.co/', 'hf.co/', 'https://hf.co/'] class SingleFileComponentError(Exception): def __init__(self, message=None): self.message = message super().__init__(self.message) def is_valid_url(url): result = urlparse(url) if result.scheme and result.netloc: return True return False def _extract_repo_id_and_weights_name(pretrained_model_name_or_path): if not is_valid_url(pretrained_model_name_or_path): raise ValueError('Invalid `pretrained_model_name_or_path` provided. Please set it to a valid URL.') pattern = '([^/]+)/([^/]+)/(?:blob/main/)?(.+)' weights_name = None repo_id = (None,) for prefix in VALID_URL_PREFIXES: pretrained_model_name_or_path = pretrained_model_name_or_path.replace(prefix, '') match = re.match(pattern, pretrained_model_name_or_path) if not match: logger.warning('Unable to identify the repo_id and weights_name from the provided URL.') return (repo_id, weights_name) repo_id = f'{match.group(1)}/{match.group(2)}' weights_name = match.group(3) return (repo_id, weights_name) def _is_model_weights_in_cached_folder(cached_folder, name): pretrained_model_name_or_path = os.path.join(cached_folder, name) weights_exist = False for weights_name in [WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME]: if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): weights_exist = True return weights_exist def _is_legacy_scheduler_kwargs(kwargs): return any((k in SCHEDULER_LEGACY_KWARGS for k in kwargs.keys())) def load_single_file_checkpoint(pretrained_model_link_or_path, force_download=False, proxies=None, token=None, cache_dir=None, local_files_only=None, revision=None): if os.path.isfile(pretrained_model_link_or_path): pretrained_model_link_or_path = pretrained_model_link_or_path else: (repo_id, weights_name) = _extract_repo_id_and_weights_name(pretrained_model_link_or_path) pretrained_model_link_or_path = _get_model_file(repo_id, weights_name=weights_name, force_download=force_download, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision) checkpoint = load_state_dict(pretrained_model_link_or_path) while 'state_dict' in checkpoint: checkpoint = checkpoint['state_dict'] return checkpoint def fetch_original_config(original_config_file, local_files_only=False): if os.path.isfile(original_config_file): with open(original_config_file, 'r') as fp: original_config_file = fp.read() elif is_valid_url(original_config_file): if local_files_only: raise ValueError('`local_files_only` is set to True, but a URL was provided as `original_config_file`. Please provide a valid local file path.') original_config_file = BytesIO(requests.get(original_config_file).content) else: raise ValueError('Invalid `original_config_file` provided. 
Please set it to a valid file path or URL.') original_config = yaml.safe_load(original_config_file) return original_config def is_clip_model(checkpoint): if CHECKPOINT_KEY_NAMES['clip'] in checkpoint: return True return False def is_clip_sdxl_model(checkpoint): if CHECKPOINT_KEY_NAMES['clip_sdxl'] in checkpoint: return True return False def is_clip_sd3_model(checkpoint): if CHECKPOINT_KEY_NAMES['clip_sd3'] in checkpoint: return True return False def is_open_clip_model(checkpoint): if CHECKPOINT_KEY_NAMES['open_clip'] in checkpoint: return True return False def is_open_clip_sdxl_model(checkpoint): if CHECKPOINT_KEY_NAMES['open_clip_sdxl'] in checkpoint: return True return False def is_open_clip_sd3_model(checkpoint): if CHECKPOINT_KEY_NAMES['open_clip_sd3'] in checkpoint: return True return False def is_open_clip_sdxl_refiner_model(checkpoint): if CHECKPOINT_KEY_NAMES['open_clip_sdxl_refiner'] in checkpoint: return True return False def is_clip_model_in_single_file(class_obj, checkpoint): is_clip_in_checkpoint = any([is_clip_model(checkpoint), is_clip_sd3_model(checkpoint), is_open_clip_model(checkpoint), is_open_clip_sdxl_model(checkpoint), is_open_clip_sdxl_refiner_model(checkpoint), is_open_clip_sd3_model(checkpoint)]) if (class_obj.__name__ == 'CLIPTextModel' or class_obj.__name__ == 'CLIPTextModelWithProjection') and is_clip_in_checkpoint: return True return False def infer_diffusers_model_type(checkpoint): if CHECKPOINT_KEY_NAMES['inpainting'] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES['inpainting']].shape[1] == 9: if CHECKPOINT_KEY_NAMES['v2'] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES['v2']].shape[-1] == 1024: model_type = 'inpainting_v2' elif CHECKPOINT_KEY_NAMES['xl_base'] in checkpoint: model_type = 'xl_inpaint' else: model_type = 'inpainting' elif CHECKPOINT_KEY_NAMES['v2'] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES['v2']].shape[-1] == 1024: model_type = 'v2' elif CHECKPOINT_KEY_NAMES['playground-v2-5'] in checkpoint: model_type = 'playground-v2-5' elif CHECKPOINT_KEY_NAMES['xl_base'] in checkpoint: model_type = 'xl_base' elif CHECKPOINT_KEY_NAMES['xl_refiner'] in checkpoint: model_type = 'xl_refiner' elif CHECKPOINT_KEY_NAMES['upscale'] in checkpoint: model_type = 'upscale' elif CHECKPOINT_KEY_NAMES['controlnet'] in checkpoint: model_type = 'controlnet' elif CHECKPOINT_KEY_NAMES['stable_cascade_stage_c'] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES['stable_cascade_stage_c']].shape[0] == 1536: model_type = 'stable_cascade_stage_c_lite' elif CHECKPOINT_KEY_NAMES['stable_cascade_stage_c'] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES['stable_cascade_stage_c']].shape[0] == 2048: model_type = 'stable_cascade_stage_c' elif CHECKPOINT_KEY_NAMES['stable_cascade_stage_b'] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES['stable_cascade_stage_b']].shape[-1] == 576: model_type = 'stable_cascade_stage_b_lite' elif CHECKPOINT_KEY_NAMES['stable_cascade_stage_b'] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES['stable_cascade_stage_b']].shape[-1] == 640: model_type = 'stable_cascade_stage_b' elif CHECKPOINT_KEY_NAMES['sd3'] in checkpoint: model_type = 'sd3' elif CHECKPOINT_KEY_NAMES['animatediff'] in checkpoint: if CHECKPOINT_KEY_NAMES['animatediff_scribble'] in checkpoint: model_type = 'animatediff_scribble' elif CHECKPOINT_KEY_NAMES['animatediff_rgb'] in checkpoint: model_type = 'animatediff_rgb' elif CHECKPOINT_KEY_NAMES['animatediff_v2'] in checkpoint: model_type = 'animatediff_v2' elif checkpoint[CHECKPOINT_KEY_NAMES['animatediff_sdxl_beta']].shape[-1] == 
320: model_type = 'animatediff_sdxl_beta' elif checkpoint[CHECKPOINT_KEY_NAMES['animatediff']].shape[1] == 24: model_type = 'animatediff_v1' else: model_type = 'animatediff_v3' elif any((key in checkpoint for key in CHECKPOINT_KEY_NAMES['flux'])): if any((g in checkpoint for g in ['guidance_in.in_layer.bias', 'model.diffusion_model.guidance_in.in_layer.bias'])): model_type = 'flux-dev' else: model_type = 'flux-schnell' else: model_type = 'v1' return model_type def fetch_diffusers_config(checkpoint): model_type = infer_diffusers_model_type(checkpoint) model_path = DIFFUSERS_DEFAULT_PIPELINE_PATHS[model_type] return model_path def set_image_size(checkpoint, image_size=None): if image_size: return image_size model_type = infer_diffusers_model_type(checkpoint) image_size = DIFFUSERS_TO_LDM_DEFAULT_IMAGE_SIZE_MAP[model_type] return image_size def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ['query.weight', 'key.weight', 'value.weight'] for key in keys: if '.'.join(key.split('.')[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif 'proj_attn.weight' in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def create_unet_diffusers_config_from_ldm(original_config, checkpoint, image_size=None, upcast_attention=None, num_in_channels=None): if image_size is not None: deprecation_message = 'Configuring UNet2DConditionModel with the `image_size` argument to `from_single_file`is deprecated and will be ignored in future versions.' deprecate('image_size', '1.0.0', deprecation_message) image_size = set_image_size(checkpoint, image_size=image_size) if 'unet_config' in original_config['model']['params'] and original_config['model']['params']['unet_config'] is not None: unet_params = original_config['model']['params']['unet_config']['params'] else: unet_params = original_config['model']['params']['network_config']['params'] if num_in_channels is not None: deprecation_message = 'Configuring UNet2DConditionModel with the `num_in_channels` argument to `from_single_file`is deprecated and will be ignored in future versions.' 
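# Descriptive note on the config derivation that follows: the LDM
# `unet_config` params are translated into a UNet2DConditionModel config.
# `block_out_channels` is `model_channels * channel_mult`; each level becomes
# a CrossAttnDownBlock2D (or CrossAttnUpBlock2D) when the running downsample
# factor appears in `attention_resolutions` and a plain Down/UpBlock2D
# otherwise; `sample_size` is the pixel `image_size` divided by the VAE scale
# factor (2 ** (len(ch_mult) - 1)). SDXL-style checkpoints are detected via
# `num_classes == 'sequential'` together with a 2048/1280 context dim, which
# switches on the `text_time` addition embedding.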
deprecate('image_size', '1.0.0', deprecation_message) in_channels = num_in_channels else: in_channels = unet_params['in_channels'] vae_params = original_config['model']['params']['first_stage_config']['params']['ddconfig'] block_out_channels = [unet_params['model_channels'] * mult for mult in unet_params['channel_mult']] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = 'CrossAttnDownBlock2D' if resolution in unet_params['attention_resolutions'] else 'DownBlock2D' down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = 'CrossAttnUpBlock2D' if resolution in unet_params['attention_resolutions'] else 'UpBlock2D' up_block_types.append(block_type) resolution //= 2 if unet_params['transformer_depth'] is not None: transformer_layers_per_block = unet_params['transformer_depth'] if isinstance(unet_params['transformer_depth'], int) else list(unet_params['transformer_depth']) else: transformer_layers_per_block = 1 vae_scale_factor = 2 ** (len(vae_params['ch_mult']) - 1) head_dim = unet_params['num_heads'] if 'num_heads' in unet_params else None use_linear_projection = unet_params['use_linear_in_transformer'] if 'use_linear_in_transformer' in unet_params else False if use_linear_projection: if head_dim is None: head_dim_mult = unet_params['model_channels'] // unet_params['num_head_channels'] head_dim = [head_dim_mult * c for c in list(unet_params['channel_mult'])] class_embed_type = None addition_embed_type = None addition_time_embed_dim = None projection_class_embeddings_input_dim = None context_dim = None if unet_params['context_dim'] is not None: context_dim = unet_params['context_dim'] if isinstance(unet_params['context_dim'], int) else unet_params['context_dim'][0] if 'num_classes' in unet_params: if unet_params['num_classes'] == 'sequential': if context_dim in [2048, 1280]: addition_embed_type = 'text_time' addition_time_embed_dim = 256 else: class_embed_type = 'projection' assert 'adm_in_channels' in unet_params projection_class_embeddings_input_dim = unet_params['adm_in_channels'] config = {'sample_size': image_size // vae_scale_factor, 'in_channels': in_channels, 'down_block_types': down_block_types, 'block_out_channels': block_out_channels, 'layers_per_block': unet_params['num_res_blocks'], 'cross_attention_dim': context_dim, 'attention_head_dim': head_dim, 'use_linear_projection': use_linear_projection, 'class_embed_type': class_embed_type, 'addition_embed_type': addition_embed_type, 'addition_time_embed_dim': addition_time_embed_dim, 'projection_class_embeddings_input_dim': projection_class_embeddings_input_dim, 'transformer_layers_per_block': transformer_layers_per_block} if upcast_attention is not None: deprecation_message = 'Configuring UNet2DConditionModel with the `upcast_attention` argument to `from_single_file`is deprecated and will be ignored in future versions.' 
deprecate('image_size', '1.0.0', deprecation_message) config['upcast_attention'] = upcast_attention if 'disable_self_attentions' in unet_params: config['only_cross_attention'] = unet_params['disable_self_attentions'] if 'num_classes' in unet_params and isinstance(unet_params['num_classes'], int): config['num_class_embeds'] = unet_params['num_classes'] config['out_channels'] = unet_params['out_channels'] config['up_block_types'] = up_block_types return config def create_controlnet_diffusers_config_from_ldm(original_config, checkpoint, image_size=None, **kwargs): if image_size is not None: deprecation_message = 'Configuring ControlNetModel with the `image_size` argumentis deprecated and will be ignored in future versions.' deprecate('image_size', '1.0.0', deprecation_message) image_size = set_image_size(checkpoint, image_size=image_size) unet_params = original_config['model']['params']['control_stage_config']['params'] diffusers_unet_config = create_unet_diffusers_config_from_ldm(original_config, image_size=image_size) controlnet_config = {'conditioning_channels': unet_params['hint_channels'], 'in_channels': diffusers_unet_config['in_channels'], 'down_block_types': diffusers_unet_config['down_block_types'], 'block_out_channels': diffusers_unet_config['block_out_channels'], 'layers_per_block': diffusers_unet_config['layers_per_block'], 'cross_attention_dim': diffusers_unet_config['cross_attention_dim'], 'attention_head_dim': diffusers_unet_config['attention_head_dim'], 'use_linear_projection': diffusers_unet_config['use_linear_projection'], 'class_embed_type': diffusers_unet_config['class_embed_type'], 'addition_embed_type': diffusers_unet_config['addition_embed_type'], 'addition_time_embed_dim': diffusers_unet_config['addition_time_embed_dim'], 'projection_class_embeddings_input_dim': diffusers_unet_config['projection_class_embeddings_input_dim'], 'transformer_layers_per_block': diffusers_unet_config['transformer_layers_per_block']} return controlnet_config def create_vae_diffusers_config_from_ldm(original_config, checkpoint, image_size=None, scaling_factor=None): if image_size is not None: deprecation_message = 'Configuring AutoencoderKL with the `image_size` argumentis deprecated and will be ignored in future versions.' 
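# Descriptive note on the VAE config logic below: the `scaling_factor` is
# picked in priority order. Checkpoints that carry `edm_mean` / `edm_std`
# statistics (Playground v2.5) use PLAYGROUND_VAE_SCALING_FACTOR; otherwise
# the `scale_factor` from the original LDM config is used; and
# LDM_VAE_DEFAULT_SCALING_FACTOR (0.18215) is the final fallback. The
# encoder/decoder block lists are derived from `ch_mult`, with one
# DownEncoderBlock2D / UpDecoderBlock2D per resolution level, and
# `latent_channels` comes from the LDM `z_channels` value.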
deprecate('image_size', '1.0.0', deprecation_message) image_size = set_image_size(checkpoint, image_size=image_size) if 'edm_mean' in checkpoint and 'edm_std' in checkpoint: latents_mean = checkpoint['edm_mean'] latents_std = checkpoint['edm_std'] else: latents_mean = None latents_std = None vae_params = original_config['model']['params']['first_stage_config']['params']['ddconfig'] if scaling_factor is None and latents_mean is not None and (latents_std is not None): scaling_factor = PLAYGROUND_VAE_SCALING_FACTOR elif scaling_factor is None and 'scale_factor' in original_config['model']['params']: scaling_factor = original_config['model']['params']['scale_factor'] elif scaling_factor is None: scaling_factor = LDM_VAE_DEFAULT_SCALING_FACTOR block_out_channels = [vae_params['ch'] * mult for mult in vae_params['ch_mult']] down_block_types = ['DownEncoderBlock2D'] * len(block_out_channels) up_block_types = ['UpDecoderBlock2D'] * len(block_out_channels) config = {'sample_size': image_size, 'in_channels': vae_params['in_channels'], 'out_channels': vae_params['out_ch'], 'down_block_types': down_block_types, 'up_block_types': up_block_types, 'block_out_channels': block_out_channels, 'latent_channels': vae_params['z_channels'], 'layers_per_block': vae_params['num_res_blocks'], 'scaling_factor': scaling_factor} if latents_mean is not None and latents_std is not None: config.update({'latents_mean': latents_mean, 'latents_std': latents_std}) return config def update_unet_resnet_ldm_to_diffusers(ldm_keys, new_checkpoint, checkpoint, mapping=None): for ldm_key in ldm_keys: diffusers_key = ldm_key.replace('in_layers.0', 'norm1').replace('in_layers.2', 'conv1').replace('out_layers.0', 'norm2').replace('out_layers.3', 'conv2').replace('emb_layers.1', 'time_emb_proj').replace('skip_connection', 'conv_shortcut') if mapping: diffusers_key = diffusers_key.replace(mapping['old'], mapping['new']) new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) def update_unet_attention_ldm_to_diffusers(ldm_keys, new_checkpoint, checkpoint, mapping): for ldm_key in ldm_keys: diffusers_key = ldm_key.replace(mapping['old'], mapping['new']) new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) def update_vae_resnet_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): for ldm_key in keys: diffusers_key = ldm_key.replace(mapping['old'], mapping['new']).replace('nin_shortcut', 'conv_shortcut') new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) def update_vae_attentions_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): for ldm_key in keys: diffusers_key = ldm_key.replace(mapping['old'], mapping['new']).replace('norm.weight', 'group_norm.weight').replace('norm.bias', 'group_norm.bias').replace('q.weight', 'to_q.weight').replace('q.bias', 'to_q.bias').replace('k.weight', 'to_k.weight').replace('k.bias', 'to_k.bias').replace('v.weight', 'to_v.weight').replace('v.bias', 'to_v.bias').replace('proj_out.weight', 'to_out.0.weight').replace('proj_out.bias', 'to_out.0.bias') new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) shape = new_checkpoint[diffusers_key].shape if len(shape) == 3: new_checkpoint[diffusers_key] = new_checkpoint[diffusers_key][:, :, 0] elif len(shape) == 4: new_checkpoint[diffusers_key] = new_checkpoint[diffusers_key][:, :, 0, 0] def convert_stable_cascade_unet_single_file_to_diffusers(checkpoint, **kwargs): is_stage_c = 'clip_txt_mapper.weight' in checkpoint if is_stage_c: state_dict = {} for key in checkpoint.keys(): if key.endswith('in_proj_weight'): weights = checkpoint[key].chunk(3, 
0) state_dict[key.replace('attn.in_proj_weight', 'to_q.weight')] = weights[0] state_dict[key.replace('attn.in_proj_weight', 'to_k.weight')] = weights[1] state_dict[key.replace('attn.in_proj_weight', 'to_v.weight')] = weights[2] elif key.endswith('in_proj_bias'): weights = checkpoint[key].chunk(3, 0) state_dict[key.replace('attn.in_proj_bias', 'to_q.bias')] = weights[0] state_dict[key.replace('attn.in_proj_bias', 'to_k.bias')] = weights[1] state_dict[key.replace('attn.in_proj_bias', 'to_v.bias')] = weights[2] elif key.endswith('out_proj.weight'): weights = checkpoint[key] state_dict[key.replace('attn.out_proj.weight', 'to_out.0.weight')] = weights elif key.endswith('out_proj.bias'): weights = checkpoint[key] state_dict[key.replace('attn.out_proj.bias', 'to_out.0.bias')] = weights else: state_dict[key] = checkpoint[key] else: state_dict = {} for key in checkpoint.keys(): if key.endswith('in_proj_weight'): weights = checkpoint[key].chunk(3, 0) state_dict[key.replace('attn.in_proj_weight', 'to_q.weight')] = weights[0] state_dict[key.replace('attn.in_proj_weight', 'to_k.weight')] = weights[1] state_dict[key.replace('attn.in_proj_weight', 'to_v.weight')] = weights[2] elif key.endswith('in_proj_bias'): weights = checkpoint[key].chunk(3, 0) state_dict[key.replace('attn.in_proj_bias', 'to_q.bias')] = weights[0] state_dict[key.replace('attn.in_proj_bias', 'to_k.bias')] = weights[1] state_dict[key.replace('attn.in_proj_bias', 'to_v.bias')] = weights[2] elif key.endswith('out_proj.weight'): weights = checkpoint[key] state_dict[key.replace('attn.out_proj.weight', 'to_out.0.weight')] = weights elif key.endswith('out_proj.bias'): weights = checkpoint[key] state_dict[key.replace('attn.out_proj.bias', 'to_out.0.bias')] = weights elif key.endswith('clip_mapper.weight'): weights = checkpoint[key] state_dict[key.replace('clip_mapper.weight', 'clip_txt_pooled_mapper.weight')] = weights elif key.endswith('clip_mapper.bias'): weights = checkpoint[key] state_dict[key.replace('clip_mapper.bias', 'clip_txt_pooled_mapper.bias')] = weights else: state_dict[key] = checkpoint[key] return state_dict def convert_ldm_unet_checkpoint(checkpoint, config, extract_ema=False, **kwargs): unet_state_dict = {} keys = list(checkpoint.keys()) unet_key = LDM_UNET_KEY if sum((k.startswith('model_ema') for k in keys)) > 100 and extract_ema: logger.warning('Checkpoint has both EMA and non-EMA weights.') logger.warning('In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.') for key in keys: if key.startswith('model.diffusion_model'): flat_ema_key = 'model_ema.' + ''.join(key.split('.')[1:]) unet_state_dict[key.replace(unet_key, '')] = checkpoint.get(flat_ema_key) else: if sum((k.startswith('model_ema') for k in keys)) > 100: logger.warning('In this conversion only the non-EMA weights are extracted. 
If you want to instead extract the EMA weights (usually better for inference), please make sure to add the `--extract_ema` flag.') for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, '')] = checkpoint.get(key) new_checkpoint = {} ldm_unet_keys = DIFFUSERS_TO_LDM_MAPPING['unet']['layers'] for (diffusers_key, ldm_key) in ldm_unet_keys.items(): if ldm_key not in unet_state_dict: continue new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] if 'class_embed_type' in config and config['class_embed_type'] in ['timestep', 'projection']: class_embed_keys = DIFFUSERS_TO_LDM_MAPPING['unet']['class_embed_type'] for (diffusers_key, ldm_key) in class_embed_keys.items(): new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] if 'addition_embed_type' in config and config['addition_embed_type'] == 'text_time': addition_embed_keys = DIFFUSERS_TO_LDM_MAPPING['unet']['addition_embed_type'] for (diffusers_key, ldm_key) in addition_embed_keys.items(): new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] if 'num_class_embeds' in config: if config['num_class_embeds'] is not None and 'label_emb.weight' in unet_state_dict: new_checkpoint['class_embedding.weight'] = unet_state_dict['label_emb.weight'] num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'input_blocks' in layer}) input_blocks = {layer_id: [key for key in unet_state_dict if f'input_blocks.{layer_id}' in key] for layer_id in range(num_input_blocks)} num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'middle_block' in layer}) middle_blocks = {layer_id: [key for key in unet_state_dict if f'middle_block.{layer_id}' in key] for layer_id in range(num_middle_blocks)} num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'output_blocks' in layer}) output_blocks = {layer_id: [key for key in unet_state_dict if f'output_blocks.{layer_id}' in key] for layer_id in range(num_output_blocks)} for i in range(1, num_input_blocks): block_id = (i - 1) // (config['layers_per_block'] + 1) layer_in_block_id = (i - 1) % (config['layers_per_block'] + 1) resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key and f'input_blocks.{i}.0.op' not in key] update_unet_resnet_ldm_to_diffusers(resnets, new_checkpoint, unet_state_dict, {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}) if f'input_blocks.{i}.0.op.weight' in unet_state_dict: new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = unet_state_dict.get(f'input_blocks.{i}.0.op.weight') new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = unet_state_dict.get(f'input_blocks.{i}.0.op.bias') attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if attentions: update_unet_attention_ldm_to_diffusers(attentions, new_checkpoint, unet_state_dict, {'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}'}) for key in middle_blocks.keys(): diffusers_key = max(key - 1, 0) if key % 2 == 0: update_unet_resnet_ldm_to_diffusers(middle_blocks[key], new_checkpoint, unet_state_dict, mapping={'old': f'middle_block.{key}', 'new': f'mid_block.resnets.{diffusers_key}'}) else: update_unet_attention_ldm_to_diffusers(middle_blocks[key], new_checkpoint, unet_state_dict, mapping={'old': f'middle_block.{key}', 'new': f'mid_block.attentions.{diffusers_key}'}) for i in range(num_output_blocks): block_id = i // (config['layers_per_block'] + 1) layer_in_block_id = 
i % (config['layers_per_block'] + 1) resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key and f'output_blocks.{i}.0.op' not in key] update_unet_resnet_ldm_to_diffusers(resnets, new_checkpoint, unet_state_dict, {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}) attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key and f'output_blocks.{i}.1.conv' not in key] if attentions: update_unet_attention_ldm_to_diffusers(attentions, new_checkpoint, unet_state_dict, {'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}'}) if f'output_blocks.{i}.1.conv.weight' in unet_state_dict: new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = unet_state_dict[f'output_blocks.{i}.1.conv.weight'] new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = unet_state_dict[f'output_blocks.{i}.1.conv.bias'] if f'output_blocks.{i}.2.conv.weight' in unet_state_dict: new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = unet_state_dict[f'output_blocks.{i}.2.conv.weight'] new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = unet_state_dict[f'output_blocks.{i}.2.conv.bias'] return new_checkpoint def convert_controlnet_checkpoint(checkpoint, config, **kwargs): if 'time_embed.0.weight' in checkpoint: controlnet_state_dict = checkpoint else: controlnet_state_dict = {} keys = list(checkpoint.keys()) controlnet_key = LDM_CONTROLNET_KEY for key in keys: if key.startswith(controlnet_key): controlnet_state_dict[key.replace(controlnet_key, '')] = checkpoint.get(key) new_checkpoint = {} ldm_controlnet_keys = DIFFUSERS_TO_LDM_MAPPING['controlnet']['layers'] for (diffusers_key, ldm_key) in ldm_controlnet_keys.items(): if ldm_key not in controlnet_state_dict: continue new_checkpoint[diffusers_key] = controlnet_state_dict[ldm_key] num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in controlnet_state_dict if 'input_blocks' in layer}) input_blocks = {layer_id: [key for key in controlnet_state_dict if f'input_blocks.{layer_id}' in key] for layer_id in range(num_input_blocks)} for i in range(1, num_input_blocks): block_id = (i - 1) // (config['layers_per_block'] + 1) layer_in_block_id = (i - 1) % (config['layers_per_block'] + 1) resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key and f'input_blocks.{i}.0.op' not in key] update_unet_resnet_ldm_to_diffusers(resnets, new_checkpoint, controlnet_state_dict, {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}) if f'input_blocks.{i}.0.op.weight' in controlnet_state_dict: new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = controlnet_state_dict.get(f'input_blocks.{i}.0.op.weight') new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = controlnet_state_dict.get(f'input_blocks.{i}.0.op.bias') attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if attentions: update_unet_attention_ldm_to_diffusers(attentions, new_checkpoint, controlnet_state_dict, {'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}'}) for i in range(num_input_blocks): new_checkpoint[f'controlnet_down_blocks.{i}.weight'] = controlnet_state_dict.get(f'zero_convs.{i}.0.weight') new_checkpoint[f'controlnet_down_blocks.{i}.bias'] = controlnet_state_dict.get(f'zero_convs.{i}.0.bias') num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in controlnet_state_dict if 'middle_block' in 
layer}) middle_blocks = {layer_id: [key for key in controlnet_state_dict if f'middle_block.{layer_id}' in key] for layer_id in range(num_middle_blocks)} for key in middle_blocks.keys(): diffusers_key = max(key - 1, 0) if key % 2 == 0: update_unet_resnet_ldm_to_diffusers(middle_blocks[key], new_checkpoint, controlnet_state_dict, mapping={'old': f'middle_block.{key}', 'new': f'mid_block.resnets.{diffusers_key}'}) else: update_unet_attention_ldm_to_diffusers(middle_blocks[key], new_checkpoint, controlnet_state_dict, mapping={'old': f'middle_block.{key}', 'new': f'mid_block.attentions.{diffusers_key}'}) new_checkpoint['controlnet_mid_block.weight'] = controlnet_state_dict.get('middle_block_out.0.weight') new_checkpoint['controlnet_mid_block.bias'] = controlnet_state_dict.get('middle_block_out.0.bias') cond_embedding_blocks = {'.'.join(layer.split('.')[:2]) for layer in controlnet_state_dict if 'input_hint_block' in layer and 'input_hint_block.0' not in layer and ('input_hint_block.14' not in layer)} num_cond_embedding_blocks = len(cond_embedding_blocks) for idx in range(1, num_cond_embedding_blocks + 1): diffusers_idx = idx - 1 cond_block_id = 2 * idx new_checkpoint[f'controlnet_cond_embedding.blocks.{diffusers_idx}.weight'] = controlnet_state_dict.get(f'input_hint_block.{cond_block_id}.weight') new_checkpoint[f'controlnet_cond_embedding.blocks.{diffusers_idx}.bias'] = controlnet_state_dict.get(f'input_hint_block.{cond_block_id}.bias') return new_checkpoint def convert_ldm_vae_checkpoint(checkpoint, config): vae_state_dict = {} keys = list(checkpoint.keys()) vae_key = '' for ldm_vae_key in LDM_VAE_KEYS: if any((k.startswith(ldm_vae_key) for k in keys)): vae_key = ldm_vae_key for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, '')] = checkpoint.get(key) new_checkpoint = {} vae_diffusers_ldm_map = DIFFUSERS_TO_LDM_MAPPING['vae'] for (diffusers_key, ldm_key) in vae_diffusers_ldm_map.items(): if ldm_key not in vae_state_dict: continue new_checkpoint[diffusers_key] = vae_state_dict[ldm_key] num_down_blocks = len(config['down_block_types']) down_blocks = {layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks)} for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key] update_vae_resnet_ldm_to_diffusers(resnets, new_checkpoint, vae_state_dict, mapping={'old': f'down.{i}.block', 'new': f'down_blocks.{i}.resnets'}) if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.get(f'encoder.down.{i}.downsample.conv.weight') new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.get(f'encoder.down.{i}.downsample.conv.bias') mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key] update_vae_resnet_ldm_to_diffusers(resnets, new_checkpoint, vae_state_dict, mapping={'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'}) mid_attentions = [key for key in vae_state_dict if 'encoder.mid.attn' in key] update_vae_attentions_ldm_to_diffusers(mid_attentions, new_checkpoint, vae_state_dict, mapping={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}) num_up_blocks = len(config['up_block_types']) up_blocks = {layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in 
key] for layer_id in range(num_up_blocks)} for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key] update_vae_resnet_ldm_to_diffusers(resnets, new_checkpoint, vae_state_dict, mapping={'old': f'up.{block_id}.block', 'new': f'up_blocks.{i}.resnets'}) if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[f'decoder.up.{block_id}.upsample.conv.weight'] new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[f'decoder.up.{block_id}.upsample.conv.bias'] mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key] update_vae_resnet_ldm_to_diffusers(resnets, new_checkpoint, vae_state_dict, mapping={'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'}) mid_attentions = [key for key in vae_state_dict if 'decoder.mid.attn' in key] update_vae_attentions_ldm_to_diffusers(mid_attentions, new_checkpoint, vae_state_dict, mapping={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}) conv_attn_to_linear(new_checkpoint) return new_checkpoint def convert_ldm_clip_checkpoint(checkpoint, remove_prefix=None): keys = list(checkpoint.keys()) text_model_dict = {} remove_prefixes = [] remove_prefixes.extend(LDM_CLIP_PREFIX_TO_REMOVE) if remove_prefix: remove_prefixes.append(remove_prefix) for key in keys: for prefix in remove_prefixes: if key.startswith(prefix): diffusers_key = key.replace(prefix, '') text_model_dict[diffusers_key] = checkpoint.get(key) return text_model_dict def convert_open_clip_checkpoint(text_model, checkpoint, prefix='cond_stage_model.model.'): text_model_dict = {} text_proj_key = prefix + 'text_projection' if text_proj_key in checkpoint: text_proj_dim = int(checkpoint[text_proj_key].shape[0]) elif hasattr(text_model.config, 'projection_dim'): text_proj_dim = text_model.config.projection_dim else: text_proj_dim = LDM_OPEN_CLIP_TEXT_PROJECTION_DIM keys = list(checkpoint.keys()) keys_to_ignore = SD_2_TEXT_ENCODER_KEYS_TO_IGNORE openclip_diffusers_ldm_map = DIFFUSERS_TO_LDM_MAPPING['openclip']['layers'] for (diffusers_key, ldm_key) in openclip_diffusers_ldm_map.items(): ldm_key = prefix + ldm_key if ldm_key not in checkpoint: continue if ldm_key in keys_to_ignore: continue if ldm_key.endswith('text_projection'): text_model_dict[diffusers_key] = checkpoint[ldm_key].T.contiguous() else: text_model_dict[diffusers_key] = checkpoint[ldm_key] for key in keys: if key in keys_to_ignore: continue if not key.startswith(prefix + 'transformer.'): continue diffusers_key = key.replace(prefix + 'transformer.', '') transformer_diffusers_to_ldm_map = DIFFUSERS_TO_LDM_MAPPING['openclip']['transformer'] for (new_key, old_key) in transformer_diffusers_to_ldm_map.items(): diffusers_key = diffusers_key.replace(old_key, new_key).replace('.in_proj_weight', '').replace('.in_proj_bias', '') if key.endswith('.in_proj_weight'): weight_value = checkpoint.get(key) text_model_dict[diffusers_key + '.q_proj.weight'] = weight_value[:text_proj_dim, :].clone().detach() text_model_dict[diffusers_key + '.k_proj.weight'] = weight_value[text_proj_dim:text_proj_dim * 2, :].clone().detach() text_model_dict[diffusers_key + '.v_proj.weight'] = weight_value[text_proj_dim * 2:, :].clone().detach() elif key.endswith('.in_proj_bias'): 
weight_value = checkpoint.get(key) text_model_dict[diffusers_key + '.q_proj.bias'] = weight_value[:text_proj_dim].clone().detach() text_model_dict[diffusers_key + '.k_proj.bias'] = weight_value[text_proj_dim:text_proj_dim * 2].clone().detach() text_model_dict[diffusers_key + '.v_proj.bias'] = weight_value[text_proj_dim * 2:].clone().detach() else: text_model_dict[diffusers_key] = checkpoint.get(key) return text_model_dict def create_diffusers_clip_model_from_ldm(cls, checkpoint, subfolder='', config=None, torch_dtype=None, local_files_only=None, is_legacy_loading=False): if config: config = {'pretrained_model_name_or_path': config} else: config = fetch_diffusers_config(checkpoint) if is_legacy_loading: logger.warning('Detected legacy CLIP loading behavior. Please run `from_single_file` with `local_files_only=False once to update the local cache directory with the necessary CLIP model config files. Attempting to load CLIP model from legacy cache directory.') if is_clip_model(checkpoint) or is_clip_sdxl_model(checkpoint): clip_config = 'openai/clip-vit-large-patch14' config['pretrained_model_name_or_path'] = clip_config subfolder = '' elif is_open_clip_model(checkpoint): clip_config = 'stabilityai/stable-diffusion-2' config['pretrained_model_name_or_path'] = clip_config subfolder = 'text_encoder' else: clip_config = 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' config['pretrained_model_name_or_path'] = clip_config subfolder = '' model_config = cls.config_class.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): model = cls(model_config) position_embedding_dim = model.text_model.embeddings.position_embedding.weight.shape[-1] if is_clip_model(checkpoint): diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint) elif is_clip_sdxl_model(checkpoint) and checkpoint[CHECKPOINT_KEY_NAMES['clip_sdxl']].shape[-1] == position_embedding_dim: diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint) elif is_clip_sd3_model(checkpoint) and checkpoint[CHECKPOINT_KEY_NAMES['clip_sd3']].shape[-1] == position_embedding_dim: diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint, 'text_encoders.clip_l.transformer.') diffusers_format_checkpoint['text_projection.weight'] = torch.eye(position_embedding_dim) elif is_open_clip_model(checkpoint): prefix = 'cond_stage_model.model.' diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix) elif is_open_clip_sdxl_model(checkpoint) and checkpoint[CHECKPOINT_KEY_NAMES['open_clip_sdxl']].shape[-1] == position_embedding_dim: prefix = 'conditioner.embedders.1.model.' diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix) elif is_open_clip_sdxl_refiner_model(checkpoint): prefix = 'conditioner.embedders.0.model.' 
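# Illustrative sketch (not part of the upstream module): the branches above choose a key prefix
# for the text encoder depending on the single-file layout ('cond_stage_model.model.' for SD2-style
# checkpoints, 'conditioner.embedders.1.model.' for the SDXL base OpenCLIP encoder,
# 'conditioner.embedders.0.model.' for the refiner), and the conversion helpers then strip that
# prefix from every matching key. A minimal standalone demonstration of the stripping step, using
# made-up key names and placeholder values; `_strip_prefix_sketch` is a hypothetical helper, not a
# diffusers API.
def _strip_prefix_sketch(checkpoint: dict, prefix: str) -> dict:
    # Keep only the keys under `prefix` and drop the prefix itself.
    return {key[len(prefix):]: value for key, value in checkpoint.items() if key.startswith(prefix)}

_fake_checkpoint = {
    'conditioner.embedders.0.model.text_projection': 'projection tensor',
    'conditioner.embedders.0.model.transformer.resblocks.0.attn.in_proj_weight': 'fused qkv tensor',
    'some.other.module.weight': 'unrelated tensor',  # made-up key; should be ignored
}
assert _strip_prefix_sketch(_fake_checkpoint, 'conditioner.embedders.0.model.') == {
    'text_projection': 'projection tensor',
    'transformer.resblocks.0.attn.in_proj_weight': 'fused qkv tensor',
}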
diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix) elif is_open_clip_sd3_model(checkpoint) and checkpoint[CHECKPOINT_KEY_NAMES['open_clip_sd3']].shape[-1] == position_embedding_dim: diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint, 'text_encoders.clip_g.transformer.') else: raise ValueError('The provided checkpoint does not seem to contain a valid CLIP model.') if is_accelerate_available(): unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype) else: (_, unexpected_keys) = model.load_state_dict(diffusers_format_checkpoint, strict=False) if model._keys_to_ignore_on_load_unexpected is not None: for pat in model._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning(f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}") if torch_dtype is not None: model.to(torch_dtype) model.eval() return model def _legacy_load_scheduler(cls, checkpoint, component_name, original_config=None, **kwargs): scheduler_type = kwargs.get('scheduler_type', None) prediction_type = kwargs.get('prediction_type', None) if scheduler_type is not None: deprecation_message = 'Please pass an instance of a Scheduler object directly to the `scheduler` argument in `from_single_file`\n\nExample:\n\nfrom diffusers import StableDiffusionPipeline, DDIMScheduler\n\nscheduler = DDIMScheduler()\npipe = StableDiffusionPipeline.from_single_file(, scheduler=scheduler)\n' deprecate('scheduler_type', '1.0.0', deprecation_message) if prediction_type is not None: deprecation_message = 'Please configure an instance of a Scheduler with the appropriate `prediction_type` and pass the object directly to the `scheduler` argument in `from_single_file`.\n\nExample:\n\nfrom diffusers import StableDiffusionPipeline, DDIMScheduler\n\nscheduler = DDIMScheduler(prediction_type="v_prediction")\npipe = StableDiffusionPipeline.from_single_file(, scheduler=scheduler)\n' deprecate('prediction_type', '1.0.0', deprecation_message) scheduler_config = SCHEDULER_DEFAULT_CONFIG model_type = infer_diffusers_model_type(checkpoint=checkpoint) global_step = checkpoint['global_step'] if 'global_step' in checkpoint else None if original_config: num_train_timesteps = getattr(original_config['model']['params'], 'timesteps', 1000) else: num_train_timesteps = 1000 scheduler_config['num_train_timesteps'] = num_train_timesteps if model_type == 'v2': if prediction_type is None: prediction_type = 'epsilon' if global_step == 875000 else 'v_prediction' else: prediction_type = prediction_type or 'epsilon' scheduler_config['prediction_type'] = prediction_type if model_type in ['xl_base', 'xl_refiner']: scheduler_type = 'euler' elif model_type == 'playground': scheduler_type = 'edm_dpm_solver_multistep' else: if original_config: beta_start = original_config['model']['params'].get('linear_start') beta_end = original_config['model']['params'].get('linear_end') else: beta_start = 0.02 beta_end = 0.085 scheduler_config['beta_start'] = beta_start scheduler_config['beta_end'] = beta_end scheduler_config['beta_schedule'] = 'scaled_linear' scheduler_config['clip_sample'] = False scheduler_config['set_alpha_to_one'] = False if component_name == 'low_res_scheduler': return cls.from_config({'beta_end': 0.02, 'beta_schedule': 'scaled_linear', 'beta_start': 0.0001, 'clip_sample': True, 'num_train_timesteps': 1000, 
'prediction_type': 'epsilon', 'trained_betas': None, 'variance_type': 'fixed_small'}) if scheduler_type is None: return cls.from_config(scheduler_config) elif scheduler_type == 'pndm': scheduler_config['skip_prk_steps'] = True scheduler = PNDMScheduler.from_config(scheduler_config) elif scheduler_type == 'lms': scheduler = LMSDiscreteScheduler.from_config(scheduler_config) elif scheduler_type == 'heun': scheduler = HeunDiscreteScheduler.from_config(scheduler_config) elif scheduler_type == 'euler': scheduler = EulerDiscreteScheduler.from_config(scheduler_config) elif scheduler_type == 'euler-ancestral': scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config) elif scheduler_type == 'dpm': scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config) elif scheduler_type == 'ddim': scheduler = DDIMScheduler.from_config(scheduler_config) elif scheduler_type == 'edm_dpm_solver_multistep': scheduler_config = {'algorithm_type': 'dpmsolver++', 'dynamic_thresholding_ratio': 0.995, 'euler_at_final': False, 'final_sigmas_type': 'zero', 'lower_order_final': True, 'num_train_timesteps': 1000, 'prediction_type': 'epsilon', 'rho': 7.0, 'sample_max_value': 1.0, 'sigma_data': 0.5, 'sigma_max': 80.0, 'sigma_min': 0.002, 'solver_order': 2, 'solver_type': 'midpoint', 'thresholding': False} scheduler = EDMDPMSolverMultistepScheduler(**scheduler_config) else: raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") return scheduler def _legacy_load_clip_tokenizer(cls, checkpoint, config=None, local_files_only=False): if config: config = {'pretrained_model_name_or_path': config} else: config = fetch_diffusers_config(checkpoint) if is_clip_model(checkpoint) or is_clip_sdxl_model(checkpoint): clip_config = 'openai/clip-vit-large-patch14' config['pretrained_model_name_or_path'] = clip_config subfolder = '' elif is_open_clip_model(checkpoint): clip_config = 'stabilityai/stable-diffusion-2' config['pretrained_model_name_or_path'] = clip_config subfolder = 'tokenizer' else: clip_config = 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' config['pretrained_model_name_or_path'] = clip_config subfolder = '' tokenizer = cls.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) return tokenizer def _legacy_load_safety_checker(local_files_only, torch_dtype): from ..pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker feature_extractor = AutoImageProcessor.from_pretrained('CompVis/stable-diffusion-safety-checker', local_files_only=local_files_only, torch_dtype=torch_dtype) safety_checker = StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker', local_files_only=local_files_only, torch_dtype=torch_dtype) return {'safety_checker': safety_checker, 'feature_extractor': feature_extractor} def swap_scale_shift(weight, dim): (shift, scale) = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight def convert_sd3_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} keys = list(checkpoint.keys()) for k in keys: if 'model.diffusion_model.' 
in k: checkpoint[k.replace('model.diffusion_model.', '')] = checkpoint.pop(k) num_layers = list(set((int(k.split('.', 2)[1]) for k in checkpoint if 'joint_blocks' in k)))[-1] + 1 caption_projection_dim = 1536 converted_state_dict['pos_embed.pos_embed'] = checkpoint.pop('pos_embed') converted_state_dict['pos_embed.proj.weight'] = checkpoint.pop('x_embedder.proj.weight') converted_state_dict['pos_embed.proj.bias'] = checkpoint.pop('x_embedder.proj.bias') converted_state_dict['time_text_embed.timestep_embedder.linear_1.weight'] = checkpoint.pop('t_embedder.mlp.0.weight') converted_state_dict['time_text_embed.timestep_embedder.linear_1.bias'] = checkpoint.pop('t_embedder.mlp.0.bias') converted_state_dict['time_text_embed.timestep_embedder.linear_2.weight'] = checkpoint.pop('t_embedder.mlp.2.weight') converted_state_dict['time_text_embed.timestep_embedder.linear_2.bias'] = checkpoint.pop('t_embedder.mlp.2.bias') converted_state_dict['context_embedder.weight'] = checkpoint.pop('context_embedder.weight') converted_state_dict['context_embedder.bias'] = checkpoint.pop('context_embedder.bias') converted_state_dict['time_text_embed.text_embedder.linear_1.weight'] = checkpoint.pop('y_embedder.mlp.0.weight') converted_state_dict['time_text_embed.text_embedder.linear_1.bias'] = checkpoint.pop('y_embedder.mlp.0.bias') converted_state_dict['time_text_embed.text_embedder.linear_2.weight'] = checkpoint.pop('y_embedder.mlp.2.weight') converted_state_dict['time_text_embed.text_embedder.linear_2.bias'] = checkpoint.pop('y_embedder.mlp.2.bias') for i in range(num_layers): (sample_q, sample_k, sample_v) = torch.chunk(checkpoint.pop(f'joint_blocks.{i}.x_block.attn.qkv.weight'), 3, dim=0) (context_q, context_k, context_v) = torch.chunk(checkpoint.pop(f'joint_blocks.{i}.context_block.attn.qkv.weight'), 3, dim=0) (sample_q_bias, sample_k_bias, sample_v_bias) = torch.chunk(checkpoint.pop(f'joint_blocks.{i}.x_block.attn.qkv.bias'), 3, dim=0) (context_q_bias, context_k_bias, context_v_bias) = torch.chunk(checkpoint.pop(f'joint_blocks.{i}.context_block.attn.qkv.bias'), 3, dim=0) converted_state_dict[f'transformer_blocks.{i}.attn.to_q.weight'] = torch.cat([sample_q]) converted_state_dict[f'transformer_blocks.{i}.attn.to_q.bias'] = torch.cat([sample_q_bias]) converted_state_dict[f'transformer_blocks.{i}.attn.to_k.weight'] = torch.cat([sample_k]) converted_state_dict[f'transformer_blocks.{i}.attn.to_k.bias'] = torch.cat([sample_k_bias]) converted_state_dict[f'transformer_blocks.{i}.attn.to_v.weight'] = torch.cat([sample_v]) converted_state_dict[f'transformer_blocks.{i}.attn.to_v.bias'] = torch.cat([sample_v_bias]) converted_state_dict[f'transformer_blocks.{i}.attn.add_q_proj.weight'] = torch.cat([context_q]) converted_state_dict[f'transformer_blocks.{i}.attn.add_q_proj.bias'] = torch.cat([context_q_bias]) converted_state_dict[f'transformer_blocks.{i}.attn.add_k_proj.weight'] = torch.cat([context_k]) converted_state_dict[f'transformer_blocks.{i}.attn.add_k_proj.bias'] = torch.cat([context_k_bias]) converted_state_dict[f'transformer_blocks.{i}.attn.add_v_proj.weight'] = torch.cat([context_v]) converted_state_dict[f'transformer_blocks.{i}.attn.add_v_proj.bias'] = torch.cat([context_v_bias]) converted_state_dict[f'transformer_blocks.{i}.attn.to_out.0.weight'] = checkpoint.pop(f'joint_blocks.{i}.x_block.attn.proj.weight') converted_state_dict[f'transformer_blocks.{i}.attn.to_out.0.bias'] = checkpoint.pop(f'joint_blocks.{i}.x_block.attn.proj.bias') if not i == num_layers - 1: 
converted_state_dict[f'transformer_blocks.{i}.attn.to_add_out.weight'] = checkpoint.pop(f'joint_blocks.{i}.context_block.attn.proj.weight') converted_state_dict[f'transformer_blocks.{i}.attn.to_add_out.bias'] = checkpoint.pop(f'joint_blocks.{i}.context_block.attn.proj.bias') converted_state_dict[f'transformer_blocks.{i}.norm1.linear.weight'] = checkpoint.pop(f'joint_blocks.{i}.x_block.adaLN_modulation.1.weight') converted_state_dict[f'transformer_blocks.{i}.norm1.linear.bias'] = checkpoint.pop(f'joint_blocks.{i}.x_block.adaLN_modulation.1.bias') if not i == num_layers - 1: converted_state_dict[f'transformer_blocks.{i}.norm1_context.linear.weight'] = checkpoint.pop(f'joint_blocks.{i}.context_block.adaLN_modulation.1.weight') converted_state_dict[f'transformer_blocks.{i}.norm1_context.linear.bias'] = checkpoint.pop(f'joint_blocks.{i}.context_block.adaLN_modulation.1.bias') else: converted_state_dict[f'transformer_blocks.{i}.norm1_context.linear.weight'] = swap_scale_shift(checkpoint.pop(f'joint_blocks.{i}.context_block.adaLN_modulation.1.weight'), dim=caption_projection_dim) converted_state_dict[f'transformer_blocks.{i}.norm1_context.linear.bias'] = swap_scale_shift(checkpoint.pop(f'joint_blocks.{i}.context_block.adaLN_modulation.1.bias'), dim=caption_projection_dim) converted_state_dict[f'transformer_blocks.{i}.ff.net.0.proj.weight'] = checkpoint.pop(f'joint_blocks.{i}.x_block.mlp.fc1.weight') converted_state_dict[f'transformer_blocks.{i}.ff.net.0.proj.bias'] = checkpoint.pop(f'joint_blocks.{i}.x_block.mlp.fc1.bias') converted_state_dict[f'transformer_blocks.{i}.ff.net.2.weight'] = checkpoint.pop(f'joint_blocks.{i}.x_block.mlp.fc2.weight') converted_state_dict[f'transformer_blocks.{i}.ff.net.2.bias'] = checkpoint.pop(f'joint_blocks.{i}.x_block.mlp.fc2.bias') if not i == num_layers - 1: converted_state_dict[f'transformer_blocks.{i}.ff_context.net.0.proj.weight'] = checkpoint.pop(f'joint_blocks.{i}.context_block.mlp.fc1.weight') converted_state_dict[f'transformer_blocks.{i}.ff_context.net.0.proj.bias'] = checkpoint.pop(f'joint_blocks.{i}.context_block.mlp.fc1.bias') converted_state_dict[f'transformer_blocks.{i}.ff_context.net.2.weight'] = checkpoint.pop(f'joint_blocks.{i}.context_block.mlp.fc2.weight') converted_state_dict[f'transformer_blocks.{i}.ff_context.net.2.bias'] = checkpoint.pop(f'joint_blocks.{i}.context_block.mlp.fc2.bias') converted_state_dict['proj_out.weight'] = checkpoint.pop('final_layer.linear.weight') converted_state_dict['proj_out.bias'] = checkpoint.pop('final_layer.linear.bias') converted_state_dict['norm_out.linear.weight'] = swap_scale_shift(checkpoint.pop('final_layer.adaLN_modulation.1.weight'), dim=caption_projection_dim) converted_state_dict['norm_out.linear.bias'] = swap_scale_shift(checkpoint.pop('final_layer.adaLN_modulation.1.bias'), dim=caption_projection_dim) return converted_state_dict def is_t5_in_single_file(checkpoint): if 'text_encoders.t5xxl.transformer.shared.weight' in checkpoint: return True return False def convert_sd3_t5_checkpoint_to_diffusers(checkpoint): keys = list(checkpoint.keys()) text_model_dict = {} remove_prefixes = ['text_encoders.t5xxl.transformer.'] for key in keys: for prefix in remove_prefixes: if key.startswith(prefix): diffusers_key = key.replace(prefix, '') text_model_dict[diffusers_key] = checkpoint.get(key) return text_model_dict def create_diffusers_t5_model_from_checkpoint(cls, checkpoint, subfolder='', config=None, torch_dtype=None, local_files_only=None): if config: config = {'pretrained_model_name_or_path': config} else: 
config = fetch_diffusers_config(checkpoint) model_config = cls.config_class.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): model = cls(model_config) diffusers_format_checkpoint = convert_sd3_t5_checkpoint_to_diffusers(checkpoint) if is_accelerate_available(): unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype) if model._keys_to_ignore_on_load_unexpected is not None: for pat in model._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning(f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}") else: model.load_state_dict(diffusers_format_checkpoint) use_keep_in_fp32_modules = cls._keep_in_fp32_modules is not None and torch_dtype == torch.float16 if use_keep_in_fp32_modules: keep_in_fp32_modules = model._keep_in_fp32_modules else: keep_in_fp32_modules = [] if keep_in_fp32_modules is not None: for (name, param) in model.named_parameters(): if any((module_to_keep_in_fp32 in name.split('.') for module_to_keep_in_fp32 in keep_in_fp32_modules)): param.data = param.data.to(torch.float32) return model def convert_animatediff_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} for (k, v) in checkpoint.items(): if 'pos_encoder' in k: continue else: converted_state_dict[k.replace('.norms.0', '.norm1').replace('.norms.1', '.norm2').replace('.ff_norm', '.norm3').replace('.attention_blocks.0', '.attn1').replace('.attention_blocks.1', '.attn2').replace('.temporal_transformer', '')] = v return converted_state_dict def convert_flux_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): converted_state_dict = {} keys = list(checkpoint.keys()) for k in keys: if 'model.diffusion_model.' in k: checkpoint[k.replace('model.diffusion_model.', '')] = checkpoint.pop(k) num_layers = list(set((int(k.split('.', 2)[1]) for k in checkpoint if 'double_blocks.' in k)))[-1] + 1 num_single_layers = list(set((int(k.split('.', 2)[1]) for k in checkpoint if 'single_blocks.' 
in k)))[-1] + 1 mlp_ratio = 4.0 inner_dim = 3072 def swap_scale_shift(weight): (shift, scale) = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight converted_state_dict['time_text_embed.timestep_embedder.linear_1.weight'] = checkpoint.pop('time_in.in_layer.weight') converted_state_dict['time_text_embed.timestep_embedder.linear_1.bias'] = checkpoint.pop('time_in.in_layer.bias') converted_state_dict['time_text_embed.timestep_embedder.linear_2.weight'] = checkpoint.pop('time_in.out_layer.weight') converted_state_dict['time_text_embed.timestep_embedder.linear_2.bias'] = checkpoint.pop('time_in.out_layer.bias') converted_state_dict['time_text_embed.text_embedder.linear_1.weight'] = checkpoint.pop('vector_in.in_layer.weight') converted_state_dict['time_text_embed.text_embedder.linear_1.bias'] = checkpoint.pop('vector_in.in_layer.bias') converted_state_dict['time_text_embed.text_embedder.linear_2.weight'] = checkpoint.pop('vector_in.out_layer.weight') converted_state_dict['time_text_embed.text_embedder.linear_2.bias'] = checkpoint.pop('vector_in.out_layer.bias') has_guidance = any(('guidance' in k for k in checkpoint)) if has_guidance: converted_state_dict['time_text_embed.guidance_embedder.linear_1.weight'] = checkpoint.pop('guidance_in.in_layer.weight') converted_state_dict['time_text_embed.guidance_embedder.linear_1.bias'] = checkpoint.pop('guidance_in.in_layer.bias') converted_state_dict['time_text_embed.guidance_embedder.linear_2.weight'] = checkpoint.pop('guidance_in.out_layer.weight') converted_state_dict['time_text_embed.guidance_embedder.linear_2.bias'] = checkpoint.pop('guidance_in.out_layer.bias') converted_state_dict['context_embedder.weight'] = checkpoint.pop('txt_in.weight') converted_state_dict['context_embedder.bias'] = checkpoint.pop('txt_in.bias') converted_state_dict['x_embedder.weight'] = checkpoint.pop('img_in.weight') converted_state_dict['x_embedder.bias'] = checkpoint.pop('img_in.bias') for i in range(num_layers): block_prefix = f'transformer_blocks.{i}.' 
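# Illustrative sketch (not part of the upstream module): the statements below split each Flux
# double block's fused attention projection (`double_blocks.{i}.img_attn.qkv.weight` and its
# `txt_attn` counterpart, shape (3 * inner_dim, inner_dim)) into separate to_q / to_k / to_v
# tensors with `torch.chunk` along dim 0 and re-key them under `transformer_blocks.{i}.attn.`.
# A standalone version of that split, with a made-up inner dimension instead of the real 3072:
import torch

_inner_dim = 4  # hypothetical size for illustration only
_fused_qkv = torch.randn(3 * _inner_dim, _inner_dim)
_q, _k, _v = torch.chunk(_fused_qkv, 3, dim=0)
_converted = {
    'transformer_blocks.0.attn.to_q.weight': _q,
    'transformer_blocks.0.attn.to_k.weight': _k,
    'transformer_blocks.0.attn.to_v.weight': _v,
}
assert all(tensor.shape == (_inner_dim, _inner_dim) for tensor in _converted.values())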
converted_state_dict[f'{block_prefix}norm1.linear.weight'] = checkpoint.pop(f'double_blocks.{i}.img_mod.lin.weight') converted_state_dict[f'{block_prefix}norm1.linear.bias'] = checkpoint.pop(f'double_blocks.{i}.img_mod.lin.bias') converted_state_dict[f'{block_prefix}norm1_context.linear.weight'] = checkpoint.pop(f'double_blocks.{i}.txt_mod.lin.weight') converted_state_dict[f'{block_prefix}norm1_context.linear.bias'] = checkpoint.pop(f'double_blocks.{i}.txt_mod.lin.bias') (sample_q, sample_k, sample_v) = torch.chunk(checkpoint.pop(f'double_blocks.{i}.img_attn.qkv.weight'), 3, dim=0) (context_q, context_k, context_v) = torch.chunk(checkpoint.pop(f'double_blocks.{i}.txt_attn.qkv.weight'), 3, dim=0) (sample_q_bias, sample_k_bias, sample_v_bias) = torch.chunk(checkpoint.pop(f'double_blocks.{i}.img_attn.qkv.bias'), 3, dim=0) (context_q_bias, context_k_bias, context_v_bias) = torch.chunk(checkpoint.pop(f'double_blocks.{i}.txt_attn.qkv.bias'), 3, dim=0) converted_state_dict[f'{block_prefix}attn.to_q.weight'] = torch.cat([sample_q]) converted_state_dict[f'{block_prefix}attn.to_q.bias'] = torch.cat([sample_q_bias]) converted_state_dict[f'{block_prefix}attn.to_k.weight'] = torch.cat([sample_k]) converted_state_dict[f'{block_prefix}attn.to_k.bias'] = torch.cat([sample_k_bias]) converted_state_dict[f'{block_prefix}attn.to_v.weight'] = torch.cat([sample_v]) converted_state_dict[f'{block_prefix}attn.to_v.bias'] = torch.cat([sample_v_bias]) converted_state_dict[f'{block_prefix}attn.add_q_proj.weight'] = torch.cat([context_q]) converted_state_dict[f'{block_prefix}attn.add_q_proj.bias'] = torch.cat([context_q_bias]) converted_state_dict[f'{block_prefix}attn.add_k_proj.weight'] = torch.cat([context_k]) converted_state_dict[f'{block_prefix}attn.add_k_proj.bias'] = torch.cat([context_k_bias]) converted_state_dict[f'{block_prefix}attn.add_v_proj.weight'] = torch.cat([context_v]) converted_state_dict[f'{block_prefix}attn.add_v_proj.bias'] = torch.cat([context_v_bias]) converted_state_dict[f'{block_prefix}attn.norm_q.weight'] = checkpoint.pop(f'double_blocks.{i}.img_attn.norm.query_norm.scale') converted_state_dict[f'{block_prefix}attn.norm_k.weight'] = checkpoint.pop(f'double_blocks.{i}.img_attn.norm.key_norm.scale') converted_state_dict[f'{block_prefix}attn.norm_added_q.weight'] = checkpoint.pop(f'double_blocks.{i}.txt_attn.norm.query_norm.scale') converted_state_dict[f'{block_prefix}attn.norm_added_k.weight'] = checkpoint.pop(f'double_blocks.{i}.txt_attn.norm.key_norm.scale') converted_state_dict[f'{block_prefix}ff.net.0.proj.weight'] = checkpoint.pop(f'double_blocks.{i}.img_mlp.0.weight') converted_state_dict[f'{block_prefix}ff.net.0.proj.bias'] = checkpoint.pop(f'double_blocks.{i}.img_mlp.0.bias') converted_state_dict[f'{block_prefix}ff.net.2.weight'] = checkpoint.pop(f'double_blocks.{i}.img_mlp.2.weight') converted_state_dict[f'{block_prefix}ff.net.2.bias'] = checkpoint.pop(f'double_blocks.{i}.img_mlp.2.bias') converted_state_dict[f'{block_prefix}ff_context.net.0.proj.weight'] = checkpoint.pop(f'double_blocks.{i}.txt_mlp.0.weight') converted_state_dict[f'{block_prefix}ff_context.net.0.proj.bias'] = checkpoint.pop(f'double_blocks.{i}.txt_mlp.0.bias') converted_state_dict[f'{block_prefix}ff_context.net.2.weight'] = checkpoint.pop(f'double_blocks.{i}.txt_mlp.2.weight') converted_state_dict[f'{block_prefix}ff_context.net.2.bias'] = checkpoint.pop(f'double_blocks.{i}.txt_mlp.2.bias') converted_state_dict[f'{block_prefix}attn.to_out.0.weight'] = checkpoint.pop(f'double_blocks.{i}.img_attn.proj.weight') 
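# Illustrative sketch (not part of the upstream module): the `swap_scale_shift` helper defined
# above chunks a modulation tensor into (shift, scale) halves along dim 0 and concatenates them
# back in (scale, shift) order, which is the layout the converted diffusers keys use; it is
# applied to the final-layer adaLN modulation parameters further below. A tiny worked example
# with a made-up four-element vector:
import torch

_modulation = torch.tensor([1.0, 2.0, 3.0, 4.0])  # first half plays the role of shift, second half scale
_shift, _scale = _modulation.chunk(2, dim=0)
assert torch.equal(torch.cat([_scale, _shift], dim=0), torch.tensor([3.0, 4.0, 1.0, 2.0]))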
converted_state_dict[f'{block_prefix}attn.to_out.0.bias'] = checkpoint.pop(f'double_blocks.{i}.img_attn.proj.bias') converted_state_dict[f'{block_prefix}attn.to_add_out.weight'] = checkpoint.pop(f'double_blocks.{i}.txt_attn.proj.weight') converted_state_dict[f'{block_prefix}attn.to_add_out.bias'] = checkpoint.pop(f'double_blocks.{i}.txt_attn.proj.bias') for i in range(num_single_layers): block_prefix = f'single_transformer_blocks.{i}.' converted_state_dict[f'{block_prefix}norm.linear.weight'] = checkpoint.pop(f'single_blocks.{i}.modulation.lin.weight') converted_state_dict[f'{block_prefix}norm.linear.bias'] = checkpoint.pop(f'single_blocks.{i}.modulation.lin.bias') mlp_hidden_dim = int(inner_dim * mlp_ratio) split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim) (q, k, v, mlp) = torch.split(checkpoint.pop(f'single_blocks.{i}.linear1.weight'), split_size, dim=0) (q_bias, k_bias, v_bias, mlp_bias) = torch.split(checkpoint.pop(f'single_blocks.{i}.linear1.bias'), split_size, dim=0) converted_state_dict[f'{block_prefix}attn.to_q.weight'] = torch.cat([q]) converted_state_dict[f'{block_prefix}attn.to_q.bias'] = torch.cat([q_bias]) converted_state_dict[f'{block_prefix}attn.to_k.weight'] = torch.cat([k]) converted_state_dict[f'{block_prefix}attn.to_k.bias'] = torch.cat([k_bias]) converted_state_dict[f'{block_prefix}attn.to_v.weight'] = torch.cat([v]) converted_state_dict[f'{block_prefix}attn.to_v.bias'] = torch.cat([v_bias]) converted_state_dict[f'{block_prefix}proj_mlp.weight'] = torch.cat([mlp]) converted_state_dict[f'{block_prefix}proj_mlp.bias'] = torch.cat([mlp_bias]) converted_state_dict[f'{block_prefix}attn.norm_q.weight'] = checkpoint.pop(f'single_blocks.{i}.norm.query_norm.scale') converted_state_dict[f'{block_prefix}attn.norm_k.weight'] = checkpoint.pop(f'single_blocks.{i}.norm.key_norm.scale') converted_state_dict[f'{block_prefix}proj_out.weight'] = checkpoint.pop(f'single_blocks.{i}.linear2.weight') converted_state_dict[f'{block_prefix}proj_out.bias'] = checkpoint.pop(f'single_blocks.{i}.linear2.bias') converted_state_dict['proj_out.weight'] = checkpoint.pop('final_layer.linear.weight') converted_state_dict['proj_out.bias'] = checkpoint.pop('final_layer.linear.bias') converted_state_dict['norm_out.linear.weight'] = swap_scale_shift(checkpoint.pop('final_layer.adaLN_modulation.1.weight')) converted_state_dict['norm_out.linear.bias'] = swap_scale_shift(checkpoint.pop('final_layer.adaLN_modulation.1.bias')) return converted_state_dict # File: diffusers-main/src/diffusers/loaders/textual_inversion.py from typing import Dict, List, Optional, Union import safetensors import torch from huggingface_hub.utils import validate_hf_hub_args from torch import nn from ..models.modeling_utils import load_state_dict from ..utils import _get_model_file, is_accelerate_available, is_transformers_available, logging if is_transformers_available(): from transformers import PreTrainedModel, PreTrainedTokenizer if is_accelerate_available(): from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module logger = logging.get_logger(__name__) TEXT_INVERSION_NAME = 'learned_embeds.bin' TEXT_INVERSION_NAME_SAFE = 'learned_embeds.safetensors' @validate_hf_hub_args def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = 
kwargs.pop('revision', None) subfolder = kwargs.pop('subfolder', None) weight_name = kwargs.pop('weight_name', None) use_safetensors = kwargs.pop('use_safetensors', None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {'file_type': 'text_inversion', 'framework': 'pytorch'} state_dicts = [] for pretrained_model_name_or_path in pretrained_model_name_or_paths: if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)): model_file = None if use_safetensors and weight_name is None or (weight_name is not None and weight_name.endswith('.safetensors')): try: model_file = _get_model_file(pretrained_model_name_or_path, weights_name=weight_name or TEXT_INVERSION_NAME_SAFE, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent) state_dict = safetensors.torch.load_file(model_file, device='cpu') except Exception as e: if not allow_pickle: raise e model_file = None if model_file is None: model_file = _get_model_file(pretrained_model_name_or_path, weights_name=weight_name or TEXT_INVERSION_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent) state_dict = load_state_dict(model_file) else: state_dict = pretrained_model_name_or_path state_dicts.append(state_dict) return state_dicts class TextualInversionLoaderMixin: def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: 'PreTrainedTokenizer'): if not isinstance(prompt, List): prompts = [prompt] else: prompts = prompt prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts] if not isinstance(prompt, List): return prompts[0] return prompts def _maybe_convert_prompt(self, prompt: str, tokenizer: 'PreTrainedTokenizer'): tokens = tokenizer.tokenize(prompt) unique_tokens = set(tokens) for token in unique_tokens: if token in tokenizer.added_tokens_encoder: replacement = token i = 1 while f'{token}_{i}' in tokenizer.added_tokens_encoder: replacement += f' {token}_{i}' i += 1 prompt = prompt.replace(token, replacement) return prompt def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens): if tokenizer is None: raise ValueError(f'{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling `{self.load_textual_inversion.__name__}`') if text_encoder is None: raise ValueError(f'{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling `{self.load_textual_inversion.__name__}`') if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens): raise ValueError(f'You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and list of tokens of length {len(tokens)} Make sure both lists have the same length.') valid_tokens = [t for t in tokens if t is not None] if len(set(valid_tokens)) < len(valid_tokens): raise ValueError(f'You have passed a list of tokens that contains duplicates: {tokens}') @staticmethod def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer): all_tokens = [] all_embeddings = [] for (state_dict, token) in zip(state_dicts, tokens): if isinstance(state_dict, torch.Tensor): if token is None: raise ValueError('You are trying to load a textual inversion embedding that has been saved 
as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`.') loaded_token = token embedding = state_dict elif len(state_dict) == 1: (loaded_token, embedding) = next(iter(state_dict.items())) elif 'string_to_param' in state_dict: loaded_token = state_dict['name'] embedding = state_dict['string_to_param']['*'] else: raise ValueError(f'Loaded state dictionary is incorrect: {state_dict}. \n\nPlease verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param` input key.') if token is not None and loaded_token != token: logger.info(f'The loaded token: {loaded_token} is overwritten by the passed token {token}.') else: token = loaded_token if token in tokenizer.get_vocab(): raise ValueError(f'Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder.') all_tokens.append(token) all_embeddings.append(embedding) return (all_tokens, all_embeddings) @staticmethod def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer): all_tokens = [] all_embeddings = [] for (embedding, token) in zip(embeddings, tokens): if f'{token}_1' in tokenizer.get_vocab(): multi_vector_tokens = [token] i = 1 while f'{token}_{i}' in tokenizer.added_tokens_encoder: multi_vector_tokens.append(f'{token}_{i}') i += 1 raise ValueError(f'Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder.') is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1 if is_multi_vector: all_tokens += [token] + [f'{token}_{i}' for i in range(1, embedding.shape[0])] all_embeddings += [e for e in embedding] else: all_tokens += [token] all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding] return (all_tokens, all_embeddings) @validate_hf_hub_args def load_textual_inversion(self, pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]], token: Optional[Union[str, List[str]]]=None, tokenizer: Optional['PreTrainedTokenizer']=None, text_encoder: Optional['PreTrainedModel']=None, **kwargs): tokenizer = tokenizer or getattr(self, 'tokenizer', None) text_encoder = text_encoder or getattr(self, 'text_encoder', None) pretrained_model_name_or_paths = [pretrained_model_name_or_path] if not isinstance(pretrained_model_name_or_path, list) else pretrained_model_name_or_path tokens = [token] if not isinstance(token, list) else token if tokens[0] is None: tokens = tokens * len(pretrained_model_name_or_paths) self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens) state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs) if len(tokens) > 1 and len(state_dicts) == 1: if isinstance(state_dicts[0], torch.Tensor): state_dicts = list(state_dicts[0]) if len(tokens) != len(state_dicts): raise ValueError(f'You have passed a state_dict contains {len(state_dicts)} embeddings, and list of tokens of length {len(tokens)} Make sure both have the same length.') (tokens, embeddings) = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer) (tokens, embeddings) = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer) expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1] if any((expected_emb_dim != emb.shape[-1] for emb in embeddings)): raise 
ValueError(f'Loaded embeddings are of incorrect shape. Expected each textual inversion embedding to be of shape {expected_emb_dim}, but found embeddings of shapes {[emb.shape[-1] for emb in embeddings]}.') is_model_cpu_offload = False is_sequential_cpu_offload = False if self.hf_device_map is None: for (_, component) in self.components.items(): if isinstance(component, nn.Module): if hasattr(component, '_hf_hook'): is_model_cpu_offload = isinstance(getattr(component, '_hf_hook'), CpuOffload) is_sequential_cpu_offload = isinstance(getattr(component, '_hf_hook'), AlignDevicesHook) or (hasattr(component._hf_hook, 'hooks') and isinstance(component._hf_hook.hooks[0], AlignDevicesHook)) logger.info('Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again.') remove_hook_from_module(component, recurse=is_sequential_cpu_offload) device = text_encoder.device dtype = text_encoder.dtype text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens)) input_embeddings = text_encoder.get_input_embeddings().weight for (token, embedding) in zip(tokens, embeddings): tokenizer.add_tokens(token) token_id = tokenizer.convert_tokens_to_ids(token) input_embeddings.data[token_id] = embedding logger.info(f'Loaded textual inversion embedding for {token}.') input_embeddings.to(dtype=dtype, device=device) if is_model_cpu_offload: self.enable_model_cpu_offload() elif is_sequential_cpu_offload: self.enable_sequential_cpu_offload() def unload_textual_inversion(self, tokens: Optional[Union[str, List[str]]]=None, tokenizer: Optional['PreTrainedTokenizer']=None, text_encoder: Optional['PreTrainedModel']=None): tokenizer = tokenizer or getattr(self, 'tokenizer', None) text_encoder = text_encoder or getattr(self, 'text_encoder', None) token_ids = [] last_special_token_id = None if tokens: if isinstance(tokens, str): tokens = [tokens] for (added_token_id, added_token) in tokenizer.added_tokens_decoder.items(): if not added_token.special: if added_token.content in tokens: token_ids.append(added_token_id) else: last_special_token_id = added_token_id if len(token_ids) == 0: raise ValueError('No tokens to remove found') else: tokens = [] for (added_token_id, added_token) in tokenizer.added_tokens_decoder.items(): if not added_token.special: token_ids.append(added_token_id) tokens.append(added_token.content) else: last_special_token_id = added_token_id for (token_id, token_to_remove) in zip(token_ids, tokens): del tokenizer._added_tokens_decoder[token_id] del tokenizer._added_tokens_encoder[token_to_remove] key_id = 1 for token_id in tokenizer.added_tokens_decoder: if token_id > last_special_token_id and token_id > last_special_token_id + key_id: token = tokenizer._added_tokens_decoder[token_id] tokenizer._added_tokens_decoder[last_special_token_id + key_id] = token del tokenizer._added_tokens_decoder[token_id] tokenizer._added_tokens_encoder[token.content] = last_special_token_id + key_id key_id += 1 tokenizer._update_trie() text_embedding_dim = text_encoder.get_input_embeddings().embedding_dim temp_text_embedding_weights = text_encoder.get_input_embeddings().weight text_embedding_weights = temp_text_embedding_weights[:last_special_token_id + 1] to_append = [] for i in range(last_special_token_id + 1, temp_text_embedding_weights.shape[0]): if i not in token_ids: to_append.append(temp_text_embedding_weights[i].unsqueeze(0)) if len(to_append) > 0: to_append = torch.cat(to_append, dim=0) text_embedding_weights
= torch.cat([text_embedding_weights, to_append], dim=0) text_embeddings_filtered = nn.Embedding(text_embedding_weights.shape[0], text_embedding_dim) text_embeddings_filtered.weight.data = text_embedding_weights text_encoder.set_input_embeddings(text_embeddings_filtered) # File: diffusers-main/src/diffusers/loaders/unet.py import os from collections import defaultdict from contextlib import nullcontext from pathlib import Path from typing import Callable, Dict, Union import safetensors import torch import torch.nn.functional as F from huggingface_hub.utils import validate_hf_hub_args from torch import nn from ..models.embeddings import ImageProjection, IPAdapterFaceIDImageProjection, IPAdapterFaceIDPlusImageProjection, IPAdapterFullImageProjection, IPAdapterPlusImageProjection, MultiIPAdapterImageProjection from ..models.modeling_utils import load_model_dict_into_meta, load_state_dict from ..utils import USE_PEFT_BACKEND, _get_model_file, convert_unet_state_dict_to_peft, get_adapter_name, get_peft_kwargs, is_accelerate_available, is_peft_version, is_torch_version, logging from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE, TEXT_ENCODER_NAME, UNET_NAME from .utils import AttnProcsLayers if is_accelerate_available(): from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module logger = logging.get_logger(__name__) CUSTOM_DIFFUSION_WEIGHT_NAME = 'pytorch_custom_diffusion_weights.bin' CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = 'pytorch_custom_diffusion_weights.safetensors' class UNet2DConditionLoadersMixin: text_encoder_name = TEXT_ENCODER_NAME unet_name = UNET_NAME @validate_hf_hub_args def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) subfolder = kwargs.pop('subfolder', None) weight_name = kwargs.pop('weight_name', None) use_safetensors = kwargs.pop('use_safetensors', None) adapter_name = kwargs.pop('adapter_name', None) _pipeline = kwargs.pop('_pipeline', None) network_alphas = kwargs.pop('network_alphas', None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = {'file_type': 'attn_procs_weights', 'framework': 'pytorch'} model_file = None if not isinstance(pretrained_model_name_or_path_or_dict, dict): if use_safetensors and weight_name is None or (weight_name is not None and weight_name.endswith('.safetensors')): try: model_file = _get_model_file(pretrained_model_name_or_path_or_dict, weights_name=weight_name or LORA_WEIGHT_NAME_SAFE, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent) state_dict = safetensors.torch.load_file(model_file, device='cpu') except IOError as e: if not allow_pickle: raise e pass if model_file is None: model_file = _get_model_file(pretrained_model_name_or_path_or_dict, weights_name=weight_name or LORA_WEIGHT_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent) state_dict = load_state_dict(model_file) else: state_dict = pretrained_model_name_or_path_or_dict is_custom_diffusion = any(('custom_diffusion' in k 
for k in state_dict.keys())) is_lora = all(('lora' in k or k.endswith('.alpha') for k in state_dict.keys())) is_model_cpu_offload = False is_sequential_cpu_offload = False if is_custom_diffusion: attn_processors = self._process_custom_diffusion(state_dict=state_dict) elif is_lora: (is_model_cpu_offload, is_sequential_cpu_offload) = self._process_lora(state_dict=state_dict, unet_identifier_key=self.unet_name, network_alphas=network_alphas, adapter_name=adapter_name, _pipeline=_pipeline) else: raise ValueError(f'{model_file} does not seem to be in the correct format expected by Custom Diffusion training.') if is_custom_diffusion and _pipeline is not None: (is_model_cpu_offload, is_sequential_cpu_offload) = self._optionally_disable_offloading(_pipeline=_pipeline) self.set_attn_processor(attn_processors) self.to(dtype=self.dtype, device=self.device) if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() def _process_custom_diffusion(self, state_dict): from ..models.attention_processor import CustomDiffusionAttnProcessor attn_processors = {} custom_diffusion_grouped_dict = defaultdict(dict) for (key, value) in state_dict.items(): if len(value) == 0: custom_diffusion_grouped_dict[key] = {} else: if 'to_out' in key: (attn_processor_key, sub_key) = ('.'.join(key.split('.')[:-3]), '.'.join(key.split('.')[-3:])) else: (attn_processor_key, sub_key) = ('.'.join(key.split('.')[:-2]), '.'.join(key.split('.')[-2:])) custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value for (key, value_dict) in custom_diffusion_grouped_dict.items(): if len(value_dict) == 0: attn_processors[key] = CustomDiffusionAttnProcessor(train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None) else: cross_attention_dim = value_dict['to_k_custom_diffusion.weight'].shape[1] hidden_size = value_dict['to_k_custom_diffusion.weight'].shape[0] train_q_out = True if 'to_q_custom_diffusion.weight' in value_dict else False attn_processors[key] = CustomDiffusionAttnProcessor(train_kv=True, train_q_out=train_q_out, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim) attn_processors[key].load_state_dict(value_dict) return attn_processors def _process_lora(self, state_dict, unet_identifier_key, network_alphas, adapter_name, _pipeline): if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for this method.') from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict keys = list(state_dict.keys()) unet_keys = [k for k in keys if k.startswith(unet_identifier_key)] unet_state_dict = {k.replace(f'{unet_identifier_key}.', ''): v for (k, v) in state_dict.items() if k in unet_keys} if network_alphas is not None: alpha_keys = [k for k in network_alphas.keys() if k.startswith(unet_identifier_key)] network_alphas = {k.replace(f'{unet_identifier_key}.', ''): v for (k, v) in network_alphas.items() if k in alpha_keys} is_model_cpu_offload = False is_sequential_cpu_offload = False state_dict_to_be_used = unet_state_dict if len(unet_state_dict) > 0 else state_dict if len(state_dict_to_be_used) > 0: if adapter_name in getattr(self, 'peft_config', {}): raise ValueError(f'Adapter name {adapter_name} already in use in the Unet - please select a new adapter name.') state_dict = convert_unet_state_dict_to_peft(state_dict_to_be_used) if network_alphas is not None: network_alphas = convert_unet_state_dict_to_peft(network_alphas) rank = {} for (key, val) in state_dict.items(): if 'lora_B' in key: rank[key] = 
val.shape[1] lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict, is_unet=True) if 'use_dora' in lora_config_kwargs: if lora_config_kwargs['use_dora']: if is_peft_version('<', '0.9.0'): raise ValueError('You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`.') elif is_peft_version('<', '0.9.0'): lora_config_kwargs.pop('use_dora') lora_config = LoraConfig(**lora_config_kwargs) if adapter_name is None: adapter_name = get_adapter_name(self) (is_model_cpu_offload, is_sequential_cpu_offload) = self._optionally_disable_offloading(_pipeline) inject_adapter_in_model(lora_config, self, adapter_name=adapter_name) incompatible_keys = set_peft_model_state_dict(self, state_dict, adapter_name) if incompatible_keys is not None: unexpected_keys = getattr(incompatible_keys, 'unexpected_keys', None) if unexpected_keys: logger.warning(f'Loading adapter weights from state_dict led to unexpected keys not found in the model: {unexpected_keys}. ') return (is_model_cpu_offload, is_sequential_cpu_offload) @classmethod def _optionally_disable_offloading(cls, _pipeline): is_model_cpu_offload = False is_sequential_cpu_offload = False if _pipeline is not None and _pipeline.hf_device_map is None: for (_, component) in _pipeline.components.items(): if isinstance(component, nn.Module) and hasattr(component, '_hf_hook'): if not is_model_cpu_offload: is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload) if not is_sequential_cpu_offload: is_sequential_cpu_offload = isinstance(component._hf_hook, AlignDevicesHook) or (hasattr(component._hf_hook, 'hooks') and isinstance(component._hf_hook.hooks[0], AlignDevicesHook)) logger.info('Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again.') remove_hook_from_module(component, recurse=is_sequential_cpu_offload) return (is_model_cpu_offload, is_sequential_cpu_offload) def save_attn_procs(self, save_directory: Union[str, os.PathLike], is_main_process: bool=True, weight_name: str=None, save_function: Callable=None, safe_serialization: bool=True, **kwargs): from ..models.attention_processor import CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return is_custom_diffusion = any((isinstance(x, (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor)) for (_, x) in self.attn_processors.items())) if is_custom_diffusion: state_dict = self._get_custom_diffusion_state_dict() if save_function is None and safe_serialization: empty_state_dict = {k: v for (k, v) in state_dict.items() if not isinstance(v, torch.Tensor)} if len(empty_state_dict) > 0: logger.warning(f'Safetensors does not support saving dicts with non-tensor values. 
The following keys will be ignored: {empty_state_dict.keys()}') state_dict = {k: v for (k, v) in state_dict.items() if isinstance(v, torch.Tensor)} else: if not USE_PEFT_BACKEND: raise ValueError('PEFT backend is required for saving LoRAs using the `save_attn_procs()` method.') from peft.utils import get_peft_model_state_dict state_dict = get_peft_model_state_dict(self) if save_function is None: if safe_serialization: def save_function(weights, filename): return safetensors.torch.save_file(weights, filename, metadata={'format': 'pt'}) else: save_function = torch.save os.makedirs(save_directory, exist_ok=True) if weight_name is None: if safe_serialization: weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else LORA_WEIGHT_NAME_SAFE else: weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else LORA_WEIGHT_NAME save_path = Path(save_directory, weight_name).as_posix() save_function(state_dict, save_path) logger.info(f'Model weights saved in {save_path}') def _get_custom_diffusion_state_dict(self): from ..models.attention_processor import CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor model_to_save = AttnProcsLayers({y: x for (y, x) in self.attn_processors.items() if isinstance(x, (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor))}) state_dict = model_to_save.state_dict() for (name, attn) in self.attn_processors.items(): if len(attn.state_dict()) == 0: state_dict[name] = {} return state_dict def _convert_ip_adapter_image_proj_to_diffusers(self, state_dict, low_cpu_mem_usage=False): if low_cpu_mem_usage: if is_accelerate_available(): from accelerate import init_empty_weights else: low_cpu_mem_usage = False logger.warning('Cannot initialize model with low cpu memory usage because `accelerate` was not found in the environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip install accelerate\n```\n.') if low_cpu_mem_usage is True and (not is_torch_version('>=', '1.9.0')): raise NotImplementedError('Low memory initialization requires torch >= 1.9.0. 
Please either update your PyTorch version or set `low_cpu_mem_usage=False`.') updated_state_dict = {} image_projection = None init_context = init_empty_weights if low_cpu_mem_usage else nullcontext if 'proj.weight' in state_dict: num_image_text_embeds = 4 clip_embeddings_dim = state_dict['proj.weight'].shape[-1] cross_attention_dim = state_dict['proj.weight'].shape[0] // 4 with init_context(): image_projection = ImageProjection(cross_attention_dim=cross_attention_dim, image_embed_dim=clip_embeddings_dim, num_image_text_embeds=num_image_text_embeds) for (key, value) in state_dict.items(): diffusers_name = key.replace('proj', 'image_embeds') updated_state_dict[diffusers_name] = value elif 'proj.3.weight' in state_dict: clip_embeddings_dim = state_dict['proj.0.weight'].shape[0] cross_attention_dim = state_dict['proj.3.weight'].shape[0] with init_context(): image_projection = IPAdapterFullImageProjection(cross_attention_dim=cross_attention_dim, image_embed_dim=clip_embeddings_dim) for (key, value) in state_dict.items(): diffusers_name = key.replace('proj.0', 'ff.net.0.proj') diffusers_name = diffusers_name.replace('proj.2', 'ff.net.2') diffusers_name = diffusers_name.replace('proj.3', 'norm') updated_state_dict[diffusers_name] = value elif 'perceiver_resampler.proj_in.weight' in state_dict: id_embeddings_dim = state_dict['proj.0.weight'].shape[1] embed_dims = state_dict['perceiver_resampler.proj_in.weight'].shape[0] hidden_dims = state_dict['perceiver_resampler.proj_in.weight'].shape[1] output_dims = state_dict['perceiver_resampler.proj_out.weight'].shape[0] heads = state_dict['perceiver_resampler.layers.0.0.to_q.weight'].shape[0] // 64 with init_context(): image_projection = IPAdapterFaceIDPlusImageProjection(embed_dims=embed_dims, output_dims=output_dims, hidden_dims=hidden_dims, heads=heads, id_embeddings_dim=id_embeddings_dim) for (key, value) in state_dict.items(): diffusers_name = key.replace('perceiver_resampler.', '') diffusers_name = diffusers_name.replace('0.to', 'attn.to') diffusers_name = diffusers_name.replace('0.1.0.', '0.ff.0.') diffusers_name = diffusers_name.replace('0.1.1.weight', '0.ff.1.net.0.proj.weight') diffusers_name = diffusers_name.replace('0.1.3.weight', '0.ff.1.net.2.weight') diffusers_name = diffusers_name.replace('1.1.0.', '1.ff.0.') diffusers_name = diffusers_name.replace('1.1.1.weight', '1.ff.1.net.0.proj.weight') diffusers_name = diffusers_name.replace('1.1.3.weight', '1.ff.1.net.2.weight') diffusers_name = diffusers_name.replace('2.1.0.', '2.ff.0.') diffusers_name = diffusers_name.replace('2.1.1.weight', '2.ff.1.net.0.proj.weight') diffusers_name = diffusers_name.replace('2.1.3.weight', '2.ff.1.net.2.weight') diffusers_name = diffusers_name.replace('3.1.0.', '3.ff.0.') diffusers_name = diffusers_name.replace('3.1.1.weight', '3.ff.1.net.0.proj.weight') diffusers_name = diffusers_name.replace('3.1.3.weight', '3.ff.1.net.2.weight') diffusers_name = diffusers_name.replace('layers.0.0', 'layers.0.ln0') diffusers_name = diffusers_name.replace('layers.0.1', 'layers.0.ln1') diffusers_name = diffusers_name.replace('layers.1.0', 'layers.1.ln0') diffusers_name = diffusers_name.replace('layers.1.1', 'layers.1.ln1') diffusers_name = diffusers_name.replace('layers.2.0', 'layers.2.ln0') diffusers_name = diffusers_name.replace('layers.2.1', 'layers.2.ln1') diffusers_name = diffusers_name.replace('layers.3.0', 'layers.3.ln0') diffusers_name = diffusers_name.replace('layers.3.1', 'layers.3.ln1') if 'norm1' in diffusers_name: updated_state_dict[diffusers_name.replace('0.norm1', 
'0')] = value elif 'norm2' in diffusers_name: updated_state_dict[diffusers_name.replace('0.norm2', '1')] = value elif 'to_kv' in diffusers_name: v_chunk = value.chunk(2, dim=0) updated_state_dict[diffusers_name.replace('to_kv', 'to_k')] = v_chunk[0] updated_state_dict[diffusers_name.replace('to_kv', 'to_v')] = v_chunk[1] elif 'to_out' in diffusers_name: updated_state_dict[diffusers_name.replace('to_out', 'to_out.0')] = value elif 'proj.0.weight' == diffusers_name: updated_state_dict['proj.net.0.proj.weight'] = value elif 'proj.0.bias' == diffusers_name: updated_state_dict['proj.net.0.proj.bias'] = value elif 'proj.2.weight' == diffusers_name: updated_state_dict['proj.net.2.weight'] = value elif 'proj.2.bias' == diffusers_name: updated_state_dict['proj.net.2.bias'] = value else: updated_state_dict[diffusers_name] = value elif 'norm.weight' in state_dict: id_embeddings_dim_in = state_dict['proj.0.weight'].shape[1] id_embeddings_dim_out = state_dict['proj.0.weight'].shape[0] multiplier = id_embeddings_dim_out // id_embeddings_dim_in norm_layer = 'norm.weight' cross_attention_dim = state_dict[norm_layer].shape[0] num_tokens = state_dict['proj.2.weight'].shape[0] // cross_attention_dim with init_context(): image_projection = IPAdapterFaceIDImageProjection(cross_attention_dim=cross_attention_dim, image_embed_dim=id_embeddings_dim_in, mult=multiplier, num_tokens=num_tokens) for (key, value) in state_dict.items(): diffusers_name = key.replace('proj.0', 'ff.net.0.proj') diffusers_name = diffusers_name.replace('proj.2', 'ff.net.2') updated_state_dict[diffusers_name] = value else: num_image_text_embeds = state_dict['latents'].shape[1] embed_dims = state_dict['proj_in.weight'].shape[1] output_dims = state_dict['proj_out.weight'].shape[0] hidden_dims = state_dict['latents'].shape[2] attn_key_present = any(('attn' in k for k in state_dict)) heads = state_dict['layers.0.attn.to_q.weight'].shape[0] // 64 if attn_key_present else state_dict['layers.0.0.to_q.weight'].shape[0] // 64 with init_context(): image_projection = IPAdapterPlusImageProjection(embed_dims=embed_dims, output_dims=output_dims, hidden_dims=hidden_dims, heads=heads, num_queries=num_image_text_embeds) for (key, value) in state_dict.items(): diffusers_name = key.replace('0.to', '2.to') diffusers_name = diffusers_name.replace('0.0.norm1', '0.ln0') diffusers_name = diffusers_name.replace('0.0.norm2', '0.ln1') diffusers_name = diffusers_name.replace('1.0.norm1', '1.ln0') diffusers_name = diffusers_name.replace('1.0.norm2', '1.ln1') diffusers_name = diffusers_name.replace('2.0.norm1', '2.ln0') diffusers_name = diffusers_name.replace('2.0.norm2', '2.ln1') diffusers_name = diffusers_name.replace('3.0.norm1', '3.ln0') diffusers_name = diffusers_name.replace('3.0.norm2', '3.ln1') if 'to_kv' in diffusers_name: parts = diffusers_name.split('.') parts[2] = 'attn' diffusers_name = '.'.join(parts) v_chunk = value.chunk(2, dim=0) updated_state_dict[diffusers_name.replace('to_kv', 'to_k')] = v_chunk[0] updated_state_dict[diffusers_name.replace('to_kv', 'to_v')] = v_chunk[1] elif 'to_q' in diffusers_name: parts = diffusers_name.split('.') parts[2] = 'attn' diffusers_name = '.'.join(parts) updated_state_dict[diffusers_name] = value elif 'to_out' in diffusers_name: parts = diffusers_name.split('.') parts[2] = 'attn' diffusers_name = '.'.join(parts) updated_state_dict[diffusers_name.replace('to_out', 'to_out.0')] = value else: diffusers_name = diffusers_name.replace('0.1.0', '0.ff.0') diffusers_name = diffusers_name.replace('0.1.1', '0.ff.1.net.0.proj') 
diffusers_name = diffusers_name.replace('0.1.3', '0.ff.1.net.2') diffusers_name = diffusers_name.replace('1.1.0', '1.ff.0') diffusers_name = diffusers_name.replace('1.1.1', '1.ff.1.net.0.proj') diffusers_name = diffusers_name.replace('1.1.3', '1.ff.1.net.2') diffusers_name = diffusers_name.replace('2.1.0', '2.ff.0') diffusers_name = diffusers_name.replace('2.1.1', '2.ff.1.net.0.proj') diffusers_name = diffusers_name.replace('2.1.3', '2.ff.1.net.2') diffusers_name = diffusers_name.replace('3.1.0', '3.ff.0') diffusers_name = diffusers_name.replace('3.1.1', '3.ff.1.net.0.proj') diffusers_name = diffusers_name.replace('3.1.3', '3.ff.1.net.2') updated_state_dict[diffusers_name] = value if not low_cpu_mem_usage: image_projection.load_state_dict(updated_state_dict, strict=True) else: load_model_dict_into_meta(image_projection, updated_state_dict, device=self.device, dtype=self.dtype) return image_projection def _convert_ip_adapter_attn_to_diffusers(self, state_dicts, low_cpu_mem_usage=False): from ..models.attention_processor import IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0 if low_cpu_mem_usage: if is_accelerate_available(): from accelerate import init_empty_weights else: low_cpu_mem_usage = False logger.warning('Cannot initialize model with low cpu memory usage because `accelerate` was not found in the environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip install accelerate\n```\n.') if low_cpu_mem_usage is True and (not is_torch_version('>=', '1.9.0')): raise NotImplementedError('Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set `low_cpu_mem_usage=False`.') attn_procs = {} key_id = 1 init_context = init_empty_weights if low_cpu_mem_usage else nullcontext for name in self.attn_processors.keys(): cross_attention_dim = None if name.endswith('attn1.processor') else self.config.cross_attention_dim if name.startswith('mid_block'): hidden_size = self.config.block_out_channels[-1] elif name.startswith('up_blocks'): block_id = int(name[len('up_blocks.')]) hidden_size = list(reversed(self.config.block_out_channels))[block_id] elif name.startswith('down_blocks'): block_id = int(name[len('down_blocks.')]) hidden_size = self.config.block_out_channels[block_id] if cross_attention_dim is None or 'motion_modules' in name: attn_processor_class = self.attn_processors[name].__class__ attn_procs[name] = attn_processor_class() else: attn_processor_class = IPAdapterAttnProcessor2_0 if hasattr(F, 'scaled_dot_product_attention') else IPAdapterAttnProcessor num_image_text_embeds = [] for state_dict in state_dicts: if 'proj.weight' in state_dict['image_proj']: num_image_text_embeds += [4] elif 'proj.3.weight' in state_dict['image_proj']: num_image_text_embeds += [257] elif 'perceiver_resampler.proj_in.weight' in state_dict['image_proj']: num_image_text_embeds += [4] elif 'norm.weight' in state_dict['image_proj']: num_image_text_embeds += [4] else: num_image_text_embeds += [state_dict['image_proj']['latents'].shape[1]] with init_context(): attn_procs[name] = attn_processor_class(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0, num_tokens=num_image_text_embeds) value_dict = {} for (i, state_dict) in enumerate(state_dicts): value_dict.update({f'to_k_ip.{i}.weight': state_dict['ip_adapter'][f'{key_id}.to_k_ip.weight']}) value_dict.update({f'to_v_ip.{i}.weight': 
state_dict['ip_adapter'][f'{key_id}.to_v_ip.weight']}) if not low_cpu_mem_usage: attn_procs[name].load_state_dict(value_dict) else: device = next(iter(value_dict.values())).device dtype = next(iter(value_dict.values())).dtype load_model_dict_into_meta(attn_procs[name], value_dict, device=device, dtype=dtype) key_id += 2 return attn_procs def _load_ip_adapter_weights(self, state_dicts, low_cpu_mem_usage=False): if not isinstance(state_dicts, list): state_dicts = [state_dicts] if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'text_proj' and (not hasattr(self, 'text_encoder_hid_proj')): self.text_encoder_hid_proj = self.encoder_hid_proj self.encoder_hid_proj = None attn_procs = self._convert_ip_adapter_attn_to_diffusers(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage) self.set_attn_processor(attn_procs) image_projection_layers = [] for state_dict in state_dicts: image_projection_layer = self._convert_ip_adapter_image_proj_to_diffusers(state_dict['image_proj'], low_cpu_mem_usage=low_cpu_mem_usage) image_projection_layers.append(image_projection_layer) self.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers) self.config.encoder_hid_dim_type = 'ip_image_proj' self.to(dtype=self.dtype, device=self.device) def _load_ip_adapter_loras(self, state_dicts): lora_dicts = {} for (key_id, name) in enumerate(self.attn_processors.keys()): for (i, state_dict) in enumerate(state_dicts): if f'{key_id}.to_k_lora.down.weight' in state_dict['ip_adapter']: if i not in lora_dicts: lora_dicts[i] = {} lora_dicts[i].update({f'unet.{name}.to_k_lora.down.weight': state_dict['ip_adapter'][f'{key_id}.to_k_lora.down.weight']}) lora_dicts[i].update({f'unet.{name}.to_q_lora.down.weight': state_dict['ip_adapter'][f'{key_id}.to_q_lora.down.weight']}) lora_dicts[i].update({f'unet.{name}.to_v_lora.down.weight': state_dict['ip_adapter'][f'{key_id}.to_v_lora.down.weight']}) lora_dicts[i].update({f'unet.{name}.to_out_lora.down.weight': state_dict['ip_adapter'][f'{key_id}.to_out_lora.down.weight']}) lora_dicts[i].update({f'unet.{name}.to_k_lora.up.weight': state_dict['ip_adapter'][f'{key_id}.to_k_lora.up.weight']}) lora_dicts[i].update({f'unet.{name}.to_q_lora.up.weight': state_dict['ip_adapter'][f'{key_id}.to_q_lora.up.weight']}) lora_dicts[i].update({f'unet.{name}.to_v_lora.up.weight': state_dict['ip_adapter'][f'{key_id}.to_v_lora.up.weight']}) lora_dicts[i].update({f'unet.{name}.to_out_lora.up.weight': state_dict['ip_adapter'][f'{key_id}.to_out_lora.up.weight']}) return lora_dicts # File: diffusers-main/src/diffusers/loaders/unet_loader_utils.py import copy from typing import TYPE_CHECKING, Dict, List, Union from ..utils import logging if TYPE_CHECKING: from ..models import UNet2DConditionModel logger = logging.get_logger(__name__) def _translate_into_actual_layer_name(name): if name == 'mid': return 'mid_block.attentions.0' (updown, block, attn) = name.split('.') updown = updown.replace('down', 'down_blocks').replace('up', 'up_blocks') block = block.replace('block_', '') attn = 'attentions.' 
+ attn return '.'.join((updown, block, attn)) def _maybe_expand_lora_scales(unet: 'UNet2DConditionModel', weight_scales: List[Union[float, Dict]], default_scale=1.0): blocks_with_transformer = {'down': [i for (i, block) in enumerate(unet.down_blocks) if hasattr(block, 'attentions')], 'up': [i for (i, block) in enumerate(unet.up_blocks) if hasattr(block, 'attentions')]} transformer_per_block = {'down': unet.config.layers_per_block, 'up': unet.config.layers_per_block + 1} expanded_weight_scales = [_maybe_expand_lora_scales_for_one_adapter(weight_for_adapter, blocks_with_transformer, transformer_per_block, unet.state_dict(), default_scale=default_scale) for weight_for_adapter in weight_scales] return expanded_weight_scales def _maybe_expand_lora_scales_for_one_adapter(scales: Union[float, Dict], blocks_with_transformer: Dict[str, int], transformer_per_block: Dict[str, int], state_dict: Dict, default_scale: float=1.0): if sorted(blocks_with_transformer.keys()) != ['down', 'up']: raise ValueError("blocks_with_transformer needs to be a dict with keys `'down'` and `'up'`") if sorted(transformer_per_block.keys()) != ['down', 'up']: raise ValueError("transformer_per_block needs to be a dict with keys `'down'` and `'up'`") if not isinstance(scales, dict): return scales scales = copy.deepcopy(scales) if 'mid' not in scales: scales['mid'] = default_scale elif isinstance(scales['mid'], list): if len(scales['mid']) == 1: scales['mid'] = scales['mid'][0] else: raise ValueError(f"Expected 1 scale for mid, got {len(scales['mid'])}.") for updown in ['up', 'down']: if updown not in scales: scales[updown] = default_scale if not isinstance(scales[updown], dict): scales[updown] = {f'block_{i}': copy.deepcopy(scales[updown]) for i in blocks_with_transformer[updown]} for i in blocks_with_transformer[updown]: block = f'block_{i}' if block not in scales[updown]: scales[updown][block] = default_scale if not isinstance(scales[updown][block], list): scales[updown][block] = [scales[updown][block] for _ in range(transformer_per_block[updown])] elif len(scales[updown][block]) == 1: scales[updown][block] = scales[updown][block] * transformer_per_block[updown] elif len(scales[updown][block]) != transformer_per_block[updown]: raise ValueError(f'Expected {transformer_per_block[updown]} scales for {updown}.{block}, got {len(scales[updown][block])}.') for i in blocks_with_transformer[updown]: block = f'block_{i}' for (tf_idx, value) in enumerate(scales[updown][block]): scales[f'{updown}.{block}.{tf_idx}'] = value del scales[updown] for layer in scales.keys(): if not any((_translate_into_actual_layer_name(layer) in module for module in state_dict.keys())): raise ValueError(f"Can't set lora scale for layer {layer}. 
It either doesn't exist in this unet or it has no attentions.") return {_translate_into_actual_layer_name(name): weight for (name, weight) in scales.items()} # File: diffusers-main/src/diffusers/loaders/utils.py from typing import Dict import torch class AttnProcsLayers(torch.nn.Module): def __init__(self, state_dict: Dict[str, torch.Tensor]): super().__init__() self.layers = torch.nn.ModuleList(state_dict.values()) self.mapping = dict(enumerate(state_dict.keys())) self.rev_mapping = {v: k for (k, v) in enumerate(state_dict.keys())} self.split_keys = ['.processor', '.self_attn'] def map_to(module, state_dict, *args, **kwargs): new_state_dict = {} for (key, value) in state_dict.items(): num = int(key.split('.')[1]) new_key = key.replace(f'layers.{num}', module.mapping[num]) new_state_dict[new_key] = value return new_state_dict def remap_key(key, state_dict): for k in self.split_keys: if k in key: return key.split(k)[0] + k raise ValueError(f'There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}.') def map_from(module, state_dict, *args, **kwargs): all_keys = list(state_dict.keys()) for key in all_keys: replace_key = remap_key(key, state_dict) new_key = key.replace(replace_key, f'layers.{module.rev_mapping[replace_key]}') state_dict[new_key] = state_dict[key] del state_dict[key] self._register_state_dict_hook(map_to) self._register_load_state_dict_pre_hook(map_from, with_module=True) # File: diffusers-main/src/diffusers/models/__init__.py from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, is_flax_available, is_torch_available _import_structure = {} if is_torch_available(): _import_structure['adapter'] = ['MultiAdapter', 'T2IAdapter'] _import_structure['autoencoders.autoencoder_asym_kl'] = ['AsymmetricAutoencoderKL'] _import_structure['autoencoders.autoencoder_kl'] = ['AutoencoderKL'] _import_structure['autoencoders.autoencoder_kl_cogvideox'] = ['AutoencoderKLCogVideoX'] _import_structure['autoencoders.autoencoder_kl_temporal_decoder'] = ['AutoencoderKLTemporalDecoder'] _import_structure['autoencoders.autoencoder_oobleck'] = ['AutoencoderOobleck'] _import_structure['autoencoders.autoencoder_tiny'] = ['AutoencoderTiny'] _import_structure['autoencoders.consistency_decoder_vae'] = ['ConsistencyDecoderVAE'] _import_structure['autoencoders.vq_model'] = ['VQModel'] _import_structure['controlnet'] = ['ControlNetModel'] _import_structure['controlnet_flux'] = ['FluxControlNetModel', 'FluxMultiControlNetModel'] _import_structure['controlnet_hunyuan'] = ['HunyuanDiT2DControlNetModel', 'HunyuanDiT2DMultiControlNetModel'] _import_structure['controlnet_sd3'] = ['SD3ControlNetModel', 'SD3MultiControlNetModel'] _import_structure['controlnet_sparsectrl'] = ['SparseControlNetModel'] _import_structure['controlnet_xs'] = ['ControlNetXSAdapter', 'UNetControlNetXSModel'] _import_structure['embeddings'] = ['ImageProjection'] _import_structure['modeling_utils'] = ['ModelMixin'] _import_structure['transformers.auraflow_transformer_2d'] = ['AuraFlowTransformer2DModel'] _import_structure['transformers.cogvideox_transformer_3d'] = ['CogVideoXTransformer3DModel'] _import_structure['transformers.dit_transformer_2d'] = ['DiTTransformer2DModel'] _import_structure['transformers.dual_transformer_2d'] = ['DualTransformer2DModel'] _import_structure['transformers.hunyuan_transformer_2d'] = ['HunyuanDiT2DModel'] _import_structure['transformers.latte_transformer_3d'] = ['LatteTransformer3DModel'] 
_import_structure['transformers.lumina_nextdit2d'] = ['LuminaNextDiT2DModel'] _import_structure['transformers.pixart_transformer_2d'] = ['PixArtTransformer2DModel'] _import_structure['transformers.prior_transformer'] = ['PriorTransformer'] _import_structure['transformers.stable_audio_transformer'] = ['StableAudioDiTModel'] _import_structure['transformers.t5_film_transformer'] = ['T5FilmDecoder'] _import_structure['transformers.transformer_2d'] = ['Transformer2DModel'] _import_structure['transformers.transformer_flux'] = ['FluxTransformer2DModel'] _import_structure['transformers.transformer_sd3'] = ['SD3Transformer2DModel'] _import_structure['transformers.transformer_temporal'] = ['TransformerTemporalModel'] _import_structure['unets.unet_1d'] = ['UNet1DModel'] _import_structure['unets.unet_2d'] = ['UNet2DModel'] _import_structure['unets.unet_2d_condition'] = ['UNet2DConditionModel'] _import_structure['unets.unet_3d_condition'] = ['UNet3DConditionModel'] _import_structure['unets.unet_i2vgen_xl'] = ['I2VGenXLUNet'] _import_structure['unets.unet_kandinsky3'] = ['Kandinsky3UNet'] _import_structure['unets.unet_motion_model'] = ['MotionAdapter', 'UNetMotionModel'] _import_structure['unets.unet_spatio_temporal_condition'] = ['UNetSpatioTemporalConditionModel'] _import_structure['unets.unet_stable_cascade'] = ['StableCascadeUNet'] _import_structure['unets.uvit_2d'] = ['UVit2DModel'] if is_flax_available(): _import_structure['controlnet_flax'] = ['FlaxControlNetModel'] _import_structure['unets.unet_2d_condition_flax'] = ['FlaxUNet2DConditionModel'] _import_structure['vae_flax'] = ['FlaxAutoencoderKL'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if is_torch_available(): from .adapter import MultiAdapter, T2IAdapter from .autoencoders import AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderKLCogVideoX, AutoencoderKLTemporalDecoder, AutoencoderOobleck, AutoencoderTiny, ConsistencyDecoderVAE, VQModel from .controlnet import ControlNetModel from .controlnet_flux import FluxControlNetModel, FluxMultiControlNetModel from .controlnet_hunyuan import HunyuanDiT2DControlNetModel, HunyuanDiT2DMultiControlNetModel from .controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel from .controlnet_sparsectrl import SparseControlNetModel from .controlnet_xs import ControlNetXSAdapter, UNetControlNetXSModel from .embeddings import ImageProjection from .modeling_utils import ModelMixin from .transformers import AuraFlowTransformer2DModel, CogVideoXTransformer3DModel, DiTTransformer2DModel, DualTransformer2DModel, FluxTransformer2DModel, HunyuanDiT2DModel, LatteTransformer3DModel, LuminaNextDiT2DModel, PixArtTransformer2DModel, PriorTransformer, SD3Transformer2DModel, StableAudioDiTModel, T5FilmDecoder, Transformer2DModel, TransformerTemporalModel from .unets import I2VGenXLUNet, Kandinsky3UNet, MotionAdapter, StableCascadeUNet, UNet1DModel, UNet2DConditionModel, UNet2DModel, UNet3DConditionModel, UNetMotionModel, UNetSpatioTemporalConditionModel, UVit2DModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unets import FlaxUNet2DConditionModel from .vae_flax import FlaxAutoencoderKL else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/models/activations.py import torch import torch.nn.functional as F from torch import nn from ..utils import deprecate from ..utils.import_utils import is_torch_npu_available if is_torch_npu_available(): import torch_npu ACTIVATION_FUNCTIONS 
= {'swish': nn.SiLU(), 'silu': nn.SiLU(), 'mish': nn.Mish(), 'gelu': nn.GELU(), 'relu': nn.ReLU()} def get_activation(act_fn: str) -> nn.Module: act_fn = act_fn.lower() if act_fn in ACTIVATION_FUNCTIONS: return ACTIVATION_FUNCTIONS[act_fn] else: raise ValueError(f'Unsupported activation function: {act_fn}') class FP32SiLU(nn.Module): def __init__(self): super().__init__() def forward(self, inputs: torch.Tensor) -> torch.Tensor: return F.silu(inputs.float(), inplace=False).to(inputs.dtype) class GELU(nn.Module): def __init__(self, dim_in: int, dim_out: int, approximate: str='none', bias: bool=True): super().__init__() self.proj = nn.Linear(dim_in, dim_out, bias=bias) self.approximate = approximate def gelu(self, gate: torch.Tensor) -> torch.Tensor: if gate.device.type != 'mps': return F.gelu(gate, approximate=self.approximate) return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype) def forward(self, hidden_states): hidden_states = self.proj(hidden_states) hidden_states = self.gelu(hidden_states) return hidden_states class GEGLU(nn.Module): def __init__(self, dim_in: int, dim_out: int, bias: bool=True): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias) def gelu(self, gate: torch.Tensor) -> torch.Tensor: if gate.device.type != 'mps': return F.gelu(gate) return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype) def forward(self, hidden_states, *args, **kwargs): if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) hidden_states = self.proj(hidden_states) if is_torch_npu_available(): return torch_npu.npu_geglu(hidden_states, dim=-1, approximate=1)[0] else: (hidden_states, gate) = hidden_states.chunk(2, dim=-1) return hidden_states * self.gelu(gate) class SwiGLU(nn.Module): def __init__(self, dim_in: int, dim_out: int, bias: bool=True): super().__init__() self.proj = nn.Linear(dim_in, dim_out * 2, bias=bias) self.activation = nn.SiLU() def forward(self, hidden_states): hidden_states = self.proj(hidden_states) (hidden_states, gate) = hidden_states.chunk(2, dim=-1) return hidden_states * self.activation(gate) class ApproximateGELU(nn.Module): def __init__(self, dim_in: int, dim_out: int, bias: bool=True): super().__init__() self.proj = nn.Linear(dim_in, dim_out, bias=bias) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj(x) return x * torch.sigmoid(1.702 * x) # File: diffusers-main/src/diffusers/models/adapter.py import os from typing import Callable, List, Optional, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import logging from .modeling_utils import ModelMixin logger = logging.get_logger(__name__) class MultiAdapter(ModelMixin): def __init__(self, adapters: List['T2IAdapter']): super(MultiAdapter, self).__init__() self.num_adapter = len(adapters) self.adapters = nn.ModuleList(adapters) if len(adapters) == 0: raise ValueError('Expecting at least one adapter') if len(adapters) == 1: raise ValueError('For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`') first_adapter_total_downscale_factor = adapters[0].total_downscale_factor first_adapter_downscale_factor = adapters[0].downscale_factor for idx 
in range(1, len(adapters)): if adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor or adapters[idx].downscale_factor != first_adapter_downscale_factor: raise ValueError(f'Expecting all adapters to have the same downscaling behavior, but got:\nadapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\nadapters[0].downscale_factor={first_adapter_downscale_factor}\nadapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\nadapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}') self.total_downscale_factor = first_adapter_total_downscale_factor self.downscale_factor = first_adapter_downscale_factor def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]]=None) -> List[torch.Tensor]: if adapter_weights is None: adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter) else: adapter_weights = torch.tensor(adapter_weights) accume_state = None for (x, w, adapter) in zip(xs, adapter_weights, self.adapters): features = adapter(x) if accume_state is None: accume_state = features for i in range(len(accume_state)): accume_state[i] = w * accume_state[i] else: for i in range(len(features)): accume_state[i] += w * features[i] return accume_state def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool=True, save_function: Callable=None, safe_serialization: bool=True, variant: Optional[str]=None): idx = 0 model_path_to_save = save_directory for adapter in self.adapters: adapter.save_pretrained(model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant) idx += 1 model_path_to_save = model_path_to_save + f'_{idx}' @classmethod def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): idx = 0 adapters = [] model_path_to_load = pretrained_model_path while os.path.isdir(model_path_to_load): adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs) adapters.append(adapter) idx += 1 model_path_to_load = pretrained_model_path + f'_{idx}' logger.info(f'{len(adapters)} adapters loaded from {pretrained_model_path}.') if len(adapters) == 0: raise ValueError(f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.") return cls(adapters) class T2IAdapter(ModelMixin, ConfigMixin): @register_to_config def __init__(self, in_channels: int=3, channels: List[int]=[320, 640, 1280, 1280], num_res_blocks: int=2, downscale_factor: int=8, adapter_type: str='full_adapter'): super().__init__() if adapter_type == 'full_adapter': self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == 'full_adapter_xl': self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == 'light_adapter': self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor) else: raise ValueError(f"Unsupported adapter_type: '{adapter_type}'. 
Choose either 'full_adapter' or 'full_adapter_xl' or 'light_adapter'.") def forward(self, x: torch.Tensor) -> List[torch.Tensor]: return self.adapter(x) @property def total_downscale_factor(self): return self.adapter.total_downscale_factor @property def downscale_factor(self): return self.adapter.unshuffle.downscale_factor class FullAdapter(nn.Module): def __init__(self, in_channels: int=3, channels: List[int]=[320, 640, 1280, 1280], num_res_blocks: int=2, downscale_factor: int=8): super().__init__() in_channels = in_channels * downscale_factor ** 2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) self.body = nn.ModuleList([AdapterBlock(channels[0], channels[0], num_res_blocks), *[AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True) for i in range(1, len(channels))]]) self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: x = self.unshuffle(x) x = self.conv_in(x) features = [] for block in self.body: x = block(x) features.append(x) return features class FullAdapterXL(nn.Module): def __init__(self, in_channels: int=3, channels: List[int]=[320, 640, 1280, 1280], num_res_blocks: int=2, downscale_factor: int=16): super().__init__() in_channels = in_channels * downscale_factor ** 2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) self.body = [] for i in range(len(channels)): if i == 1: self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks)) elif i == 2: self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)) else: self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks)) self.body = nn.ModuleList(self.body) self.total_downscale_factor = downscale_factor * 2 def forward(self, x: torch.Tensor) -> List[torch.Tensor]: x = self.unshuffle(x) x = self.conv_in(x) features = [] for block in self.body: x = block(x) features.append(x) return features class AdapterBlock(nn.Module): def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool=False): super().__init__() self.downsample = None if down: self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) self.in_conv = None if in_channels != out_channels: self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) self.resnets = nn.Sequential(*[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)]) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.downsample is not None: x = self.downsample(x) if self.in_conv is not None: x = self.in_conv(x) x = self.resnets(x) return x class AdapterResnetBlock(nn.Module): def __init__(self, channels: int): super().__init__() self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.act = nn.ReLU() self.block2 = nn.Conv2d(channels, channels, kernel_size=1) def forward(self, x: torch.Tensor) -> torch.Tensor: h = self.act(self.block1(x)) h = self.block2(h) return h + x class LightAdapter(nn.Module): def __init__(self, in_channels: int=3, channels: List[int]=[320, 640, 1280], num_res_blocks: int=4, downscale_factor: int=8): super().__init__() in_channels = in_channels * downscale_factor ** 2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.body = nn.ModuleList([LightAdapterBlock(in_channels, channels[0], num_res_blocks), *[LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True) for i in 
range(len(channels) - 1)], LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True)]) self.total_downscale_factor = downscale_factor * 2 ** len(channels) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: x = self.unshuffle(x) features = [] for block in self.body: x = block(x) features.append(x) return features class LightAdapterBlock(nn.Module): def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool=False): super().__init__() mid_channels = out_channels // 4 self.downsample = None if down: self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1) self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)]) self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.downsample is not None: x = self.downsample(x) x = self.in_conv(x) x = self.resnets(x) x = self.out_conv(x) return x class LightAdapterResnetBlock(nn.Module): def __init__(self, channels: int): super().__init__() self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.act = nn.ReLU() self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) def forward(self, x: torch.Tensor) -> torch.Tensor: h = self.act(self.block1(x)) h = self.block2(h) return h + x # File: diffusers-main/src/diffusers/models/attention.py from typing import Any, Dict, List, Optional, Tuple import torch import torch.nn.functional as F from torch import nn from ..utils import deprecate, logging from ..utils.torch_utils import maybe_allow_in_graph from .activations import GEGLU, GELU, ApproximateGELU, FP32SiLU, SwiGLU from .attention_processor import Attention, JointAttnProcessor2_0 from .embeddings import SinusoidalPositionalEmbedding from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm logger = logging.get_logger(__name__) def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int): if hidden_states.shape[chunk_dim] % chunk_size != 0: raise ValueError(f'`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.') num_chunks = hidden_states.shape[chunk_dim] // chunk_size ff_output = torch.cat([ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim) return ff_output @maybe_allow_in_graph class GatedSelfAttentionDense(nn.Module): def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int): super().__init__() self.linear = nn.Linear(context_dim, query_dim) self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head) self.ff = FeedForward(query_dim, activation_fn='geglu') self.norm1 = nn.LayerNorm(query_dim) self.norm2 = nn.LayerNorm(query_dim) self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.0))) self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.0))) self.enabled = True def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor: if not self.enabled: return x n_visual = x.shape[1] objs = self.linear(objs) x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :] x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x)) return x @maybe_allow_in_graph class JointTransformerBlock(nn.Module): def __init__(self, dim, num_attention_heads, attention_head_dim, context_pre_only=False): super().__init__() self.context_pre_only = context_pre_only context_norm_type = 'ada_norm_continous' if context_pre_only else 'ada_norm_zero' self.norm1 = AdaLayerNormZero(dim) if context_norm_type == 'ada_norm_continous': self.norm1_context = AdaLayerNormContinuous(dim, dim, elementwise_affine=False, eps=1e-06, bias=True, norm_type='layer_norm') elif context_norm_type == 'ada_norm_zero': self.norm1_context = AdaLayerNormZero(dim) else: raise ValueError(f'Unknown context_norm_type: {context_norm_type}, currently only support `ada_norm_continous`, `ada_norm_zero`') if hasattr(F, 'scaled_dot_product_attention'): processor = JointAttnProcessor2_0() else: raise ValueError('The current PyTorch version does not support the `scaled_dot_product_attention` function.') self.attn = Attention(query_dim=dim, cross_attention_dim=None, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=context_pre_only, bias=True, processor=processor) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-06) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn='gelu-approximate') if not context_pre_only: self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-06) self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn='gelu-approximate') else: self.norm2_context = None self.ff_context = None self._chunk_size = None self._chunk_dim = 0 def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int=0): self._chunk_size = chunk_size self._chunk_dim = dim def forward(self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor): (norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp) = self.norm1(hidden_states, emb=temb) if self.context_pre_only: norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, temb) else: (norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp) = self.norm1_context(encoder_hidden_states, emb=temb) (attn_output, context_attn_output) = self.attn(hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states) attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = 
hidden_states + attn_output norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = hidden_states + ff_output if self.context_pre_only: encoder_hidden_states = None else: context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] if self._chunk_size is not None: context_ff_output = _chunked_feed_forward(self.ff_context, norm_encoder_hidden_states, self._chunk_dim, self._chunk_size) else: context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output return (encoder_hidden_states, hidden_states) @maybe_allow_in_graph class BasicTransformerBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int]=None, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None, attention_bias: bool=False, only_cross_attention: bool=False, double_self_attention: bool=False, upcast_attention: bool=False, norm_elementwise_affine: bool=True, norm_type: str='layer_norm', norm_eps: float=1e-05, final_dropout: bool=False, attention_type: str='default', positional_embeddings: Optional[str]=None, num_positional_embeddings: Optional[int]=None, ada_norm_continous_conditioning_embedding_dim: Optional[int]=None, ada_norm_bias: Optional[int]=None, ff_inner_dim: Optional[int]=None, ff_bias: bool=True, attention_out_bias: bool=True): super().__init__() self.dim = dim self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.dropout = dropout self.cross_attention_dim = cross_attention_dim self.activation_fn = activation_fn self.attention_bias = attention_bias self.double_self_attention = double_self_attention self.norm_elementwise_affine = norm_elementwise_affine self.positional_embeddings = positional_embeddings self.num_positional_embeddings = num_positional_embeddings self.only_cross_attention = only_cross_attention self.use_ada_layer_norm_zero = num_embeds_ada_norm is not None and norm_type == 'ada_norm_zero' self.use_ada_layer_norm = num_embeds_ada_norm is not None and norm_type == 'ada_norm' self.use_ada_layer_norm_single = norm_type == 'ada_norm_single' self.use_layer_norm = norm_type == 'layer_norm' self.use_ada_layer_norm_continuous = norm_type == 'ada_norm_continuous' if norm_type in ('ada_norm', 'ada_norm_zero') and num_embeds_ada_norm is None: raise ValueError(f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. 
Please make sure to define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.') self.norm_type = norm_type self.num_embeds_ada_norm = num_embeds_ada_norm if positional_embeddings and num_positional_embeddings is None: raise ValueError('If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined.') if positional_embeddings == 'sinusoidal': self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) else: self.pos_embed = None if norm_type == 'ada_norm': self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) elif norm_type == 'ada_norm_zero': self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm) elif norm_type == 'ada_norm_continuous': self.norm1 = AdaLayerNormContinuous(dim, ada_norm_continous_conditioning_embedding_dim, norm_elementwise_affine, norm_eps, ada_norm_bias, 'rms_norm') else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention(query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, out_bias=attention_out_bias) if cross_attention_dim is not None or double_self_attention: if norm_type == 'ada_norm': self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) elif norm_type == 'ada_norm_continuous': self.norm2 = AdaLayerNormContinuous(dim, ada_norm_continous_conditioning_embedding_dim, norm_elementwise_affine, norm_eps, ada_norm_bias, 'rms_norm') else: self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, out_bias=attention_out_bias) else: if norm_type == 'ada_norm_single': self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) else: self.norm2 = None self.attn2 = None if norm_type == 'ada_norm_continuous': self.norm3 = AdaLayerNormContinuous(dim, ada_norm_continous_conditioning_embedding_dim, norm_elementwise_affine, norm_eps, ada_norm_bias, 'layer_norm') elif norm_type in ['ada_norm_zero', 'ada_norm', 'layer_norm']: self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) elif norm_type == 'layer_norm_i2vgen': self.norm3 = None self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias) if attention_type == 'gated' or attention_type == 'gated-text-image': self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim) if norm_type == 'ada_norm_single': self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim ** 0.5) self._chunk_size = None self._chunk_dim = 0 def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int=0): self._chunk_size = chunk_size self._chunk_dim = dim def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, timestep: Optional[torch.LongTensor]=None, cross_attention_kwargs: Dict[str, Any]=None, class_labels: Optional[torch.LongTensor]=None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]]=None) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: 
logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') batch_size = hidden_states.shape[0] if self.norm_type == 'ada_norm': norm_hidden_states = self.norm1(hidden_states, timestep) elif self.norm_type == 'ada_norm_zero': (norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp) = self.norm1(hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype) elif self.norm_type in ['layer_norm', 'layer_norm_i2vgen']: norm_hidden_states = self.norm1(hidden_states) elif self.norm_type == 'ada_norm_continuous': norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs['pooled_text_emb']) elif self.norm_type == 'ada_norm_single': (shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp) = (self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)).chunk(6, dim=1) norm_hidden_states = self.norm1(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa else: raise ValueError('Incorrect norm used') if self.pos_embed is not None: norm_hidden_states = self.pos_embed(norm_hidden_states) cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} gligen_kwargs = cross_attention_kwargs.pop('gligen', None) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs) if self.norm_type == 'ada_norm_zero': attn_output = gate_msa.unsqueeze(1) * attn_output elif self.norm_type == 'ada_norm_single': attn_output = gate_msa * attn_output hidden_states = attn_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) if gligen_kwargs is not None: hidden_states = self.fuser(hidden_states, gligen_kwargs['objs']) if self.attn2 is not None: if self.norm_type == 'ada_norm': norm_hidden_states = self.norm2(hidden_states, timestep) elif self.norm_type in ['ada_norm_zero', 'layer_norm', 'layer_norm_i2vgen']: norm_hidden_states = self.norm2(hidden_states) elif self.norm_type == 'ada_norm_single': norm_hidden_states = hidden_states elif self.norm_type == 'ada_norm_continuous': norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs['pooled_text_emb']) else: raise ValueError('Incorrect norm') if self.pos_embed is not None and self.norm_type != 'ada_norm_single': norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs) hidden_states = attn_output + hidden_states if self.norm_type == 'ada_norm_continuous': norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs['pooled_text_emb']) elif not self.norm_type == 'ada_norm_single': norm_hidden_states = self.norm3(hidden_states) if self.norm_type == 'ada_norm_zero': norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self.norm_type == 'ada_norm_single': norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.norm_type == 'ada_norm_zero': ff_output = gate_mlp.unsqueeze(1) * ff_output elif self.norm_type == 'ada_norm_single': ff_output = gate_mlp * ff_output hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: 
hidden_states = hidden_states.squeeze(1) return hidden_states class LuminaFeedForward(nn.Module): def __init__(self, dim: int, inner_dim: int, multiple_of: Optional[int]=256, ffn_dim_multiplier: Optional[float]=None): super().__init__() inner_dim = int(2 * inner_dim / 3) if ffn_dim_multiplier is not None: inner_dim = int(ffn_dim_multiplier * inner_dim) inner_dim = multiple_of * ((inner_dim + multiple_of - 1) // multiple_of) self.linear_1 = nn.Linear(dim, inner_dim, bias=False) self.linear_2 = nn.Linear(inner_dim, dim, bias=False) self.linear_3 = nn.Linear(dim, inner_dim, bias=False) self.silu = FP32SiLU() def forward(self, x): return self.linear_2(self.silu(self.linear_1(x)) * self.linear_3(x)) @maybe_allow_in_graph class TemporalBasicTransformerBlock(nn.Module): def __init__(self, dim: int, time_mix_inner_dim: int, num_attention_heads: int, attention_head_dim: int, cross_attention_dim: Optional[int]=None): super().__init__() self.is_res = dim == time_mix_inner_dim self.norm_in = nn.LayerNorm(dim) self.ff_in = FeedForward(dim, dim_out=time_mix_inner_dim, activation_fn='geglu') self.norm1 = nn.LayerNorm(time_mix_inner_dim) self.attn1 = Attention(query_dim=time_mix_inner_dim, heads=num_attention_heads, dim_head=attention_head_dim, cross_attention_dim=None) if cross_attention_dim is not None: self.norm2 = nn.LayerNorm(time_mix_inner_dim) self.attn2 = Attention(query_dim=time_mix_inner_dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim) else: self.norm2 = None self.attn2 = None self.norm3 = nn.LayerNorm(time_mix_inner_dim) self.ff = FeedForward(time_mix_inner_dim, activation_fn='geglu') self._chunk_size = None self._chunk_dim = None def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): self._chunk_size = chunk_size self._chunk_dim = 1 def forward(self, hidden_states: torch.Tensor, num_frames: int, encoder_hidden_states: Optional[torch.Tensor]=None) -> torch.Tensor: batch_size = hidden_states.shape[0] (batch_frames, seq_length, channels) = hidden_states.shape batch_size = batch_frames // num_frames hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) residual = hidden_states hidden_states = self.norm_in(hidden_states) if self._chunk_size is not None: hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size) else: hidden_states = self.ff_in(hidden_states) if self.is_res: hidden_states = hidden_states + residual norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = attn_output + hidden_states norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.is_res: hidden_states = ff_output + hidden_states else: hidden_states = ff_output hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) return 
hidden_states class SkipFFTransformerBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, kv_input_dim: int, kv_input_dim_proj_use_bias: bool, dropout=0.0, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, attention_out_bias: bool=True): super().__init__() if kv_input_dim != dim: self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias) else: self.kv_mapper = None self.norm1 = RMSNorm(dim, 1e-06) self.attn1 = Attention(query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim, out_bias=attention_out_bias) self.norm2 = RMSNorm(dim, 1e-06) self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, out_bias=attention_out_bias) def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs): cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} if self.kv_mapper is not None: encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states)) norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=encoder_hidden_states, **cross_attention_kwargs) hidden_states = attn_output + hidden_states norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states, **cross_attention_kwargs) hidden_states = attn_output + hidden_states return hidden_states @maybe_allow_in_graph class FreeNoiseTransformerBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout: float=0.0, cross_attention_dim: Optional[int]=None, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None, attention_bias: bool=False, only_cross_attention: bool=False, double_self_attention: bool=False, upcast_attention: bool=False, norm_elementwise_affine: bool=True, norm_type: str='layer_norm', norm_eps: float=1e-05, final_dropout: bool=False, positional_embeddings: Optional[str]=None, num_positional_embeddings: Optional[int]=None, ff_inner_dim: Optional[int]=None, ff_bias: bool=True, attention_out_bias: bool=True, context_length: int=16, context_stride: int=4, weighting_scheme: str='pyramid'): super().__init__() self.dim = dim self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.dropout = dropout self.cross_attention_dim = cross_attention_dim self.activation_fn = activation_fn self.attention_bias = attention_bias self.double_self_attention = double_self_attention self.norm_elementwise_affine = norm_elementwise_affine self.positional_embeddings = positional_embeddings self.num_positional_embeddings = num_positional_embeddings self.only_cross_attention = only_cross_attention self.set_free_noise_properties(context_length, context_stride, weighting_scheme) self.use_ada_layer_norm_zero = num_embeds_ada_norm is not None and norm_type == 'ada_norm_zero' self.use_ada_layer_norm = num_embeds_ada_norm is not None and norm_type == 'ada_norm' self.use_ada_layer_norm_single = norm_type == 'ada_norm_single' self.use_layer_norm = norm_type == 'layer_norm' self.use_ada_layer_norm_continuous = norm_type == 'ada_norm_continuous' if norm_type in ('ada_norm', 'ada_norm_zero') and num_embeds_ada_norm is None: raise ValueError(f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not 
defined. Please make sure to define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.') self.norm_type = norm_type self.num_embeds_ada_norm = num_embeds_ada_norm if positional_embeddings and num_positional_embeddings is None: raise ValueError('If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined.') if positional_embeddings == 'sinusoidal': self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) else: self.pos_embed = None self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention(query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, out_bias=attention_out_bias) if cross_attention_dim is not None or double_self_attention: self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, out_bias=attention_out_bias) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias) self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) self._chunk_size = None self._chunk_dim = 0 def _get_frame_indices(self, num_frames: int) -> List[Tuple[int, int]]: frame_indices = [] for i in range(0, num_frames - self.context_length + 1, self.context_stride): window_start = i window_end = min(num_frames, i + self.context_length) frame_indices.append((window_start, window_end)) return frame_indices def _get_frame_weights(self, num_frames: int, weighting_scheme: str='pyramid') -> List[float]: if weighting_scheme == 'flat': weights = [1.0] * num_frames elif weighting_scheme == 'pyramid': if num_frames % 2 == 0: mid = num_frames // 2 weights = list(range(1, mid + 1)) weights = weights + weights[::-1] else: mid = (num_frames + 1) // 2 weights = list(range(1, mid)) weights = weights + [mid] + weights[::-1] elif weighting_scheme == 'delayed_reverse_sawtooth': if num_frames % 2 == 0: mid = num_frames // 2 weights = [0.01] * (mid - 1) + [mid] weights = weights + list(range(mid, 0, -1)) else: mid = (num_frames + 1) // 2 weights = [0.01] * mid weights = weights + list(range(mid, 0, -1)) else: raise ValueError(f'Unsupported value for weighting_scheme={weighting_scheme}') return weights def set_free_noise_properties(self, context_length: int, context_stride: int, weighting_scheme: str='pyramid') -> None: self.context_length = context_length self.context_stride = context_stride self.weighting_scheme = weighting_scheme def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int=0) -> None: self._chunk_size = chunk_size self._chunk_dim = dim def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Dict[str, Any]=None, *args, **kwargs) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} device = hidden_states.device dtype = hidden_states.dtype num_frames = hidden_states.size(1) frame_indices = self._get_frame_indices(num_frames) frame_weights = self._get_frame_weights(self.context_length, self.weighting_scheme) frame_weights = torch.tensor(frame_weights, device=device, dtype=dtype).unsqueeze(0).unsqueeze(-1) is_last_frame_batch_complete = frame_indices[-1][1] == num_frames if not is_last_frame_batch_complete: if num_frames < self.context_length: raise ValueError(f'Expected num_frames={num_frames!r} to be greater than or equal to self.context_length={self.context_length!r}') last_frame_batch_length = num_frames - frame_indices[-1][1] frame_indices.append((num_frames - self.context_length, num_frames)) num_times_accumulated = torch.zeros((1, num_frames, 1), device=device) accumulated_values = torch.zeros_like(hidden_states) for (i, (frame_start, frame_end)) in enumerate(frame_indices): weights = torch.ones_like(num_times_accumulated[:, frame_start:frame_end]) weights *= frame_weights hidden_states_chunk = hidden_states[:, frame_start:frame_end] norm_hidden_states = self.norm1(hidden_states_chunk) if self.pos_embed is not None: norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs) hidden_states_chunk = attn_output + hidden_states_chunk if hidden_states_chunk.ndim == 4: hidden_states_chunk = hidden_states_chunk.squeeze(1) if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states_chunk) if self.pos_embed is not None and self.norm_type != 'ada_norm_single': norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs) hidden_states_chunk = attn_output + hidden_states_chunk if i == len(frame_indices) - 1 and (not is_last_frame_batch_complete): accumulated_values[:, -last_frame_batch_length:] += hidden_states_chunk[:, -last_frame_batch_length:] * weights[:, -last_frame_batch_length:] num_times_accumulated[:, -last_frame_batch_length:] += weights[:, -last_frame_batch_length:] else: accumulated_values[:, frame_start:frame_end] += hidden_states_chunk * weights num_times_accumulated[:, frame_start:frame_end] += weights hidden_states = torch.cat([torch.where(num_times_split > 0, accumulated_split / num_times_split, accumulated_split) for (accumulated_split, num_times_split) in zip(accumulated_values.split(self.context_length, dim=1), num_times_accumulated.split(self.context_length, dim=1))], dim=1).to(dtype) norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states class FeedForward(nn.Module): def __init__(self, dim: int, dim_out: Optional[int]=None, mult: int=4, dropout: float=0.0, activation_fn: str='geglu', final_dropout: bool=False, inner_dim=None, bias: bool=True): super().__init__() if inner_dim is None: inner_dim = int(dim * mult) dim_out = dim_out if dim_out is not None else dim if activation_fn == 'gelu': act_fn = 
GELU(dim, inner_dim, bias=bias) if activation_fn == 'gelu-approximate': act_fn = GELU(dim, inner_dim, approximate='tanh', bias=bias) elif activation_fn == 'geglu': act_fn = GEGLU(dim, inner_dim, bias=bias) elif activation_fn == 'geglu-approximate': act_fn = ApproximateGELU(dim, inner_dim, bias=bias) elif activation_fn == 'swiglu': act_fn = SwiGLU(dim, inner_dim, bias=bias) self.net = nn.ModuleList([]) self.net.append(act_fn) self.net.append(nn.Dropout(dropout)) self.net.append(nn.Linear(inner_dim, dim_out, bias=bias)) if final_dropout: self.net.append(nn.Dropout(dropout)) def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) for module in self.net: hidden_states = module(hidden_states) return hidden_states # File: diffusers-main/src/diffusers/models/attention_flax.py import functools import math import flax.linen as nn import jax import jax.numpy as jnp def _query_chunk_attention(query, key, value, precision, key_chunk_size: int=4096): (num_kv, num_heads, k_features) = key.shape[-3:] v_features = value.shape[-1] key_chunk_size = min(key_chunk_size, num_kv) query = query / jnp.sqrt(k_features) @functools.partial(jax.checkpoint, prevent_cse=False) def summarize_chunk(query, key, value): attn_weights = jnp.einsum('...qhd,...khd->...qhk', query, key, precision=precision) max_score = jnp.max(attn_weights, axis=-1, keepdims=True) max_score = jax.lax.stop_gradient(max_score) exp_weights = jnp.exp(attn_weights - max_score) exp_values = jnp.einsum('...vhf,...qhv->...qhf', value, exp_weights, precision=precision) max_score = jnp.einsum('...qhk->...qh', max_score) return (exp_values, exp_weights.sum(axis=-1), max_score) def chunk_scanner(chunk_idx): key_chunk = jax.lax.dynamic_slice(operand=key, start_indices=[0] * (key.ndim - 3) + [chunk_idx, 0, 0], slice_sizes=list(key.shape[:-3]) + [key_chunk_size, num_heads, k_features]) value_chunk = jax.lax.dynamic_slice(operand=value, start_indices=[0] * (value.ndim - 3) + [chunk_idx, 0, 0], slice_sizes=list(value.shape[:-3]) + [key_chunk_size, num_heads, v_features]) return summarize_chunk(query, key_chunk, value_chunk) (chunk_values, chunk_weights, chunk_max) = jax.lax.map(f=chunk_scanner, xs=jnp.arange(0, num_kv, key_chunk_size)) global_max = jnp.max(chunk_max, axis=0, keepdims=True) max_diffs = jnp.exp(chunk_max - global_max) chunk_values *= jnp.expand_dims(max_diffs, axis=-1) chunk_weights *= max_diffs all_values = chunk_values.sum(axis=0) all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis=0) return all_values / all_weights def jax_memory_efficient_attention(query, key, value, precision=jax.lax.Precision.HIGHEST, query_chunk_size: int=1024, key_chunk_size: int=4096): (num_q, num_heads, q_features) = query.shape[-3:] def chunk_scanner(chunk_idx, _): query_chunk = jax.lax.dynamic_slice(operand=query, start_indices=[0] * (query.ndim - 3) + [chunk_idx, 0, 0], slice_sizes=list(query.shape[:-3]) + [min(query_chunk_size, num_q), num_heads, q_features]) return (chunk_idx + query_chunk_size, _query_chunk_attention(query=query_chunk, key=key, value=value, precision=precision, key_chunk_size=key_chunk_size)) (_, res) = jax.lax.scan(f=chunk_scanner, init=0, xs=None, 
length=math.ceil(num_q / query_chunk_size)) return jnp.concatenate(res, axis=-3) class FlaxAttention(nn.Module): query_dim: int heads: int = 8 dim_head: int = 64 dropout: float = 0.0 use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): inner_dim = self.dim_head * self.heads self.scale = self.dim_head ** (-0.5) self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name='to_q') self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name='to_k') self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name='to_v') self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name='to_out_0') self.dropout_layer = nn.Dropout(rate=self.dropout) def reshape_heads_to_batch_dim(self, tensor): (batch_size, seq_len, dim) = tensor.shape head_size = self.heads tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) tensor = jnp.transpose(tensor, (0, 2, 1, 3)) tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size) return tensor def reshape_batch_dim_to_heads(self, tensor): (batch_size, seq_len, dim) = tensor.shape head_size = self.heads tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = jnp.transpose(tensor, (0, 2, 1, 3)) tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def __call__(self, hidden_states, context=None, deterministic=True): context = hidden_states if context is None else context query_proj = self.query(hidden_states) key_proj = self.key(context) value_proj = self.value(context) if self.split_head_dim: b = hidden_states.shape[0] query_states = jnp.reshape(query_proj, (b, -1, self.heads, self.dim_head)) key_states = jnp.reshape(key_proj, (b, -1, self.heads, self.dim_head)) value_states = jnp.reshape(value_proj, (b, -1, self.heads, self.dim_head)) else: query_states = self.reshape_heads_to_batch_dim(query_proj) key_states = self.reshape_heads_to_batch_dim(key_proj) value_states = self.reshape_heads_to_batch_dim(value_proj) if self.use_memory_efficient_attention: query_states = query_states.transpose(1, 0, 2) key_states = key_states.transpose(1, 0, 2) value_states = value_states.transpose(1, 0, 2) flatten_latent_dim = query_states.shape[-3] if flatten_latent_dim % 64 == 0: query_chunk_size = int(flatten_latent_dim / 64) elif flatten_latent_dim % 16 == 0: query_chunk_size = int(flatten_latent_dim / 16) elif flatten_latent_dim % 4 == 0: query_chunk_size = int(flatten_latent_dim / 4) else: query_chunk_size = int(flatten_latent_dim) hidden_states = jax_memory_efficient_attention(query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4) hidden_states = hidden_states.transpose(1, 0, 2) else: if self.split_head_dim: attention_scores = jnp.einsum('b t n h, b f n h -> b n f t', key_states, query_states) else: attention_scores = jnp.einsum('b i d, b j d->b i j', query_states, key_states) attention_scores = attention_scores * self.scale attention_probs = nn.softmax(attention_scores, axis=-1 if self.split_head_dim else 2) if self.split_head_dim: hidden_states = jnp.einsum('b n f t, b t n h -> b f n h', attention_probs, value_states) b = hidden_states.shape[0] hidden_states = jnp.reshape(hidden_states, (b, -1, self.heads * self.dim_head)) else: hidden_states = jnp.einsum('b i j, b j d -> b i d', attention_probs, value_states) hidden_states = self.reshape_batch_dim_to_heads(hidden_states) hidden_states = self.proj_attn(hidden_states) return 
self.dropout_layer(hidden_states, deterministic=deterministic) class FlaxBasicTransformerBlock(nn.Module): dim: int n_heads: int d_head: int dropout: float = 0.0 only_cross_attention: bool = False dtype: jnp.dtype = jnp.float32 use_memory_efficient_attention: bool = False split_head_dim: bool = False def setup(self): self.attn1 = FlaxAttention(self.dim, self.n_heads, self.d_head, self.dropout, self.use_memory_efficient_attention, self.split_head_dim, dtype=self.dtype) self.attn2 = FlaxAttention(self.dim, self.n_heads, self.d_head, self.dropout, self.use_memory_efficient_attention, self.split_head_dim, dtype=self.dtype) self.ff = FlaxFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype) self.norm1 = nn.LayerNorm(epsilon=1e-05, dtype=self.dtype) self.norm2 = nn.LayerNorm(epsilon=1e-05, dtype=self.dtype) self.norm3 = nn.LayerNorm(epsilon=1e-05, dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.dropout) def __call__(self, hidden_states, context, deterministic=True): residual = hidden_states if self.only_cross_attention: hidden_states = self.attn1(self.norm1(hidden_states), context, deterministic=deterministic) else: hidden_states = self.attn1(self.norm1(hidden_states), deterministic=deterministic) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic) hidden_states = hidden_states + residual return self.dropout_layer(hidden_states, deterministic=deterministic) class FlaxTransformer2DModel(nn.Module): in_channels: int n_heads: int d_head: int depth: int = 1 dropout: float = 0.0 use_linear_projection: bool = False only_cross_attention: bool = False dtype: jnp.dtype = jnp.float32 use_memory_efficient_attention: bool = False split_head_dim: bool = False def setup(self): self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-05) inner_dim = self.n_heads * self.d_head if self.use_linear_projection: self.proj_in = nn.Dense(inner_dim, dtype=self.dtype) else: self.proj_in = nn.Conv(inner_dim, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype) self.transformer_blocks = [FlaxBasicTransformerBlock(inner_dim, self.n_heads, self.d_head, dropout=self.dropout, only_cross_attention=self.only_cross_attention, dtype=self.dtype, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim) for _ in range(self.depth)] if self.use_linear_projection: self.proj_out = nn.Dense(inner_dim, dtype=self.dtype) else: self.proj_out = nn.Conv(inner_dim, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.dropout) def __call__(self, hidden_states, context, deterministic=True): (batch, height, width, channels) = hidden_states.shape residual = hidden_states hidden_states = self.norm(hidden_states) if self.use_linear_projection: hidden_states = hidden_states.reshape(batch, height * width, channels) hidden_states = self.proj_in(hidden_states) else: hidden_states = self.proj_in(hidden_states) hidden_states = hidden_states.reshape(batch, height * width, channels) for transformer_block in self.transformer_blocks: hidden_states = transformer_block(hidden_states, context, deterministic=deterministic) if self.use_linear_projection: hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch, height, width, channels) else: 
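# NOTE: with the conv projection (`use_linear_projection=False`) the tokens are reshaped back to
# (batch, height, width, channels) first and the 1x1 conv `proj_out` is applied to the feature map,
# whereas the linear branch above applies `proj_out` while still flattened and reshapes afterwards.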
hidden_states = hidden_states.reshape(batch, height, width, channels) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states + residual return self.dropout_layer(hidden_states, deterministic=deterministic) class FlaxFeedForward(nn.Module): dim: int dropout: float = 0.0 dtype: jnp.dtype = jnp.float32 def setup(self): self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype) self.net_2 = nn.Dense(self.dim, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): hidden_states = self.net_0(hidden_states, deterministic=deterministic) hidden_states = self.net_2(hidden_states) return hidden_states class FlaxGEGLU(nn.Module): dim: int dropout: float = 0.0 dtype: jnp.dtype = jnp.float32 def setup(self): inner_dim = self.dim * 4 self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.dropout) def __call__(self, hidden_states, deterministic=True): hidden_states = self.proj(hidden_states) (hidden_linear, hidden_gelu) = jnp.split(hidden_states, 2, axis=2) return self.dropout_layer(hidden_linear * nn.gelu(hidden_gelu), deterministic=deterministic) # File: diffusers-main/src/diffusers/models/attention_processor.py import inspect import math from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ..image_processor import IPAdapterMaskProcessor from ..utils import deprecate, logging from ..utils.import_utils import is_torch_npu_available, is_xformers_available from ..utils.torch_utils import is_torch_version, maybe_allow_in_graph logger = logging.get_logger(__name__) if is_torch_npu_available(): import torch_npu if is_xformers_available(): import xformers import xformers.ops else: xformers = None @maybe_allow_in_graph class Attention(nn.Module): def __init__(self, query_dim: int, cross_attention_dim: Optional[int]=None, heads: int=8, kv_heads: Optional[int]=None, dim_head: int=64, dropout: float=0.0, bias: bool=False, upcast_attention: bool=False, upcast_softmax: bool=False, cross_attention_norm: Optional[str]=None, cross_attention_norm_num_groups: int=32, qk_norm: Optional[str]=None, added_kv_proj_dim: Optional[int]=None, added_proj_bias: Optional[bool]=True, norm_num_groups: Optional[int]=None, spatial_norm_dim: Optional[int]=None, out_bias: bool=True, scale_qk: bool=True, only_cross_attention: bool=False, eps: float=1e-05, rescale_output_factor: float=1.0, residual_connection: bool=False, _from_deprecated_attn_block: bool=False, processor: Optional['AttnProcessor']=None, out_dim: int=None, context_pre_only=None, pre_only=False): super().__init__() from .normalization import FP32LayerNorm, RMSNorm self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.inner_kv_dim = self.inner_dim if kv_heads is None else dim_head * kv_heads self.query_dim = query_dim self.use_bias = bias self.is_cross_attention = cross_attention_dim is not None self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim self.upcast_attention = upcast_attention self.upcast_softmax = upcast_softmax self.rescale_output_factor = rescale_output_factor self.residual_connection = residual_connection self.dropout = dropout self.fused_projections = False self.out_dim = out_dim if out_dim is not None else query_dim self.context_pre_only = context_pre_only self.pre_only = pre_only self._from_deprecated_attn_block = _from_deprecated_attn_block self.scale_qk = scale_qk self.scale = dim_head ** (-0.5) if self.scale_qk else 1.0 self.heads = 
out_dim // dim_head if out_dim is not None else heads self.sliceable_head_dim = heads self.added_kv_proj_dim = added_kv_proj_dim self.only_cross_attention = only_cross_attention if self.added_kv_proj_dim is None and self.only_cross_attention: raise ValueError('`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`.') if norm_num_groups is not None: self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True) else: self.group_norm = None if spatial_norm_dim is not None: self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim) else: self.spatial_norm = None if qk_norm is None: self.norm_q = None self.norm_k = None elif qk_norm == 'layer_norm': self.norm_q = nn.LayerNorm(dim_head, eps=eps) self.norm_k = nn.LayerNorm(dim_head, eps=eps) elif qk_norm == 'fp32_layer_norm': self.norm_q = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps) self.norm_k = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps) elif qk_norm == 'layer_norm_across_heads': self.norm_q = nn.LayerNorm(dim_head * heads, eps=eps) self.norm_k = nn.LayerNorm(dim_head * kv_heads, eps=eps) elif qk_norm == 'rms_norm': self.norm_q = RMSNorm(dim_head, eps=eps) self.norm_k = RMSNorm(dim_head, eps=eps) else: raise ValueError(f"unknown qk_norm: {qk_norm}. Should be None or 'layer_norm'") if cross_attention_norm is None: self.norm_cross = None elif cross_attention_norm == 'layer_norm': self.norm_cross = nn.LayerNorm(self.cross_attention_dim) elif cross_attention_norm == 'group_norm': if self.added_kv_proj_dim is not None: norm_cross_num_channels = added_kv_proj_dim else: norm_cross_num_channels = self.cross_attention_dim self.norm_cross = nn.GroupNorm(num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-05, affine=True) else: raise ValueError(f"unknown cross_attention_norm: {cross_attention_norm}. 
Should be None, 'layer_norm' or 'group_norm'") self.to_q = nn.Linear(query_dim, self.inner_dim, bias=bias) if not self.only_cross_attention: self.to_k = nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias) self.to_v = nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias) else: self.to_k = None self.to_v = None self.added_proj_bias = added_proj_bias if self.added_kv_proj_dim is not None: self.add_k_proj = nn.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=added_proj_bias) self.add_v_proj = nn.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=added_proj_bias) if self.context_pre_only is not None: self.add_q_proj = nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) if not self.pre_only: self.to_out = nn.ModuleList([]) self.to_out.append(nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(nn.Dropout(dropout)) if self.context_pre_only is not None and (not self.context_pre_only): self.to_add_out = nn.Linear(self.inner_dim, self.out_dim, bias=out_bias) if qk_norm is not None and added_kv_proj_dim is not None: if qk_norm == 'fp32_layer_norm': self.norm_added_q = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps) self.norm_added_k = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps) elif qk_norm == 'rms_norm': self.norm_added_q = RMSNorm(dim_head, eps=eps) self.norm_added_k = RMSNorm(dim_head, eps=eps) else: self.norm_added_q = None self.norm_added_k = None if processor is None: processor = AttnProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') and self.scale_qk else AttnProcessor() self.set_processor(processor) def set_use_npu_flash_attention(self, use_npu_flash_attention: bool) -> None: if use_npu_flash_attention: processor = AttnProcessorNPU() else: processor = AttnProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') and self.scale_qk else AttnProcessor() self.set_processor(processor) def set_use_memory_efficient_attention_xformers(self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable]=None) -> None: is_custom_diffusion = hasattr(self, 'processor') and isinstance(self.processor, (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0)) is_added_kv_processor = hasattr(self, 'processor') and isinstance(self.processor, (AttnAddedKVProcessor, AttnAddedKVProcessor2_0, SlicedAttnAddedKVProcessor, XFormersAttnAddedKVProcessor)) if use_memory_efficient_attention_xformers: if is_added_kv_processor and is_custom_diffusion: raise NotImplementedError(f'Memory efficient attention is currently not supported for custom diffusion for attention processor type {self.processor}') if not is_xformers_available(): raise ModuleNotFoundError('Refer to https://github.com/facebookresearch/xformers for more information on how to install xformers', name='xformers') elif not torch.cuda.is_available(): raise ValueError("torch.cuda.is_available() should be True but is False. 
xformers' memory efficient attention is only available for GPU ") else: try: _ = xformers.ops.memory_efficient_attention(torch.randn((1, 2, 40), device='cuda'), torch.randn((1, 2, 40), device='cuda'), torch.randn((1, 2, 40), device='cuda')) except Exception as e: raise e if is_custom_diffusion: processor = CustomDiffusionXFormersAttnProcessor(train_kv=self.processor.train_kv, train_q_out=self.processor.train_q_out, hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim, attention_op=attention_op) processor.load_state_dict(self.processor.state_dict()) if hasattr(self.processor, 'to_k_custom_diffusion'): processor.to(self.processor.to_k_custom_diffusion.weight.device) elif is_added_kv_processor: logger.info('Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation.') processor = XFormersAttnAddedKVProcessor(attention_op=attention_op) else: processor = XFormersAttnProcessor(attention_op=attention_op) elif is_custom_diffusion: attn_processor_class = CustomDiffusionAttnProcessor2_0 if hasattr(F, 'scaled_dot_product_attention') else CustomDiffusionAttnProcessor processor = attn_processor_class(train_kv=self.processor.train_kv, train_q_out=self.processor.train_q_out, hidden_size=self.processor.hidden_size, cross_attention_dim=self.processor.cross_attention_dim) processor.load_state_dict(self.processor.state_dict()) if hasattr(self.processor, 'to_k_custom_diffusion'): processor.to(self.processor.to_k_custom_diffusion.weight.device) else: processor = AttnProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') and self.scale_qk else AttnProcessor() self.set_processor(processor) def set_attention_slice(self, slice_size: int) -> None: if slice_size is not None and slice_size > self.sliceable_head_dim: raise ValueError(f'slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.') if slice_size is not None and self.added_kv_proj_dim is not None: processor = SlicedAttnAddedKVProcessor(slice_size) elif slice_size is not None: processor = SlicedAttnProcessor(slice_size) elif self.added_kv_proj_dim is not None: processor = AttnAddedKVProcessor() else: processor = AttnProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') and self.scale_qk else AttnProcessor() self.set_processor(processor) def set_processor(self, processor: 'AttnProcessor') -> None: if hasattr(self, 'processor') and isinstance(self.processor, torch.nn.Module) and (not isinstance(processor, torch.nn.Module)): logger.info(f'You are removing possibly trained weights of {self.processor} with {processor}') self._modules.pop('processor') self.processor = processor def get_processor(self, return_deprecated_lora: bool=False) -> 'AttentionProcessor': if not return_deprecated_lora: return self.processor def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, **cross_attention_kwargs) -> torch.Tensor: attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) quiet_attn_parameters = {'ip_adapter_masks'} unused_kwargs = [k for (k, _) in cross_attention_kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters] if len(unused_kwargs) > 0: logger.warning(f'cross_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored.') cross_attention_kwargs = {k: w for (k, w) in cross_attention_kwargs.items() if k in attn_parameters} 
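# `forward` only validates and filters kwargs; the attention math itself lives in the pluggable
# processor installed via `set_processor` (an SDPA-based `AttnProcessor2_0` by default when
# `F.scaled_dot_product_attention` is available and `scale_qk` is set).
# Example with hypothetical sizes -- swapping the classic baddbmm/softmax path back in:
#   attn = Attention(query_dim=320, heads=8, dim_head=40)
#   attn.set_processor(AttnProcessor())
#   out = attn(torch.randn(2, 64, 320))  # self-attention over a (batch, seq_len, query_dim) tensor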
return self.processor(self, hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **cross_attention_kwargs) def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor: head_size = self.heads (batch_size, seq_len, dim) = tensor.shape tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int=3) -> torch.Tensor: head_size = self.heads if tensor.ndim == 3: (batch_size, seq_len, dim) = tensor.shape extra_dim = 1 else: (batch_size, extra_dim, seq_len, dim) = tensor.shape tensor = tensor.reshape(batch_size, seq_len * extra_dim, head_size, dim // head_size) tensor = tensor.permute(0, 2, 1, 3) if out_dim == 3: tensor = tensor.reshape(batch_size * head_size, seq_len * extra_dim, dim // head_size) return tensor def get_attention_scores(self, query: torch.Tensor, key: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: dtype = query.dtype if self.upcast_attention: query = query.float() key = key.float() if attention_mask is None: baddbmm_input = torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device) beta = 0 else: baddbmm_input = attention_mask beta = 1 attention_scores = torch.baddbmm(baddbmm_input, query, key.transpose(-1, -2), beta=beta, alpha=self.scale) del baddbmm_input if self.upcast_softmax: attention_scores = attention_scores.float() attention_probs = attention_scores.softmax(dim=-1) del attention_scores attention_probs = attention_probs.to(dtype) return attention_probs def prepare_attention_mask(self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int=3) -> torch.Tensor: head_size = self.heads if attention_mask is None: return attention_mask current_length: int = attention_mask.shape[-1] if current_length != target_length: if attention_mask.device.type == 'mps': padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length) padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat([attention_mask, padding], dim=2) else: attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) if out_dim == 3: if attention_mask.shape[0] < batch_size * head_size: attention_mask = attention_mask.repeat_interleave(head_size, dim=0) elif out_dim == 4: attention_mask = attention_mask.unsqueeze(1) attention_mask = attention_mask.repeat_interleave(head_size, dim=1) return attention_mask def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: assert self.norm_cross is not None, 'self.norm_cross must be defined to call self.norm_encoder_hidden_states' if isinstance(self.norm_cross, nn.LayerNorm): encoder_hidden_states = self.norm_cross(encoder_hidden_states) elif isinstance(self.norm_cross, nn.GroupNorm): encoder_hidden_states = encoder_hidden_states.transpose(1, 2) encoder_hidden_states = self.norm_cross(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) else: assert False return encoder_hidden_states @torch.no_grad() def fuse_projections(self, fuse=True): device = self.to_q.weight.data.device dtype = self.to_q.weight.data.dtype if not self.is_cross_attention: concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = 
concatenated_weights.shape[0] self.to_qkv = nn.Linear(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) self.to_qkv.weight.copy_(concatenated_weights) if self.use_bias: concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data]) self.to_qkv.bias.copy_(concatenated_bias) else: concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_kv = nn.Linear(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) self.to_kv.weight.copy_(concatenated_weights) if self.use_bias: concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data]) self.to_kv.bias.copy_(concatenated_bias) if hasattr(self, 'add_q_proj') and hasattr(self, 'add_k_proj') and hasattr(self, 'add_v_proj'): concatenated_weights = torch.cat([self.add_q_proj.weight.data, self.add_k_proj.weight.data, self.add_v_proj.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_added_qkv = nn.Linear(in_features, out_features, bias=self.added_proj_bias, device=device, dtype=dtype) self.to_added_qkv.weight.copy_(concatenated_weights) if self.added_proj_bias: concatenated_bias = torch.cat([self.add_q_proj.bias.data, self.add_k_proj.bias.data, self.add_v_proj.bias.data]) self.to_added_qkv.bias.copy_(concatenated_bias) self.fused_projections = fuse class AttnProcessor: def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
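# `deprecate` (from ..utils) emits the standard deprecation warning; the legacy `scale` kwarg itself
# is simply discarded. The remainder of this processor is the classic attention path:
# head_to_batch_dim -> get_attention_scores (baddbmm + softmax) -> bmm with the value tensor.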
deprecate('scale', '1.0.0', deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class CustomDiffusionAttnProcessor(nn.Module): def __init__(self, train_kv: bool=True, train_q_out: bool=True, hidden_size: Optional[int]=None, cross_attention_dim: Optional[int]=None, out_bias: bool=True, dropout: float=0.0): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) else: query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = 
attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) if self.train_q_out: hidden_states = self.to_out_custom_diffusion[0](hidden_states) hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class AttnAddedKVProcessor: def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class AttnAddedKVProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('AttnAddedKVProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: 
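# Same `scale` deprecation shim as in the other processors. AttnAddedKVProcessor2_0 is the SDPA
# variant of the added-KV processor: encoder states are projected with add_k_proj / add_v_proj and
# concatenated with the self-attention keys/values (unless `only_cross_attention`) before calling
# F.scaled_dot_product_attention.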
deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size, out_dim=4) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query, out_dim=4) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj, out_dim=4) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj, out_dim=4) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key, out_dim=4) value = attn.head_to_batch_dim(value, out_dim=4) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, residual.shape[1]) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class JointAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, attention_mask: Optional[torch.FloatTensor]=None, *args, **kwargs) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: (batch_size, channel, height, width) = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size = encoder_hidden_states.shape[0] query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) query = torch.cat([query, encoder_hidden_states_query_proj], dim=1) key = torch.cat([key, encoder_hidden_states_key_proj], dim=1) value = torch.cat([value, 
encoder_hidden_states_value_proj], dim=1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) (hidden_states, encoder_hidden_states) = (hidden_states[:, :residual.shape[1]], hidden_states[:, residual.shape[1]:]) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if not attn.context_pre_only: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) return (hidden_states, encoder_hidden_states) class PAGJointAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('PAGJointAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: (batch_size, channel, height, width) = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) identity_block_size = hidden_states.shape[1] (hidden_states_org, hidden_states_ptb) = hidden_states.chunk(2) (encoder_hidden_states_org, encoder_hidden_states_ptb) = encoder_hidden_states.chunk(2) batch_size = encoder_hidden_states_org.shape[0] query_org = attn.to_q(hidden_states_org) key_org = attn.to_k(hidden_states_org) value_org = attn.to_v(hidden_states_org) encoder_hidden_states_org_query_proj = attn.add_q_proj(encoder_hidden_states_org) encoder_hidden_states_org_key_proj = attn.add_k_proj(encoder_hidden_states_org) encoder_hidden_states_org_value_proj = attn.add_v_proj(encoder_hidden_states_org) query_org = torch.cat([query_org, encoder_hidden_states_org_query_proj], dim=1) key_org = torch.cat([key_org, encoder_hidden_states_org_key_proj], dim=1) value_org = torch.cat([value_org, encoder_hidden_states_org_value_proj], dim=1) inner_dim = key_org.shape[-1] head_dim = inner_dim // attn.heads query_org = query_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_org = key_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value_org = value_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states_org = F.scaled_dot_product_attention(query_org, key_org, value_org, dropout_p=0.0, is_causal=False) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query_org.dtype) (hidden_states_org, encoder_hidden_states_org) = (hidden_states_org[:, :residual.shape[1]], hidden_states_org[:, residual.shape[1]:]) hidden_states_org = 
attn.to_out[0](hidden_states_org) hidden_states_org = attn.to_out[1](hidden_states_org) if not attn.context_pre_only: encoder_hidden_states_org = attn.to_add_out(encoder_hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states_org = encoder_hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) batch_size = encoder_hidden_states_ptb.shape[0] query_ptb = attn.to_q(hidden_states_ptb) key_ptb = attn.to_k(hidden_states_ptb) value_ptb = attn.to_v(hidden_states_ptb) encoder_hidden_states_ptb_query_proj = attn.add_q_proj(encoder_hidden_states_ptb) encoder_hidden_states_ptb_key_proj = attn.add_k_proj(encoder_hidden_states_ptb) encoder_hidden_states_ptb_value_proj = attn.add_v_proj(encoder_hidden_states_ptb) query_ptb = torch.cat([query_ptb, encoder_hidden_states_ptb_query_proj], dim=1) key_ptb = torch.cat([key_ptb, encoder_hidden_states_ptb_key_proj], dim=1) value_ptb = torch.cat([value_ptb, encoder_hidden_states_ptb_value_proj], dim=1) inner_dim = key_ptb.shape[-1] head_dim = inner_dim // attn.heads query_ptb = query_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_ptb = key_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value_ptb = value_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) seq_len = query_ptb.size(2) full_mask = torch.zeros((seq_len, seq_len), device=query_ptb.device, dtype=query_ptb.dtype) full_mask[:identity_block_size, :identity_block_size] = float('-inf') full_mask[:identity_block_size, :identity_block_size].fill_diagonal_(0) full_mask = full_mask.unsqueeze(0).unsqueeze(0) hidden_states_ptb = F.scaled_dot_product_attention(query_ptb, key_ptb, value_ptb, attn_mask=full_mask, dropout_p=0.0, is_causal=False) hidden_states_ptb = hidden_states_ptb.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_ptb = hidden_states_ptb.to(query_ptb.dtype) (hidden_states_ptb, encoder_hidden_states_ptb) = (hidden_states_ptb[:, :residual.shape[1]], hidden_states_ptb[:, residual.shape[1]:]) hidden_states_ptb = attn.to_out[0](hidden_states_ptb) hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if not attn.context_pre_only: encoder_hidden_states_ptb = attn.to_add_out(encoder_hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states_ptb = encoder_hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) encoder_hidden_states = torch.cat([encoder_hidden_states_org, encoder_hidden_states_ptb]) return (hidden_states, encoder_hidden_states) class PAGCFGJointAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('PAGCFGJointAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, attention_mask: Optional[torch.FloatTensor]=None, *args, **kwargs) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: (batch_size, 
channel, height, width) = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) identity_block_size = hidden_states.shape[1] (hidden_states_uncond, hidden_states_org, hidden_states_ptb) = hidden_states.chunk(3) hidden_states_org = torch.cat([hidden_states_uncond, hidden_states_org]) (encoder_hidden_states_uncond, encoder_hidden_states_org, encoder_hidden_states_ptb) = encoder_hidden_states.chunk(3) encoder_hidden_states_org = torch.cat([encoder_hidden_states_uncond, encoder_hidden_states_org]) batch_size = encoder_hidden_states_org.shape[0] query_org = attn.to_q(hidden_states_org) key_org = attn.to_k(hidden_states_org) value_org = attn.to_v(hidden_states_org) encoder_hidden_states_org_query_proj = attn.add_q_proj(encoder_hidden_states_org) encoder_hidden_states_org_key_proj = attn.add_k_proj(encoder_hidden_states_org) encoder_hidden_states_org_value_proj = attn.add_v_proj(encoder_hidden_states_org) query_org = torch.cat([query_org, encoder_hidden_states_org_query_proj], dim=1) key_org = torch.cat([key_org, encoder_hidden_states_org_key_proj], dim=1) value_org = torch.cat([value_org, encoder_hidden_states_org_value_proj], dim=1) inner_dim = key_org.shape[-1] head_dim = inner_dim // attn.heads query_org = query_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_org = key_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value_org = value_org.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states_org = F.scaled_dot_product_attention(query_org, key_org, value_org, dropout_p=0.0, is_causal=False) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query_org.dtype) (hidden_states_org, encoder_hidden_states_org) = (hidden_states_org[:, :residual.shape[1]], hidden_states_org[:, residual.shape[1]:]) hidden_states_org = attn.to_out[0](hidden_states_org) hidden_states_org = attn.to_out[1](hidden_states_org) if not attn.context_pre_only: encoder_hidden_states_org = attn.to_add_out(encoder_hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states_org = encoder_hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) batch_size = encoder_hidden_states_ptb.shape[0] query_ptb = attn.to_q(hidden_states_ptb) key_ptb = attn.to_k(hidden_states_ptb) value_ptb = attn.to_v(hidden_states_ptb) encoder_hidden_states_ptb_query_proj = attn.add_q_proj(encoder_hidden_states_ptb) encoder_hidden_states_ptb_key_proj = attn.add_k_proj(encoder_hidden_states_ptb) encoder_hidden_states_ptb_value_proj = attn.add_v_proj(encoder_hidden_states_ptb) query_ptb = torch.cat([query_ptb, encoder_hidden_states_ptb_query_proj], dim=1) key_ptb = torch.cat([key_ptb, encoder_hidden_states_ptb_key_proj], dim=1) value_ptb = torch.cat([value_ptb, encoder_hidden_states_ptb_value_proj], dim=1) inner_dim = key_ptb.shape[-1] head_dim = inner_dim // attn.heads query_ptb = query_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_ptb = key_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value_ptb = value_ptb.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) seq_len = query_ptb.size(2) full_mask = torch.zeros((seq_len, seq_len), device=query_ptb.device, dtype=query_ptb.dtype) full_mask[:identity_block_size, :identity_block_size] = float('-inf') 
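# Perturbed-attention-guidance mask: within the image-token block every entry is -inf except the
# diagonal (restored to 0 just below), so each image token attends only to itself among the image
# tokens while the text-token rows and columns remain fully visible.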
full_mask[:identity_block_size, :identity_block_size].fill_diagonal_(0) full_mask = full_mask.unsqueeze(0).unsqueeze(0) hidden_states_ptb = F.scaled_dot_product_attention(query_ptb, key_ptb, value_ptb, attn_mask=full_mask, dropout_p=0.0, is_causal=False) hidden_states_ptb = hidden_states_ptb.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_ptb = hidden_states_ptb.to(query_ptb.dtype) (hidden_states_ptb, encoder_hidden_states_ptb) = (hidden_states_ptb[:, :residual.shape[1]], hidden_states_ptb[:, residual.shape[1]:]) hidden_states_ptb = attn.to_out[0](hidden_states_ptb) hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if not attn.context_pre_only: encoder_hidden_states_ptb = attn.to_add_out(encoder_hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states_ptb = encoder_hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) encoder_hidden_states = torch.cat([encoder_hidden_states_org, encoder_hidden_states_ptb]) return (hidden_states, encoder_hidden_states) class FusedJointAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, attention_mask: Optional[torch.FloatTensor]=None, *args, **kwargs) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: (batch_size, channel, height, width) = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size = encoder_hidden_states.shape[0] qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 (query, key, value) = torch.split(qkv, split_size, dim=-1) encoder_qkv = attn.to_added_qkv(encoder_hidden_states) split_size = encoder_qkv.shape[-1] // 3 (encoder_hidden_states_query_proj, encoder_hidden_states_key_proj, encoder_hidden_states_value_proj) = torch.split(encoder_qkv, split_size, dim=-1) query = torch.cat([query, encoder_hidden_states_query_proj], dim=1) key = torch.cat([key, encoder_hidden_states_key_proj], dim=1) value = torch.cat([value, encoder_hidden_states_value_proj], dim=1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) (hidden_states, encoder_hidden_states) = (hidden_states[:, :residual.shape[1]], hidden_states[:, residual.shape[1]:]) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if not attn.context_pre_only: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) if input_ndim == 4: 
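# If the sample arrived as a 4D feature map, fold the token axis back into the original
# (batch, channel, height, width) layout before returning.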
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) return (hidden_states, encoder_hidden_states) class AuraFlowAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention') and is_torch_version('<', '2.1'): raise ImportError('AuraFlowAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to at least 2.1 or above as we use `scale` in `F.scaled_dot_product_attention()`. ') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, *args, **kwargs) -> torch.FloatTensor: batch_size = hidden_states.shape[0] query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) if encoder_hidden_states is not None: encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim) key = key.view(batch_size, -1, attn.heads, head_dim) value = value.view(batch_size, -1, attn.heads, head_dim) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if encoder_hidden_states is not None: encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view(batch_size, -1, attn.heads, head_dim) encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view(batch_size, -1, attn.heads, head_dim) encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view(batch_size, -1, attn.heads, head_dim) if attn.norm_added_q is not None: encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) if attn.norm_added_k is not None: encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) query = torch.cat([encoder_hidden_states_query_proj, query], dim=1) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, scale=attn.scale, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: (hidden_states, encoder_hidden_states) = (hidden_states[:, encoder_hidden_states.shape[1]:], hidden_states[:, :encoder_hidden_states.shape[1]]) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if encoder_hidden_states is not None: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) if encoder_hidden_states is not None: return (hidden_states, encoder_hidden_states) else: return hidden_states class FusedAuraFlowAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention') and is_torch_version('<', '2.1'): raise ImportError('FusedAuraFlowAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to at least 2.1 or above as we use `scale` in `F.scaled_dot_product_attention()`. 
') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, *args, **kwargs) -> torch.FloatTensor: batch_size = hidden_states.shape[0] qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 (query, key, value) = torch.split(qkv, split_size, dim=-1) if encoder_hidden_states is not None: encoder_qkv = attn.to_added_qkv(encoder_hidden_states) split_size = encoder_qkv.shape[-1] // 3 (encoder_hidden_states_query_proj, encoder_hidden_states_key_proj, encoder_hidden_states_value_proj) = torch.split(encoder_qkv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim) key = key.view(batch_size, -1, attn.heads, head_dim) value = value.view(batch_size, -1, attn.heads, head_dim) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if encoder_hidden_states is not None: encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view(batch_size, -1, attn.heads, head_dim) encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view(batch_size, -1, attn.heads, head_dim) encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view(batch_size, -1, attn.heads, head_dim) if attn.norm_added_q is not None: encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) if attn.norm_added_k is not None: encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) query = torch.cat([encoder_hidden_states_query_proj, query], dim=1) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, scale=attn.scale, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: (hidden_states, encoder_hidden_states) = (hidden_states[:, encoder_hidden_states.shape[1]:], hidden_states[:, :encoder_hidden_states.shape[1]]) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if encoder_hidden_states is not None: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) if encoder_hidden_states is not None: return (hidden_states, encoder_hidden_states) else: return hidden_states class FluxAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('FluxAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, attention_mask: Optional[torch.FloatTensor]=None, image_rotary_emb: Optional[torch.Tensor]=None) -> torch.FloatTensor: (batch_size, _, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k 
is not None: key = attn.norm_k(key) if encoder_hidden_states is not None: encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_added_q is not None: encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) if attn.norm_added_k is not None: encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) if image_rotary_emb is not None: from .embeddings import apply_rotary_emb query = apply_rotary_emb(query, image_rotary_emb) key = apply_rotary_emb(key, image_rotary_emb) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: (encoder_hidden_states, hidden_states) = (hidden_states[:, :encoder_hidden_states.shape[1]], hidden_states[:, encoder_hidden_states.shape[1]:]) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return (hidden_states, encoder_hidden_states) else: return hidden_states class FusedFluxAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('FusedFluxAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, attention_mask: Optional[torch.FloatTensor]=None, image_rotary_emb: Optional[torch.Tensor]=None) -> torch.FloatTensor: (batch_size, _, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 (query, key, value) = torch.split(qkv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if encoder_hidden_states is not None: encoder_qkv = attn.to_added_qkv(encoder_hidden_states) split_size = encoder_qkv.shape[-1] // 3 (encoder_hidden_states_query_proj, encoder_hidden_states_key_proj, encoder_hidden_states_value_proj) = torch.split(encoder_qkv, split_size, dim=-1) encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) encoder_hidden_states_value_proj = 
encoder_hidden_states_value_proj.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_added_q is not None: encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) if attn.norm_added_k is not None: encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) value = torch.cat([encoder_hidden_states_value_proj, value], dim=2) if image_rotary_emb is not None: from .embeddings import apply_rotary_emb query = apply_rotary_emb(query, image_rotary_emb) key = apply_rotary_emb(key, image_rotary_emb) hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: (encoder_hidden_states, hidden_states) = (hidden_states[:, :encoder_hidden_states.shape[1]], hidden_states[:, encoder_hidden_states.shape[1]:]) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return (hidden_states, encoder_hidden_states) else: return hidden_states class CogVideoXAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, image_rotary_emb: Optional[torch.Tensor]=None) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if image_rotary_emb is not None: from .embeddings import apply_rotary_emb query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) (encoder_hidden_states, hidden_states) = hidden_states.split([text_seq_length, hidden_states.size(1) - text_seq_length], dim=1) return (hidden_states, encoder_hidden_states) class FusedCogVideoXAttnProcessor2_0: def __init__(self): if not 
hasattr(F, 'scaled_dot_product_attention'): raise ImportError('CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, image_rotary_emb: Optional[torch.Tensor]=None) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 (query, key, value) = torch.split(qkv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if image_rotary_emb is not None: from .embeddings import apply_rotary_emb query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) (encoder_hidden_states, hidden_states) = hidden_states.split([text_seq_length, hidden_states.size(1) - text_seq_length], dim=1) return (hidden_states, encoder_hidden_states) class XFormersAttnAddedKVProcessor: def __init__(self, attention_op: Optional[Callable]=None): self.attention_op = attention_op def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: residual = hidden_states hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, 
key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class XFormersAttnProcessor: def __init__(self, attention_op: Optional[Callable]=None): self.attention_op = attention_op def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, key_tokens, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, key_tokens, batch_size) if attention_mask is not None: (_, query_tokens, _) = hidden_states.shape attention_mask = attention_mask.expand(-1, query_tokens, -1) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query).contiguous() key = attn.head_to_batch_dim(key).contiguous() value = attn.head_to_batch_dim(value).contiguous() hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class AttnProcessorNPU: def __init__(self): if not is_torch_npu_available(): raise ImportError('AttnProcessorNPU requires torch_npu extensions and is supported only on npu devices.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, *args, **kwargs) -> 
torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if query.dtype in (torch.float16, torch.bfloat16): hidden_states = torch_npu.npu_fusion_attention(query, key, value, attn.heads, input_layout='BNSD', pse=None, atten_mask=attention_mask, scale=1.0 / math.sqrt(query.shape[-1]), pre_tockens=65536, next_tockens=65536, keep_prob=1.0, sync=False, inner_precise=0)[0] else: hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class AttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
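# `deprecate` below only emits a warning; the `scale` value passed positionally or as a keyword is otherwise ignored
# by this processor. As the message above states, `scale` should instead be supplied through `cross_attention_kwargs`
# when calling the pipeline. A minimal, illustrative sketch (kept as comments so it does not interfere with the
# surrounding module; assumes `unet` is an already-loaded UNet2DConditionModel):
#
#     from diffusers.models.attention_processor import AttnProcessor2_0
#     unet.set_attn_processor(AttnProcessor2_0())  # use the PyTorch 2.0 SDPA-based processor everywhere
#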
deprecate('scale', '1.0.0', deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class StableAudioAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('StableAudioAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def apply_partial_rotary_emb(self, x: torch.Tensor, freqs_cis: Tuple[torch.Tensor]) -> torch.Tensor: from .embeddings import apply_rotary_emb rot_dim = freqs_cis[0].shape[-1] (x_to_rotate, x_unrotated) = (x[..., :rot_dim], x[..., rot_dim:]) x_rotated = apply_rotary_emb(x_to_rotate, freqs_cis, use_real=True, use_real_unbind_dim=-2) out = torch.cat((x_rotated, x_unrotated), dim=-1) return out def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, rotary_emb: Optional[torch.Tensor]=None) -> torch.Tensor: from .embeddings import apply_rotary_emb residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) 
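# From here on the processor distinguishes self- from cross-attention: when `encoder_hidden_states` is None the keys
# and values come from `hidden_states` itself, otherwise from the (optionally normalized) encoder states. This
# processor also supports grouped-query attention: `kv_heads` may be smaller than `attn.heads`, in which case the
# key/value heads are repeated with `repeat_interleave` further below, and the rotary embedding is applied only to
# the first `rot_dim` channels of each head.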
if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) head_dim = query.shape[-1] // attn.heads kv_heads = key.shape[-1] // head_dim query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, kv_heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, kv_heads, head_dim).transpose(1, 2) if kv_heads != attn.heads: heads_per_kv_head = attn.heads // kv_heads key = torch.repeat_interleave(key, heads_per_kv_head, dim=1) value = torch.repeat_interleave(value, heads_per_kv_head, dim=1) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if rotary_emb is not None: query_dtype = query.dtype key_dtype = key.dtype query = query.to(torch.float32) key = key.to(torch.float32) rot_dim = rotary_emb[0].shape[-1] (query_to_rotate, query_unrotated) = (query[..., :rot_dim], query[..., rot_dim:]) query_rotated = apply_rotary_emb(query_to_rotate, rotary_emb, use_real=True, use_real_unbind_dim=-2) query = torch.cat((query_rotated, query_unrotated), dim=-1) if not attn.is_cross_attention: (key_to_rotate, key_unrotated) = (key[..., :rot_dim], key[..., rot_dim:]) key_rotated = apply_rotary_emb(key_to_rotate, rotary_emb, use_real=True, use_real_unbind_dim=-2) key = torch.cat((key_rotated, key_unrotated), dim=-1) query = query.to(query_dtype) key = key.to(key_dtype) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class HunyuanAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, image_rotary_emb: Optional[torch.Tensor]=None) -> torch.Tensor: from .embeddings import apply_rotary_emb residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states 
elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb) if not attn.is_cross_attention: key = apply_rotary_emb(key, image_rotary_emb) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class FusedHunyuanAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('FusedHunyuanAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, image_rotary_emb: Optional[torch.Tensor]=None) -> torch.Tensor: from .embeddings import apply_rotary_emb residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) if encoder_hidden_states is None: qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 (query, key, value) = torch.split(qkv, split_size, dim=-1) else: if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) query = attn.to_q(hidden_states) kv = attn.to_kv(encoder_hidden_states) split_size = kv.shape[-1] // 2 (key, value) = torch.split(kv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb) if not attn.is_cross_attention: key = 
apply_rotary_emb(key, image_rotary_emb) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class PAGHunyuanAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('PAGHunyuanAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, image_rotary_emb: Optional[torch.Tensor]=None) -> torch.Tensor: from .embeddings import apply_rotary_emb residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (hidden_states_org, hidden_states_ptb) = hidden_states.chunk(2) (batch_size, sequence_length, _) = hidden_states_org.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states_org) if encoder_hidden_states is None: encoder_hidden_states = hidden_states_org elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb) if not attn.is_cross_attention: key = apply_rotary_emb(key, image_rotary_emb) hidden_states_org = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query.dtype) hidden_states_org = attn.to_out[0](hidden_states_org) hidden_states_org = attn.to_out[1](hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.group_norm is not None: hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2) hidden_states_ptb = attn.to_v(hidden_states_ptb) hidden_states_ptb = 
hidden_states_ptb.to(query.dtype) hidden_states_ptb = attn.to_out[0](hidden_states_ptb) hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class PAGCFGHunyuanAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('PAGCFGHunyuanAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, image_rotary_emb: Optional[torch.Tensor]=None) -> torch.Tensor: from .embeddings import apply_rotary_emb residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (hidden_states_uncond, hidden_states_org, hidden_states_ptb) = hidden_states.chunk(3) hidden_states_org = torch.cat([hidden_states_uncond, hidden_states_org]) (batch_size, sequence_length, _) = hidden_states_org.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states_org) if encoder_hidden_states is None: encoder_hidden_states = hidden_states_org elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb) if not attn.is_cross_attention: key = apply_rotary_emb(key, image_rotary_emb) hidden_states_org = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query.dtype) hidden_states_org = attn.to_out[0](hidden_states_org) hidden_states_org = attn.to_out[1](hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.group_norm is not None: hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2) hidden_states_ptb = attn.to_v(hidden_states_ptb) hidden_states_ptb = hidden_states_ptb.to(query.dtype) hidden_states_ptb = attn.to_out[0](hidden_states_ptb) 
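# Perturbed branch of perturbed-attention guidance: the softmax(QK^T)V step is skipped entirely and the perturbed
# sample is passed through the value projection (`attn.to_v` above) followed by the output block, i.e. the linear
# layer `attn.to_out[0]` just applied and the dropout `attn.to_out[1]` on the next statement. This amounts to
# replacing self-attention with an identity attention map for the PAG sample.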
hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class LuminaAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, query_rotary_emb: Optional[torch.Tensor]=None, key_rotary_emb: Optional[torch.Tensor]=None, base_sequence_length: Optional[int]=None) -> torch.Tensor: from .embeddings import apply_rotary_emb input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape query = attn.to_q(hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query_dim = query.shape[-1] inner_dim = key.shape[-1] head_dim = query_dim // attn.heads dtype = query.dtype kv_heads = inner_dim // head_dim if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) query = query.view(batch_size, -1, attn.heads, head_dim) key = key.view(batch_size, -1, kv_heads, head_dim) value = value.view(batch_size, -1, kv_heads, head_dim) if query_rotary_emb is not None: query = apply_rotary_emb(query, query_rotary_emb, use_real=False) if key_rotary_emb is not None: key = apply_rotary_emb(key, key_rotary_emb, use_real=False) (query, key) = (query.to(dtype), key.to(dtype)) if key_rotary_emb is None: softmax_scale = None elif base_sequence_length is not None: softmax_scale = math.sqrt(math.log(sequence_length, base_sequence_length)) * attn.scale else: softmax_scale = attn.scale n_rep = attn.heads // kv_heads if n_rep >= 1: key = key.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3) value = value.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3) attention_mask = attention_mask.bool().view(batch_size, 1, 1, -1) attention_mask = attention_mask.expand(-1, attn.heads, sequence_length, -1) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, scale=softmax_scale) hidden_states = hidden_states.transpose(1, 2).to(dtype) return hidden_states class FusedAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('FusedAttnProcessor2_0 requires at least PyTorch 2.0, to use it. Please upgrade PyTorch to > 2.0.') def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
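# As in AttnProcessor2_0, `deprecate` below only warns about the legacy `scale` argument. The fused variant differs
# further down: queries/keys/values come from a single fused `attn.to_qkv` projection (or `attn.to_q` plus a fused
# `attn.to_kv` for cross-attention), so it is only meant to be used after the separate q/k/v projections have been
# fused. A hedged usage sketch (illustrative only; assumes a model or pipeline that exposes the fusion helper in this
# diffusers version):
#
#     pipe.fuse_qkv_projections()  # should swap in the fused processors (e.g. FusedAttnProcessor2_0)
#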
deprecate('scale', '1.0.0', deprecation_message) residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) if encoder_hidden_states is None: qkv = attn.to_qkv(hidden_states) split_size = qkv.shape[-1] // 3 (query, key, value) = torch.split(qkv, split_size, dim=-1) else: if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) query = attn.to_q(hidden_states) kv = attn.to_kv(encoder_hidden_states) split_size = kv.shape[-1] // 2 (key, value) = torch.split(kv, split_size, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class CustomDiffusionXFormersAttnProcessor(nn.Module): def __init__(self, train_kv: bool=True, train_q_out: bool=False, hidden_size: Optional[int]=None, cross_attention_dim: Optional[int]=None, out_bias: bool=True, dropout: float=0.0, attention_op: Optional[Callable]=None): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.attention_op = attention_op if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, 
sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states).to(attn.to_q.weight.dtype) else: query = attn.to_q(hidden_states.to(attn.to_q.weight.dtype)) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() query = attn.head_to_batch_dim(query).contiguous() key = attn.head_to_batch_dim(key).contiguous() value = attn.head_to_batch_dim(value).contiguous() hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.batch_to_head_dim(hidden_states) if self.train_q_out: hidden_states = self.to_out_custom_diffusion[0](hidden_states) hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class CustomDiffusionAttnProcessor2_0(nn.Module): def __init__(self, train_kv: bool=True, train_q_out: bool=True, hidden_size: Optional[int]=None, cross_attention_dim: Optional[int]=None, out_bias: bool=True, dropout: float=0.0): super().__init__() self.train_kv = train_kv self.train_q_out = train_q_out self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if self.train_kv: self.to_k_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_custom_diffusion = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) if self.train_q_out: self.to_q_custom_diffusion = nn.Linear(hidden_size, hidden_size, bias=False) self.to_out_custom_diffusion = nn.ModuleList([]) self.to_out_custom_diffusion.append(nn.Linear(hidden_size, hidden_size, bias=out_bias)) self.to_out_custom_diffusion.append(nn.Dropout(dropout)) def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if self.train_q_out: query = self.to_q_custom_diffusion(hidden_states) else: query = attn.to_q(hidden_states) if encoder_hidden_states is None: crossattn = False encoder_hidden_states = hidden_states else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if self.train_kv: key = self.to_k_custom_diffusion(encoder_hidden_states.to(self.to_k_custom_diffusion.weight.dtype)) value = self.to_v_custom_diffusion(encoder_hidden_states.to(self.to_v_custom_diffusion.weight.dtype)) key = key.to(attn.to_q.weight.dtype) value = value.to(attn.to_q.weight.dtype) else: key = attn.to_k(encoder_hidden_states) value = 
attn.to_v(encoder_hidden_states) if crossattn: detach = torch.ones_like(key) detach[:, :1, :] = detach[:, :1, :] * 0.0 key = detach * key + (1 - detach) * key.detach() value = detach * value + (1 - detach) * value.detach() inner_dim = hidden_states.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if self.train_q_out: hidden_states = self.to_out_custom_diffusion[0](hidden_states) hidden_states = self.to_out_custom_diffusion[1](hidden_states) else: hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class SlicedAttnProcessor: def __init__(self, slice_size: int): self.slice_size = slice_size def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) dim = query.shape[-1] query = attn.head_to_batch_dim(query) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) (batch_size_attention, query_tokens, _) = query.shape hidden_states = torch.zeros((batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype) for i in range((batch_size_attention - 1) // self.slice_size + 1): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] key_slice = key[start_idx:end_idx] attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class SlicedAttnAddedKVProcessor: def __init__(self, slice_size): self.slice_size = slice_size def __call__(self, attn: 'Attention', hidden_states: torch.Tensor, encoder_hidden_states: 
Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None) -> torch.Tensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) dim = query.shape[-1] query = attn.head_to_batch_dim(query) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) value = torch.cat([encoder_hidden_states_value_proj, value], dim=1) else: key = encoder_hidden_states_key_proj value = encoder_hidden_states_value_proj (batch_size_attention, query_tokens, _) = query.shape hidden_states = torch.zeros((batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype) for i in range((batch_size_attention - 1) // self.slice_size + 1): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] key_slice = key[start_idx:end_idx] attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice) attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape) hidden_states = hidden_states + residual return hidden_states class SpatialNorm(nn.Module): def __init__(self, f_channels: int, zq_channels: int): super().__init__() self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=32, eps=1e-06, affine=True) self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0) def forward(self, f: torch.Tensor, zq: torch.Tensor) -> torch.Tensor: f_size = f.shape[-2:] zq = F.interpolate(zq, size=f_size, mode='nearest') norm_f = self.norm_layer(f) new_f = norm_f * self.conv_y(zq) + self.conv_b(zq) return new_f class IPAdapterAttnProcessor(nn.Module): def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] self.num_tokens = num_tokens if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError('`scale` should 
be a list of integers with the same length as `num_tokens`.') self.scale = scale self.to_k_ip = nn.ModuleList([nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]) self.to_v_ip = nn.ModuleList([nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]) def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, scale: float=1.0, ip_adapter_masks: Optional[torch.Tensor]=None): residual = hidden_states if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, tuple): (encoder_hidden_states, ip_hidden_states) = encoder_hidden_states else: deprecation_message = 'You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release. Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning.' deprecate('encoder_hidden_states not a tuple', '1.0.0', deprecation_message, standard_warn=False) end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0] (encoder_hidden_states, ip_hidden_states) = (encoder_hidden_states[:, :end_pos, :], [encoder_hidden_states[:, end_pos:, :]]) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) if ip_adapter_masks is not None: if not isinstance(ip_adapter_masks, List): ip_adapter_masks = list(ip_adapter_masks.unsqueeze(1)) if not len(ip_adapter_masks) == len(self.scale) == len(ip_hidden_states): raise ValueError(f'Length of ip_adapter_masks array ({len(ip_adapter_masks)}) must match length of self.scale array ({len(self.scale)}) and number of ip_hidden_states ({len(ip_hidden_states)})') else: for (index, (mask, scale, ip_state)) in enumerate(zip(ip_adapter_masks, self.scale, ip_hidden_states)): if not isinstance(mask, torch.Tensor) or mask.ndim != 4: raise ValueError('Each element of the ip_adapter_masks array should be a tensor with shape [1, num_images_for_ip_adapter, height, width]. 
Please use `IPAdapterMaskProcessor` to preprocess your mask') if mask.shape[1] != ip_state.shape[1]: raise ValueError(f'Number of masks ({mask.shape[1]}) does not match number of ip images ({ip_state.shape[1]}) at index {index}') if isinstance(scale, list) and (not len(scale) == mask.shape[1]): raise ValueError(f'Number of masks ({mask.shape[1]}) does not match number of scales ({len(scale)}) at index {index}') else: ip_adapter_masks = [None] * len(self.scale) for (current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask) in zip(ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks): skip = False if isinstance(scale, list): if all((s == 0 for s in scale)): skip = True elif scale == 0: skip = True if not skip: if mask is not None: if not isinstance(scale, list): scale = [scale] * mask.shape[1] current_num_images = mask.shape[1] for i in range(current_num_images): ip_key = to_k_ip(current_ip_hidden_states[:, i, :, :]) ip_value = to_v_ip(current_ip_hidden_states[:, i, :, :]) ip_key = attn.head_to_batch_dim(ip_key) ip_value = attn.head_to_batch_dim(ip_value) ip_attention_probs = attn.get_attention_scores(query, ip_key, None) _current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) _current_ip_hidden_states = attn.batch_to_head_dim(_current_ip_hidden_states) mask_downsample = IPAdapterMaskProcessor.downsample(mask[:, i, :, :], batch_size, _current_ip_hidden_states.shape[1], _current_ip_hidden_states.shape[2]) mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device) hidden_states = hidden_states + scale[i] * (_current_ip_hidden_states * mask_downsample) else: ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = attn.head_to_batch_dim(ip_key) ip_value = attn.head_to_batch_dim(ip_value) ip_attention_probs = attn.get_attention_scores(query, ip_key, None) current_ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) current_ip_hidden_states = attn.batch_to_head_dim(current_ip_hidden_states) hidden_states = hidden_states + scale * current_ip_hidden_states hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class IPAdapterAttnProcessor2_0(torch.nn.Module): def __init__(self, hidden_size, cross_attention_dim=None, num_tokens=(4,), scale=1.0): super().__init__() if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError(f'{self.__class__.__name__} requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim if not isinstance(num_tokens, (tuple, list)): num_tokens = [num_tokens] self.num_tokens = num_tokens if not isinstance(scale, list): scale = [scale] * len(num_tokens) if len(scale) != len(num_tokens): raise ValueError('`scale` should be a list of integers with the same length as `num_tokens`.') self.scale = scale self.to_k_ip = nn.ModuleList([nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]) self.to_v_ip = nn.ModuleList([nn.Linear(cross_attention_dim, hidden_size, bias=False) for _ in range(len(num_tokens))]) def __call__(self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, temb: 
Optional[torch.Tensor]=None, scale: float=1.0, ip_adapter_masks: Optional[torch.Tensor]=None): residual = hidden_states if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, tuple): (encoder_hidden_states, ip_hidden_states) = encoder_hidden_states else: deprecation_message = 'You have passed a tensor as `encoder_hidden_states`. This is deprecated and will be removed in a future release. Please make sure to update your script to pass `encoder_hidden_states` as a tuple to suppress this warning.' deprecate('encoder_hidden_states not a tuple', '1.0.0', deprecation_message, standard_warn=False) end_pos = encoder_hidden_states.shape[1] - self.num_tokens[0] (encoder_hidden_states, ip_hidden_states) = (encoder_hidden_states[:, :end_pos, :], [encoder_hidden_states[:, end_pos:, :]]) if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) if ip_adapter_masks is not None: if not isinstance(ip_adapter_masks, List): ip_adapter_masks = list(ip_adapter_masks.unsqueeze(1)) if not len(ip_adapter_masks) == len(self.scale) == len(ip_hidden_states): raise ValueError(f'Length of ip_adapter_masks array ({len(ip_adapter_masks)}) must match length of self.scale array ({len(self.scale)}) and number of ip_hidden_states ({len(ip_hidden_states)})') else: for (index, (mask, scale, ip_state)) in enumerate(zip(ip_adapter_masks, self.scale, ip_hidden_states)): if not isinstance(mask, torch.Tensor) or mask.ndim != 4: raise ValueError('Each element of the ip_adapter_masks array should be a tensor with shape [1, num_images_for_ip_adapter, height, width]. 
Please use `IPAdapterMaskProcessor` to preprocess your mask') if mask.shape[1] != ip_state.shape[1]: raise ValueError(f'Number of masks ({mask.shape[1]}) does not match number of ip images ({ip_state.shape[1]}) at index {index}') if isinstance(scale, list) and (not len(scale) == mask.shape[1]): raise ValueError(f'Number of masks ({mask.shape[1]}) does not match number of scales ({len(scale)}) at index {index}') else: ip_adapter_masks = [None] * len(self.scale) for (current_ip_hidden_states, scale, to_k_ip, to_v_ip, mask) in zip(ip_hidden_states, self.scale, self.to_k_ip, self.to_v_ip, ip_adapter_masks): skip = False if isinstance(scale, list): if all((s == 0 for s in scale)): skip = True elif scale == 0: skip = True if not skip: if mask is not None: if not isinstance(scale, list): scale = [scale] * mask.shape[1] current_num_images = mask.shape[1] for i in range(current_num_images): ip_key = to_k_ip(current_ip_hidden_states[:, i, :, :]) ip_value = to_v_ip(current_ip_hidden_states[:, i, :, :]) ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) _current_ip_hidden_states = F.scaled_dot_product_attention(query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False) _current_ip_hidden_states = _current_ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) _current_ip_hidden_states = _current_ip_hidden_states.to(query.dtype) mask_downsample = IPAdapterMaskProcessor.downsample(mask[:, i, :, :], batch_size, _current_ip_hidden_states.shape[1], _current_ip_hidden_states.shape[2]) mask_downsample = mask_downsample.to(dtype=query.dtype, device=query.device) hidden_states = hidden_states + scale[i] * (_current_ip_hidden_states * mask_downsample) else: ip_key = to_k_ip(current_ip_hidden_states) ip_value = to_v_ip(current_ip_hidden_states) ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) current_ip_hidden_states = F.scaled_dot_product_attention(query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False) current_ip_hidden_states = current_ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) current_ip_hidden_states = current_ip_hidden_states.to(query.dtype) hidden_states = hidden_states + scale * current_ip_hidden_states hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class PAGIdentitySelfAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('PAGIdentitySelfAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, temb: Optional[torch.FloatTensor]=None) -> torch.Tensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) 
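# PAG identity self-attention: the incoming batch is the concatenation of the original and the perturbed streams, so split it in two below; only the original half goes through real self-attention, while the perturbed half is replaced by its value projection (an "identity" attention map) before the output projection.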
(hidden_states_org, hidden_states_ptb) = hidden_states.chunk(2) (batch_size, sequence_length, _) = hidden_states_org.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states_org) key = attn.to_k(hidden_states_org) value = attn.to_v(hidden_states_org) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states_org = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query.dtype) hidden_states_org = attn.to_out[0](hidden_states_org) hidden_states_org = attn.to_out[1](hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) (batch_size, sequence_length, _) = hidden_states_ptb.shape if attn.group_norm is not None: hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2) hidden_states_ptb = attn.to_v(hidden_states_ptb) hidden_states_ptb = hidden_states_ptb.to(query.dtype) hidden_states_ptb = attn.to_out[0](hidden_states_ptb) hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class PAGCFGIdentitySelfAttnProcessor2_0: def __init__(self): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('PAGCFGIdentitySelfAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') def __call__(self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, temb: Optional[torch.FloatTensor]=None) -> torch.Tensor: residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: (batch_size, channel, height, width) = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) (hidden_states_uncond, hidden_states_org, hidden_states_ptb) = hidden_states.chunk(3) hidden_states_org = torch.cat([hidden_states_uncond, hidden_states_org]) (batch_size, sequence_length, _) = hidden_states_org.shape if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states_org = attn.group_norm(hidden_states_org.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states_org) key = attn.to_k(hidden_states_org) value = attn.to_v(hidden_states_org) inner_dim = key.shape[-1] head_dim = 
inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states_org = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states_org = hidden_states_org.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states_org = hidden_states_org.to(query.dtype) hidden_states_org = attn.to_out[0](hidden_states_org) hidden_states_org = attn.to_out[1](hidden_states_org) if input_ndim == 4: hidden_states_org = hidden_states_org.transpose(-1, -2).reshape(batch_size, channel, height, width) (batch_size, sequence_length, _) = hidden_states_ptb.shape if attn.group_norm is not None: hidden_states_ptb = attn.group_norm(hidden_states_ptb.transpose(1, 2)).transpose(1, 2) value = attn.to_v(hidden_states_ptb) hidden_states_ptb = value hidden_states_ptb = hidden_states_ptb.to(query.dtype) hidden_states_ptb = attn.to_out[0](hidden_states_ptb) hidden_states_ptb = attn.to_out[1](hidden_states_ptb) if input_ndim == 4: hidden_states_ptb = hidden_states_ptb.transpose(-1, -2).reshape(batch_size, channel, height, width) hidden_states = torch.cat([hidden_states_org, hidden_states_ptb]) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class LoRAAttnProcessor: def __init__(self): pass class LoRAAttnProcessor2_0: def __init__(self): pass class LoRAXFormersAttnProcessor: def __init__(self): pass class LoRAAttnAddedKVProcessor: def __init__(self): pass class FluxSingleAttnProcessor2_0(FluxAttnProcessor2_0): def __init__(self): deprecation_message = '`FluxSingleAttnProcessor2_0` is deprecated and will be removed in a future version. Please use `FluxAttnProcessor2_0` instead.' 
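# FluxSingleAttnProcessor2_0 is kept only as a deprecated alias: it emits the deprecation warning below and otherwise behaves exactly like its parent FluxAttnProcessor2_0.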
deprecate('FluxSingleAttnProcessor2_0', '0.32.0', deprecation_message) super().__init__() ADDED_KV_ATTENTION_PROCESSORS = (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0, XFormersAttnAddedKVProcessor) CROSS_ATTENTION_PROCESSORS = (AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor, SlicedAttnProcessor, IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0) AttentionProcessor = Union[AttnProcessor, AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, SlicedAttnProcessor, AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0, XFormersAttnAddedKVProcessor, CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0, PAGCFGIdentitySelfAttnProcessor2_0, PAGIdentitySelfAttnProcessor2_0, PAGCFGHunyuanAttnProcessor2_0, PAGHunyuanAttnProcessor2_0] # File: diffusers-main/src/diffusers/models/autoencoders/autoencoder_asym_kl.py from typing import Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils.accelerate_utils import apply_forward_hook from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder, MaskConditionDecoder class AsymmetricAutoencoderKL(ModelMixin, ConfigMixin): @register_to_config def __init__(self, in_channels: int=3, out_channels: int=3, down_block_types: Tuple[str, ...]=('DownEncoderBlock2D',), down_block_out_channels: Tuple[int, ...]=(64,), layers_per_down_block: int=1, up_block_types: Tuple[str, ...]=('UpDecoderBlock2D',), up_block_out_channels: Tuple[int, ...]=(64,), layers_per_up_block: int=1, act_fn: str='silu', latent_channels: int=4, norm_num_groups: int=32, sample_size: int=32, scaling_factor: float=0.18215) -> None: super().__init__() self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=down_block_out_channels, layers_per_block=layers_per_down_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True) self.decoder = MaskConditionDecoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=up_block_out_channels, layers_per_block=layers_per_up_block, act_fn=act_fn, norm_num_groups=norm_num_groups) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) self.use_slicing = False self.use_tiling = False self.register_to_config(block_out_channels=up_block_out_channels) self.register_to_config(force_upcast=False) @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> Union[AutoencoderKLOutput, Tuple[torch.Tensor]]: h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, image: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[DecoderOutput, Tuple[torch.Tensor]]: z = self.post_quant_conv(z) dec = self.decoder(z, image, mask) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode(self, z: torch.Tensor, generator: Optional[torch.Generator]=None, image: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[DecoderOutput, Tuple[torch.Tensor]]: decoded = 
self._decode(z, image, mask).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def forward(self, sample: torch.Tensor, mask: Optional[torch.Tensor]=None, sample_posterior: bool=False, return_dict: bool=True, generator: Optional[torch.Generator]=None) -> Union[DecoderOutput, Tuple[torch.Tensor]]: x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, generator, sample, mask).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # File: diffusers-main/src/diffusers/models/autoencoders/autoencoder_kl.py from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders.single_file_model import FromOriginalModelMixin from ...utils.accelerate_utils import apply_forward_hook from ..attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0 from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True _no_split_modules = ['BasicTransformerBlock', 'ResnetBlock2D'] @register_to_config def __init__(self, in_channels: int=3, out_channels: int=3, down_block_types: Tuple[str]=('DownEncoderBlock2D',), up_block_types: Tuple[str]=('UpDecoderBlock2D',), block_out_channels: Tuple[int]=(64,), layers_per_block: int=1, act_fn: str='silu', latent_channels: int=4, norm_num_groups: int=32, sample_size: int=32, scaling_factor: float=0.18215, shift_factor: Optional[float]=None, latents_mean: Optional[Tuple[float]]=None, latents_std: Optional[Tuple[float]]=None, force_upcast: float=True, use_quant_conv: bool=True, use_post_quant_conv: bool=True, mid_block_add_attention: bool=True): super().__init__() self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, mid_block_add_attention=mid_block_add_attention) self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, mid_block_add_attention=mid_block_add_attention) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) if use_quant_conv else None self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) if use_post_quant_conv else None self.use_slicing = False self.use_tiling = False self.tile_sample_min_size = self.config.sample_size sample_size = self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size self.tile_latent_min_size = int(sample_size / 2 ** (len(self.config.block_out_channels) - 1)) self.tile_overlap_factor = 0.25 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (Encoder, Decoder)): module.gradient_checkpointing = value def enable_tiling(self, use_tiling: bool=True): self.use_tiling = use_tiling def disable_tiling(self): self.enable_tiling(False) def enable_slicing(self): self.use_slicing = True def 
disable_slicing(self): self.use_slicing = False @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(x, return_dict=return_dict) if self.use_slicing and x.shape[0] > 1: encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self.encoder(x) if self.quant_conv is not None: moments = self.quant_conv(h) else: moments = h posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool=True) -> Union[DecoderOutput, torch.Tensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(z, return_dict=return_dict) if self.post_quant_conv is not None: z = self.post_quant_conv(z) dec = self.decoder(z) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode(self, z: torch.FloatTensor, return_dict: bool=True, generator=None) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: 
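# Cross-fade the top `blend_extent` rows of tile `b` with the bottom rows of tile `a` (the tile directly above it), ramping the weight linearly from a to b so tiled encode/decode shows no visible seams; blend_h below does the same along the width dimension.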
blend_extent = min(a.shape[2], b.shape[2], blend_extent) for y in range(blend_extent): b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for x in range(blend_extent): b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def tiled_encode(self, x: torch.Tensor, return_dict: bool=True) -> AutoencoderKLOutput: overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i:i + self.tile_sample_min_size, j:j + self.tile_sample_min_size] tile = self.encoder(tile) if self.config.use_quant_conv: tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for (i, row) in enumerate(rows): result_row = [] for (j, tile) in enumerate(row): if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) moments = torch.cat(result_rows, dim=2) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def tiled_decode(self, z: torch.Tensor, return_dict: bool=True) -> Union[DecoderOutput, torch.Tensor]: overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) row_limit = self.tile_sample_min_size - blend_extent rows = [] for i in range(0, z.shape[2], overlap_size): row = [] for j in range(0, z.shape[3], overlap_size): tile = z[:, :, i:i + self.tile_latent_min_size, j:j + self.tile_latent_min_size] if self.config.use_post_quant_conv: tile = self.post_quant_conv(tile) decoded = self.decoder(tile) row.append(decoded) rows.append(row) result_rows = [] for (i, row) in enumerate(rows): result_row = [] for (j, tile) in enumerate(row): if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) dec = torch.cat(result_rows, dim=2) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward(self, sample: torch.Tensor, sample_posterior: bool=False, return_dict: bool=True, generator: Optional[torch.Generator]=None) -> Union[DecoderOutput, torch.Tensor]: x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) 
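# After fusing the Q/K/V projection weights inside each Attention module, switch every attention layer to the fused scaled-dot-product-attention processor so the fused weights are actually used.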
self.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) # File: diffusers-main/src/diffusers/models/autoencoders/autoencoder_kl_cogvideox.py from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders.single_file_model import FromOriginalModelMixin from ...utils import logging from ...utils.accelerate_utils import apply_forward_hook from ..activations import get_activation from ..downsampling import CogVideoXDownsample3D from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from ..upsampling import CogVideoXUpsample3D from .vae import DecoderOutput, DiagonalGaussianDistribution logger = logging.get_logger(__name__) class CogVideoXSafeConv3d(nn.Conv3d): def forward(self, input: torch.Tensor) -> torch.Tensor: memory_count = torch.prod(torch.tensor(input.shape)).item() * 2 / 1024 ** 3 if memory_count > 2: kernel_size = self.kernel_size[0] part_num = int(memory_count / 2) + 1 input_chunks = torch.chunk(input, part_num, dim=2) if kernel_size > 1: input_chunks = [input_chunks[0]] + [torch.cat((input_chunks[i - 1][:, :, -kernel_size + 1:], input_chunks[i]), dim=2) for i in range(1, len(input_chunks))] output_chunks = [] for input_chunk in input_chunks: output_chunks.append(super().forward(input_chunk)) output = torch.cat(output_chunks, dim=2) return output else: return super().forward(input) class CogVideoXCausalConv3d(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int, int]], stride: int=1, dilation: int=1, pad_mode: str='constant'): super().__init__() if isinstance(kernel_size, int): kernel_size = (kernel_size,) * 3 (time_kernel_size, height_kernel_size, width_kernel_size) = kernel_size self.pad_mode = pad_mode time_pad = dilation * (time_kernel_size - 1) + (1 - stride) height_pad = height_kernel_size // 2 width_pad = width_kernel_size // 2 self.height_pad = height_pad self.width_pad = width_pad self.time_pad = time_pad self.time_causal_padding = (width_pad, width_pad, height_pad, height_pad, time_pad, 0) self.temporal_dim = 2 self.time_kernel_size = time_kernel_size stride = (stride, 1, 1) dilation = (dilation, 1, 1) self.conv = CogVideoXSafeConv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation) self.conv_cache = None def fake_context_parallel_forward(self, inputs: torch.Tensor) -> torch.Tensor: kernel_size = self.time_kernel_size if kernel_size > 1: cached_inputs = [self.conv_cache] if self.conv_cache is not None else [inputs[:, :, :1]] * (kernel_size - 1) inputs = torch.cat(cached_inputs + [inputs], dim=2) return inputs def _clear_fake_context_parallel_cache(self): del self.conv_cache self.conv_cache = None def forward(self, inputs: torch.Tensor) -> torch.Tensor: inputs = self.fake_context_parallel_forward(inputs) self._clear_fake_context_parallel_cache() self.conv_cache = inputs[:, :, -self.time_kernel_size + 1:].clone() padding_2d = (self.width_pad, self.width_pad, self.height_pad, self.height_pad) inputs = F.pad(inputs, padding_2d, mode='constant', value=0) output = self.conv(inputs) return output class CogVideoXSpatialNorm3D(nn.Module): def __init__(self, f_channels: int, zq_channels: int, groups: int=32): super().__init__() self.norm_layer = 
nn.GroupNorm(num_channels=f_channels, num_groups=groups, eps=1e-06, affine=True) self.conv_y = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1) self.conv_b = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1) def forward(self, f: torch.Tensor, zq: torch.Tensor) -> torch.Tensor: if f.shape[2] > 1 and f.shape[2] % 2 == 1: (f_first, f_rest) = (f[:, :, :1], f[:, :, 1:]) (f_first_size, f_rest_size) = (f_first.shape[-3:], f_rest.shape[-3:]) (z_first, z_rest) = (zq[:, :, :1], zq[:, :, 1:]) z_first = F.interpolate(z_first, size=f_first_size) z_rest = F.interpolate(z_rest, size=f_rest_size) zq = torch.cat([z_first, z_rest], dim=2) else: zq = F.interpolate(zq, size=f.shape[-3:]) norm_f = self.norm_layer(f) new_f = norm_f * self.conv_y(zq) + self.conv_b(zq) return new_f class CogVideoXResnetBlock3D(nn.Module): def __init__(self, in_channels: int, out_channels: Optional[int]=None, dropout: float=0.0, temb_channels: int=512, groups: int=32, eps: float=1e-06, non_linearity: str='swish', conv_shortcut: bool=False, spatial_norm_dim: Optional[int]=None, pad_mode: str='first'): super().__init__() out_channels = out_channels or in_channels self.in_channels = in_channels self.out_channels = out_channels self.nonlinearity = get_activation(non_linearity) self.use_conv_shortcut = conv_shortcut if spatial_norm_dim is None: self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=groups, eps=eps) self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=groups, eps=eps) else: self.norm1 = CogVideoXSpatialNorm3D(f_channels=in_channels, zq_channels=spatial_norm_dim, groups=groups) self.norm2 = CogVideoXSpatialNorm3D(f_channels=out_channels, zq_channels=spatial_norm_dim, groups=groups) self.conv1 = CogVideoXCausalConv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode) if temb_channels > 0: self.temb_proj = nn.Linear(in_features=temb_channels, out_features=out_channels) self.dropout = nn.Dropout(dropout) self.conv2 = CogVideoXCausalConv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode) if self.in_channels != self.out_channels: if self.use_conv_shortcut: self.conv_shortcut = CogVideoXCausalConv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode) else: self.conv_shortcut = CogVideoXSafeConv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0) def forward(self, inputs: torch.Tensor, temb: Optional[torch.Tensor]=None, zq: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = inputs if zq is not None: hidden_states = self.norm1(hidden_states, zq) else: hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if temb is not None: hidden_states = hidden_states + self.temb_proj(self.nonlinearity(temb))[:, :, None, None, None] if zq is not None: hidden_states = self.norm2(hidden_states, zq) else: hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.in_channels != self.out_channels: inputs = self.conv_shortcut(inputs) hidden_states = hidden_states + inputs return hidden_states class CogVideoXDownBlock3D(nn.Module): _supports_gradient_checkpointing = True def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_act_fn: 
str='swish', resnet_groups: int=32, add_downsample: bool=True, downsample_padding: int=0, compress_time: bool=False, pad_mode: str='first'): super().__init__() resnets = [] for i in range(num_layers): in_channel = in_channels if i == 0 else out_channels resnets.append(CogVideoXResnetBlock3D(in_channels=in_channel, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=resnet_groups, eps=resnet_eps, non_linearity=resnet_act_fn, pad_mode=pad_mode)) self.resnets = nn.ModuleList(resnets) self.downsamplers = None if add_downsample: self.downsamplers = nn.ModuleList([CogVideoXDownsample3D(out_channels, out_channels, padding=downsample_padding, compress_time=compress_time)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, zq: Optional[torch.Tensor]=None) -> torch.Tensor: for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def create_forward(*inputs): return module(*inputs) return create_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, zq) else: hidden_states = resnet(hidden_states, temb, zq) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states class CogVideoXMidBlock3D(nn.Module): _supports_gradient_checkpointing = True def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_act_fn: str='swish', resnet_groups: int=32, spatial_norm_dim: Optional[int]=None, pad_mode: str='first'): super().__init__() resnets = [] for _ in range(num_layers): resnets.append(CogVideoXResnetBlock3D(in_channels=in_channels, out_channels=in_channels, dropout=dropout, temb_channels=temb_channels, groups=resnet_groups, eps=resnet_eps, spatial_norm_dim=spatial_norm_dim, non_linearity=resnet_act_fn, pad_mode=pad_mode)) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, zq: Optional[torch.Tensor]=None) -> torch.Tensor: for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def create_forward(*inputs): return module(*inputs) return create_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, zq) else: hidden_states = resnet(hidden_states, temb, zq) return hidden_states class CogVideoXUpBlock3D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_act_fn: str='swish', resnet_groups: int=32, spatial_norm_dim: int=16, add_upsample: bool=True, upsample_padding: int=1, compress_time: bool=False, pad_mode: str='first'): super().__init__() resnets = [] for i in range(num_layers): in_channel = in_channels if i == 0 else out_channels resnets.append(CogVideoXResnetBlock3D(in_channels=in_channel, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=resnet_groups, eps=resnet_eps, non_linearity=resnet_act_fn, spatial_norm_dim=spatial_norm_dim, pad_mode=pad_mode)) self.resnets = nn.ModuleList(resnets) self.upsamplers = None if add_upsample: self.upsamplers = nn.ModuleList([CogVideoXUpsample3D(out_channels, out_channels, padding=upsample_padding, compress_time=compress_time)]) self.gradient_checkpointing = False def forward(self, 
hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, zq: Optional[torch.Tensor]=None) -> torch.Tensor: for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def create_forward(*inputs): return module(*inputs) return create_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, zq) else: hidden_states = resnet(hidden_states, temb, zq) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class CogVideoXEncoder3D(nn.Module): _supports_gradient_checkpointing = True def __init__(self, in_channels: int=3, out_channels: int=16, down_block_types: Tuple[str, ...]=('CogVideoXDownBlock3D', 'CogVideoXDownBlock3D', 'CogVideoXDownBlock3D', 'CogVideoXDownBlock3D'), block_out_channels: Tuple[int, ...]=(128, 256, 256, 512), layers_per_block: int=3, act_fn: str='silu', norm_eps: float=1e-06, norm_num_groups: int=32, dropout: float=0.0, pad_mode: str='first', temporal_compression_ratio: float=4): super().__init__() temporal_compress_level = int(np.log2(temporal_compression_ratio)) self.conv_in = CogVideoXCausalConv3d(in_channels, block_out_channels[0], kernel_size=3, pad_mode=pad_mode) self.down_blocks = nn.ModuleList([]) output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 compress_time = i < temporal_compress_level if down_block_type == 'CogVideoXDownBlock3D': down_block = CogVideoXDownBlock3D(in_channels=input_channel, out_channels=output_channel, temb_channels=0, dropout=dropout, num_layers=layers_per_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, add_downsample=not is_final_block, compress_time=compress_time) else: raise ValueError('Invalid `down_block_type` encountered. 
Must be `CogVideoXDownBlock3D`') self.down_blocks.append(down_block) self.mid_block = CogVideoXMidBlock3D(in_channels=block_out_channels[-1], temb_channels=0, dropout=dropout, num_layers=2, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, pad_mode=pad_mode) self.norm_out = nn.GroupNorm(norm_num_groups, block_out_channels[-1], eps=1e-06) self.conv_act = nn.SiLU() self.conv_out = CogVideoXCausalConv3d(block_out_channels[-1], 2 * out_channels, kernel_size=3, pad_mode=pad_mode) self.gradient_checkpointing = False def forward(self, sample: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.conv_in(sample) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward for down_block in self.down_blocks: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), hidden_states, temb, None) hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), hidden_states, temb, None) else: for down_block in self.down_blocks: hidden_states = down_block(hidden_states, temb, None) hidden_states = self.mid_block(hidden_states, temb, None) hidden_states = self.norm_out(hidden_states) hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class CogVideoXDecoder3D(nn.Module): _supports_gradient_checkpointing = True def __init__(self, in_channels: int=16, out_channels: int=3, up_block_types: Tuple[str, ...]=('CogVideoXUpBlock3D', 'CogVideoXUpBlock3D', 'CogVideoXUpBlock3D', 'CogVideoXUpBlock3D'), block_out_channels: Tuple[int, ...]=(128, 256, 256, 512), layers_per_block: int=3, act_fn: str='silu', norm_eps: float=1e-06, norm_num_groups: int=32, dropout: float=0.0, pad_mode: str='first', temporal_compression_ratio: float=4): super().__init__() reversed_block_out_channels = list(reversed(block_out_channels)) self.conv_in = CogVideoXCausalConv3d(in_channels, reversed_block_out_channels[0], kernel_size=3, pad_mode=pad_mode) self.mid_block = CogVideoXMidBlock3D(in_channels=reversed_block_out_channels[0], temb_channels=0, num_layers=2, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, spatial_norm_dim=in_channels, pad_mode=pad_mode) self.up_blocks = nn.ModuleList([]) output_channel = reversed_block_out_channels[0] temporal_compress_level = int(np.log2(temporal_compression_ratio)) for (i, up_block_type) in enumerate(up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 compress_time = i < temporal_compress_level if up_block_type == 'CogVideoXUpBlock3D': up_block = CogVideoXUpBlock3D(in_channels=prev_output_channel, out_channels=output_channel, temb_channels=0, dropout=dropout, num_layers=layers_per_block + 1, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, spatial_norm_dim=in_channels, add_upsample=not is_final_block, compress_time=compress_time, pad_mode=pad_mode) prev_output_channel = output_channel else: raise ValueError('Invalid `up_block_type` encountered. 
Must be `CogVideoXUpBlock3D`') self.up_blocks.append(up_block) self.norm_out = CogVideoXSpatialNorm3D(reversed_block_out_channels[-1], in_channels, groups=norm_num_groups) self.conv_act = nn.SiLU() self.conv_out = CogVideoXCausalConv3d(reversed_block_out_channels[-1], out_channels, kernel_size=3, pad_mode=pad_mode) self.gradient_checkpointing = False def forward(self, sample: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.conv_in(sample) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), hidden_states, temb, sample) for up_block in self.up_blocks: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), hidden_states, temb, sample) else: hidden_states = self.mid_block(hidden_states, temb, sample) for up_block in self.up_blocks: hidden_states = up_block(hidden_states, temb, sample) hidden_states = self.norm_out(hidden_states, sample) hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class AutoencoderKLCogVideoX(ModelMixin, ConfigMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True _no_split_modules = ['CogVideoXResnetBlock3D'] @register_to_config def __init__(self, in_channels: int=3, out_channels: int=3, down_block_types: Tuple[str]=('CogVideoXDownBlock3D', 'CogVideoXDownBlock3D', 'CogVideoXDownBlock3D', 'CogVideoXDownBlock3D'), up_block_types: Tuple[str]=('CogVideoXUpBlock3D', 'CogVideoXUpBlock3D', 'CogVideoXUpBlock3D', 'CogVideoXUpBlock3D'), block_out_channels: Tuple[int]=(128, 256, 256, 512), latent_channels: int=16, layers_per_block: int=3, act_fn: str='silu', norm_eps: float=1e-06, norm_num_groups: int=32, temporal_compression_ratio: float=4, sample_height: int=480, sample_width: int=720, scaling_factor: float=1.15258426, shift_factor: Optional[float]=None, latents_mean: Optional[Tuple[float]]=None, latents_std: Optional[Tuple[float]]=None, force_upcast: float=True, use_quant_conv: bool=False, use_post_quant_conv: bool=False): super().__init__() self.encoder = CogVideoXEncoder3D(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_eps=norm_eps, norm_num_groups=norm_num_groups, temporal_compression_ratio=temporal_compression_ratio) self.decoder = CogVideoXDecoder3D(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_eps=norm_eps, norm_num_groups=norm_num_groups, temporal_compression_ratio=temporal_compression_ratio) self.quant_conv = CogVideoXSafeConv3d(2 * out_channels, 2 * out_channels, 1) if use_quant_conv else None self.post_quant_conv = CogVideoXSafeConv3d(out_channels, out_channels, 1) if use_post_quant_conv else None self.use_slicing = False self.use_tiling = False self.num_latent_frames_batch_size = 2 self.num_sample_frames_batch_size = 8 self.tile_sample_min_height = sample_height // 2 self.tile_sample_min_width = sample_width // 2 self.tile_latent_min_height = int(self.tile_sample_min_height / 2 ** (len(self.config.block_out_channels) - 1)) self.tile_latent_min_width = int(self.tile_sample_min_width / 2 ** (len(self.config.block_out_channels) - 1)) self.tile_overlap_factor_height = 1 
/ 6 self.tile_overlap_factor_width = 1 / 5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (CogVideoXEncoder3D, CogVideoXDecoder3D)): module.gradient_checkpointing = value def _clear_fake_context_parallel_cache(self): for (name, module) in self.named_modules(): if isinstance(module, CogVideoXCausalConv3d): logger.debug(f'Clearing fake Context Parallel cache for layer: {name}') module._clear_fake_context_parallel_cache() def enable_tiling(self, tile_sample_min_height: Optional[int]=None, tile_sample_min_width: Optional[int]=None, tile_overlap_factor_height: Optional[float]=None, tile_overlap_factor_width: Optional[float]=None) -> None: self.use_tiling = True self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width self.tile_latent_min_height = int(self.tile_sample_min_height / 2 ** (len(self.config.block_out_channels) - 1)) self.tile_latent_min_width = int(self.tile_sample_min_width / 2 ** (len(self.config.block_out_channels) - 1)) self.tile_overlap_factor_height = tile_overlap_factor_height or self.tile_overlap_factor_height self.tile_overlap_factor_width = tile_overlap_factor_width or self.tile_overlap_factor_width def disable_tiling(self) -> None: self.use_tiling = False def enable_slicing(self) -> None: self.use_slicing = True def disable_slicing(self) -> None: self.use_slicing = False def _encode(self, x: torch.Tensor) -> torch.Tensor: (batch_size, num_channels, num_frames, height, width) = x.shape if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height): return self.tiled_encode(x) frame_batch_size = self.num_sample_frames_batch_size enc = [] for i in range(num_frames // frame_batch_size): remaining_frames = num_frames % frame_batch_size start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames) end_frame = frame_batch_size * (i + 1) + remaining_frames x_intermediate = x[:, :, start_frame:end_frame] x_intermediate = self.encoder(x_intermediate) if self.quant_conv is not None: x_intermediate = self.quant_conv(x_intermediate) enc.append(x_intermediate) self._clear_fake_context_parallel_cache() enc = torch.cat(enc, dim=2) return enc @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool=True) -> Union[DecoderOutput, torch.Tensor]: (batch_size, num_channels, num_frames, height, width) = z.shape if self.use_tiling and (width > self.tile_latent_min_width or height > self.tile_latent_min_height): return self.tiled_decode(z, return_dict=return_dict) frame_batch_size = self.num_latent_frames_batch_size dec = [] for i in range(num_frames // frame_batch_size): remaining_frames = num_frames % frame_batch_size start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames) end_frame = frame_batch_size * (i + 1) + remaining_frames z_intermediate = z[:, :, start_frame:end_frame] if self.post_quant_conv is not None: z_intermediate = self.post_quant_conv(z_intermediate) z_intermediate = self.decoder(z_intermediate) dec.append(z_intermediate) 
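# Frames are decoded in small causal batches that share temporal context through the CogVideoXCausalConv3d conv_cache; once every batch has been decoded, reset that cache and stitch the chunks back together along the temporal axis.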
self._clear_fake_context_parallel_cache() dec = torch.cat(dec, dim=2) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode(self, z: torch.Tensor, return_dict: bool=True) -> Union[DecoderOutput, torch.Tensor]: if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for y in range(blend_extent): b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (y / blend_extent) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[4], b.shape[4], blend_extent) for x in range(blend_extent): b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (x / blend_extent) return b def tiled_encode(self, x: torch.Tensor) -> torch.Tensor: (batch_size, num_channels, num_frames, height, width) = x.shape overlap_height = int(self.tile_sample_min_height * (1 - self.tile_overlap_factor_height)) overlap_width = int(self.tile_sample_min_width * (1 - self.tile_overlap_factor_width)) blend_extent_height = int(self.tile_latent_min_height * self.tile_overlap_factor_height) blend_extent_width = int(self.tile_latent_min_width * self.tile_overlap_factor_width) row_limit_height = self.tile_latent_min_height - blend_extent_height row_limit_width = self.tile_latent_min_width - blend_extent_width frame_batch_size = self.num_sample_frames_batch_size rows = [] for i in range(0, height, overlap_height): row = [] for j in range(0, width, overlap_width): time = [] for k in range(num_frames // frame_batch_size): remaining_frames = num_frames % frame_batch_size start_frame = frame_batch_size * k + (0 if k == 0 else remaining_frames) end_frame = frame_batch_size * (k + 1) + remaining_frames tile = x[:, :, start_frame:end_frame, i:i + self.tile_sample_min_height, j:j + self.tile_sample_min_width] tile = self.encoder(tile) if self.quant_conv is not None: tile = self.quant_conv(tile) time.append(tile) self._clear_fake_context_parallel_cache() row.append(torch.cat(time, dim=2)) rows.append(row) result_rows = [] for (i, row) in enumerate(rows): result_row = [] for (j, tile) in enumerate(row): if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent_width) result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width]) result_rows.append(torch.cat(result_row, dim=4)) enc = torch.cat(result_rows, dim=3) return enc def tiled_decode(self, z: torch.Tensor, return_dict: bool=True) -> Union[DecoderOutput, torch.Tensor]: (batch_size, num_channels, num_frames, height, width) = z.shape overlap_height = int(self.tile_latent_min_height * (1 - self.tile_overlap_factor_height)) overlap_width = int(self.tile_latent_min_width * (1 - self.tile_overlap_factor_width)) blend_extent_height = int(self.tile_sample_min_height * self.tile_overlap_factor_height) blend_extent_width = int(self.tile_sample_min_width * self.tile_overlap_factor_width) row_limit_height = self.tile_sample_min_height - blend_extent_height row_limit_width = self.tile_sample_min_width - blend_extent_width frame_batch_size = self.num_latent_frames_batch_size rows = [] for 
i in range(0, height, overlap_height): row = [] for j in range(0, width, overlap_width): time = [] for k in range(num_frames // frame_batch_size): remaining_frames = num_frames % frame_batch_size start_frame = frame_batch_size * k + (0 if k == 0 else remaining_frames) end_frame = frame_batch_size * (k + 1) + remaining_frames tile = z[:, :, start_frame:end_frame, i:i + self.tile_latent_min_height, j:j + self.tile_latent_min_width] if self.post_quant_conv is not None: tile = self.post_quant_conv(tile) tile = self.decoder(tile) time.append(tile) self._clear_fake_context_parallel_cache() row.append(torch.cat(time, dim=2)) rows.append(row) result_rows = [] for (i, row) in enumerate(rows): result_row = [] for (j, tile) in enumerate(row): if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent_width) result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width]) result_rows.append(torch.cat(result_row, dim=4)) dec = torch.cat(result_rows, dim=3) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward(self, sample: torch.Tensor, sample_posterior: bool=False, return_dict: bool=True, generator: Optional[torch.Generator]=None) -> Union[torch.Tensor, torch.Tensor]: x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z) if not return_dict: return (dec,) return dec # File: diffusers-main/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import is_torch_version from ...utils.accelerate_utils import apply_forward_hook from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from ..unets.unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder class TemporalDecoder(nn.Module): def __init__(self, in_channels: int=4, out_channels: int=3, block_out_channels: Tuple[int]=(128, 256, 512, 512), layers_per_block: int=2): super().__init__() self.layers_per_block = layers_per_block self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1) self.mid_block = MidBlockTemporalDecoder(num_layers=self.layers_per_block, in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], attention_head_dim=block_out_channels[-1]) self.up_blocks = nn.ModuleList([]) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for i in range(len(block_out_channels)): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = UpBlockTemporalDecoder(num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, add_upsample=not is_final_block) self.up_blocks.append(up_block) prev_output_channel = output_channel self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-06) self.conv_act = nn.SiLU() self.conv_out = torch.nn.Conv2d(in_channels=block_out_channels[0], out_channels=out_channels, kernel_size=3, padding=1) conv_out_kernel_size = (3, 1, 1) padding = [int(k // 2) for k in 
conv_out_kernel_size] self.time_conv_out = torch.nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=conv_out_kernel_size, padding=padding) self.gradient_checkpointing = False def forward(self, sample: torch.Tensor, image_only_indicator: torch.Tensor, num_frames: int=1) -> torch.Tensor: sample = self.conv_in(sample) upscale_dtype = next(iter(self.up_blocks.parameters())).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, image_only_indicator, use_reentrant=False) sample = sample.to(upscale_dtype) for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, image_only_indicator, use_reentrant=False) else: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, image_only_indicator) sample = sample.to(upscale_dtype) for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, image_only_indicator) else: sample = self.mid_block(sample, image_only_indicator=image_only_indicator) sample = sample.to(upscale_dtype) for up_block in self.up_blocks: sample = up_block(sample, image_only_indicator=image_only_indicator) sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) (batch_frames, channels, height, width) = sample.shape batch_size = batch_frames // num_frames sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) sample = self.time_conv_out(sample) sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) return sample class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, in_channels: int=3, out_channels: int=3, down_block_types: Tuple[str]=('DownEncoderBlock2D',), block_out_channels: Tuple[int]=(64,), layers_per_block: int=1, latent_channels: int=4, sample_size: int=32, scaling_factor: float=0.18215, force_upcast: float=True): super().__init__() self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, double_z=True) self.decoder = TemporalDecoder(in_channels=latent_channels, out_channels=out_channels, block_out_channels=block_out_channels, layers_per_block=layers_per_block) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) sample_size = self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size self.tile_latent_min_size = int(sample_size / 2 ** (len(self.config.block_out_channels) - 1)) self.tile_overlap_factor = 0.25 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (Encoder, TemporalDecoder)): module.gradient_checkpointing = value @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for 
(name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) @apply_forward_hook def decode(self, z: torch.Tensor, num_frames: int, return_dict: bool=True) -> Union[DecoderOutput, torch.Tensor]: batch_size = z.shape[0] // num_frames image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device) decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator) if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def forward(self, sample: torch.Tensor, sample_posterior: bool=False, return_dict: bool=True, generator: Optional[torch.Generator]=None, num_frames: int=1) -> Union[DecoderOutput, torch.Tensor]: x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, num_frames=num_frames).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # File: diffusers-main/src/diffusers/models/autoencoders/autoencoder_oobleck.py import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn as nn from torch.nn.utils import weight_norm from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ...utils.accelerate_utils import apply_forward_hook from ...utils.torch_utils import randn_tensor from ..modeling_utils import ModelMixin class Snake1d(nn.Module): def __init__(self, hidden_dim, logscale=True): super().__init__() self.alpha = nn.Parameter(torch.zeros(1, hidden_dim, 1)) self.beta = nn.Parameter(torch.zeros(1, hidden_dim, 1)) self.alpha.requires_grad = True self.beta.requires_grad = True self.logscale = logscale def forward(self, hidden_states): shape = hidden_states.shape alpha = self.alpha if not self.logscale else torch.exp(self.alpha) beta = self.beta if not self.logscale else torch.exp(self.beta) hidden_states 
= hidden_states.reshape(shape[0], shape[1], -1) hidden_states = hidden_states + (beta + 1e-09).reciprocal() * torch.sin(alpha * hidden_states).pow(2) hidden_states = hidden_states.reshape(shape) return hidden_states class OobleckResidualUnit(nn.Module): def __init__(self, dimension: int=16, dilation: int=1): super().__init__() pad = (7 - 1) * dilation // 2 self.snake1 = Snake1d(dimension) self.conv1 = weight_norm(nn.Conv1d(dimension, dimension, kernel_size=7, dilation=dilation, padding=pad)) self.snake2 = Snake1d(dimension) self.conv2 = weight_norm(nn.Conv1d(dimension, dimension, kernel_size=1)) def forward(self, hidden_state): output_tensor = hidden_state output_tensor = self.conv1(self.snake1(output_tensor)) output_tensor = self.conv2(self.snake2(output_tensor)) padding = (hidden_state.shape[-1] - output_tensor.shape[-1]) // 2 if padding > 0: hidden_state = hidden_state[..., padding:-padding] output_tensor = hidden_state + output_tensor return output_tensor class OobleckEncoderBlock(nn.Module): def __init__(self, input_dim, output_dim, stride: int=1): super().__init__() self.res_unit1 = OobleckResidualUnit(input_dim, dilation=1) self.res_unit2 = OobleckResidualUnit(input_dim, dilation=3) self.res_unit3 = OobleckResidualUnit(input_dim, dilation=9) self.snake1 = Snake1d(input_dim) self.conv1 = weight_norm(nn.Conv1d(input_dim, output_dim, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2))) def forward(self, hidden_state): hidden_state = self.res_unit1(hidden_state) hidden_state = self.res_unit2(hidden_state) hidden_state = self.snake1(self.res_unit3(hidden_state)) hidden_state = self.conv1(hidden_state) return hidden_state class OobleckDecoderBlock(nn.Module): def __init__(self, input_dim, output_dim, stride: int=1): super().__init__() self.snake1 = Snake1d(input_dim) self.conv_t1 = weight_norm(nn.ConvTranspose1d(input_dim, output_dim, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2))) self.res_unit1 = OobleckResidualUnit(output_dim, dilation=1) self.res_unit2 = OobleckResidualUnit(output_dim, dilation=3) self.res_unit3 = OobleckResidualUnit(output_dim, dilation=9) def forward(self, hidden_state): hidden_state = self.snake1(hidden_state) hidden_state = self.conv_t1(hidden_state) hidden_state = self.res_unit1(hidden_state) hidden_state = self.res_unit2(hidden_state) hidden_state = self.res_unit3(hidden_state) return hidden_state class OobleckDiagonalGaussianDistribution(object): def __init__(self, parameters: torch.Tensor, deterministic: bool=False): self.parameters = parameters (self.mean, self.scale) = parameters.chunk(2, dim=1) self.std = nn.functional.softplus(self.scale) + 0.0001 self.var = self.std * self.std self.logvar = torch.log(self.var) self.deterministic = deterministic def sample(self, generator: Optional[torch.Generator]=None) -> torch.Tensor: sample = randn_tensor(self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype) x = self.mean + self.std * sample return x def kl(self, other: 'OobleckDiagonalGaussianDistribution'=None) -> torch.Tensor: if self.deterministic: return torch.Tensor([0.0]) elif other is None: return (self.mean * self.mean + self.var - self.logvar - 1.0).sum(1).mean() else: normalized_diff = torch.pow(self.mean - other.mean, 2) / other.var var_ratio = self.var / other.var logvar_diff = self.logvar - other.logvar kl = normalized_diff + var_ratio + logvar_diff - 1 kl = kl.sum(1).mean() return kl def mode(self) -> torch.Tensor: return self.mean @dataclass class 
AutoencoderOobleckOutput(BaseOutput): latent_dist: 'OobleckDiagonalGaussianDistribution' @dataclass class OobleckDecoderOutput(BaseOutput): sample: torch.Tensor class OobleckEncoder(nn.Module): def __init__(self, encoder_hidden_size, audio_channels, downsampling_ratios, channel_multiples): super().__init__() strides = downsampling_ratios channel_multiples = [1] + channel_multiples self.conv1 = weight_norm(nn.Conv1d(audio_channels, encoder_hidden_size, kernel_size=7, padding=3)) self.block = [] for (stride_index, stride) in enumerate(strides): self.block += [OobleckEncoderBlock(input_dim=encoder_hidden_size * channel_multiples[stride_index], output_dim=encoder_hidden_size * channel_multiples[stride_index + 1], stride=stride)] self.block = nn.ModuleList(self.block) d_model = encoder_hidden_size * channel_multiples[-1] self.snake1 = Snake1d(d_model) self.conv2 = weight_norm(nn.Conv1d(d_model, encoder_hidden_size, kernel_size=3, padding=1)) def forward(self, hidden_state): hidden_state = self.conv1(hidden_state) for module in self.block: hidden_state = module(hidden_state) hidden_state = self.snake1(hidden_state) hidden_state = self.conv2(hidden_state) return hidden_state class OobleckDecoder(nn.Module): def __init__(self, channels, input_channels, audio_channels, upsampling_ratios, channel_multiples): super().__init__() strides = upsampling_ratios channel_multiples = [1] + channel_multiples self.conv1 = weight_norm(nn.Conv1d(input_channels, channels * channel_multiples[-1], kernel_size=7, padding=3)) block = [] for (stride_index, stride) in enumerate(strides): block += [OobleckDecoderBlock(input_dim=channels * channel_multiples[len(strides) - stride_index], output_dim=channels * channel_multiples[len(strides) - stride_index - 1], stride=stride)] self.block = nn.ModuleList(block) output_dim = channels self.snake1 = Snake1d(output_dim) self.conv2 = weight_norm(nn.Conv1d(channels, audio_channels, kernel_size=7, padding=3, bias=False)) def forward(self, hidden_state): hidden_state = self.conv1(hidden_state) for layer in self.block: hidden_state = layer(hidden_state) hidden_state = self.snake1(hidden_state) hidden_state = self.conv2(hidden_state) return hidden_state class AutoencoderOobleck(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = False @register_to_config def __init__(self, encoder_hidden_size=128, downsampling_ratios=[2, 4, 4, 8, 8], channel_multiples=[1, 2, 4, 8, 16], decoder_channels=128, decoder_input_channels=64, audio_channels=2, sampling_rate=44100): super().__init__() self.encoder_hidden_size = encoder_hidden_size self.downsampling_ratios = downsampling_ratios self.decoder_channels = decoder_channels self.upsampling_ratios = downsampling_ratios[::-1] self.hop_length = int(np.prod(downsampling_ratios)) self.sampling_rate = sampling_rate self.encoder = OobleckEncoder(encoder_hidden_size=encoder_hidden_size, audio_channels=audio_channels, downsampling_ratios=downsampling_ratios, channel_multiples=channel_multiples) self.decoder = OobleckDecoder(channels=decoder_channels, input_channels=decoder_input_channels, audio_channels=audio_channels, upsampling_ratios=self.upsampling_ratios, channel_multiples=channel_multiples) self.use_slicing = False def enable_slicing(self): self.use_slicing = True def disable_slicing(self): self.use_slicing = False @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> Union[AutoencoderOobleckOutput, Tuple[OobleckDiagonalGaussianDistribution]]: if self.use_slicing and x.shape[0] > 1: encoded_slices = 
[self.encoder(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self.encoder(x) posterior = OobleckDiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderOobleckOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool=True) -> Union[OobleckDecoderOutput, torch.Tensor]: dec = self.decoder(z) if not return_dict: return (dec,) return OobleckDecoderOutput(sample=dec) @apply_forward_hook def decode(self, z: torch.FloatTensor, return_dict: bool=True, generator=None) -> Union[OobleckDecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return OobleckDecoderOutput(sample=decoded) def forward(self, sample: torch.Tensor, sample_posterior: bool=False, return_dict: bool=True, generator: Optional[torch.Generator]=None) -> Union[OobleckDecoderOutput, torch.Tensor]: x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z).sample if not return_dict: return (dec,) return OobleckDecoderOutput(sample=dec) # File: diffusers-main/src/diffusers/models/autoencoders/autoencoder_tiny.py from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ...utils.accelerate_utils import apply_forward_hook from ..modeling_utils import ModelMixin from .vae import DecoderOutput, DecoderTiny, EncoderTiny @dataclass class AutoencoderTinyOutput(BaseOutput): latents: torch.Tensor class AutoencoderTiny(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, in_channels: int=3, out_channels: int=3, encoder_block_out_channels: Tuple[int, ...]=(64, 64, 64, 64), decoder_block_out_channels: Tuple[int, ...]=(64, 64, 64, 64), act_fn: str='relu', upsample_fn: str='nearest', latent_channels: int=4, upsampling_scaling_factor: int=2, num_encoder_blocks: Tuple[int, ...]=(1, 3, 3, 3), num_decoder_blocks: Tuple[int, ...]=(3, 3, 3, 1), latent_magnitude: int=3, latent_shift: float=0.5, force_upcast: bool=False, scaling_factor: float=1.0, shift_factor: float=0.0): super().__init__() if len(encoder_block_out_channels) != len(num_encoder_blocks): raise ValueError('`encoder_block_out_channels` should have the same length as `num_encoder_blocks`.') if len(decoder_block_out_channels) != len(num_decoder_blocks): raise ValueError('`decoder_block_out_channels` should have the same length as `num_decoder_blocks`.') self.encoder = EncoderTiny(in_channels=in_channels, out_channels=latent_channels, num_blocks=num_encoder_blocks, block_out_channels=encoder_block_out_channels, act_fn=act_fn) self.decoder = DecoderTiny(in_channels=latent_channels, out_channels=out_channels, num_blocks=num_decoder_blocks, block_out_channels=decoder_block_out_channels, upsampling_scaling_factor=upsampling_scaling_factor, act_fn=act_fn, upsample_fn=upsample_fn) self.latent_magnitude = latent_magnitude self.latent_shift = latent_shift self.scaling_factor = scaling_factor self.use_slicing = False self.use_tiling = False self.spatial_scale_factor = 2 ** out_channels self.tile_overlap_factor = 0.125 self.tile_sample_min_size = 512 self.tile_latent_min_size = self.tile_sample_min_size // self.spatial_scale_factor 
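# Tiling bookkeeping set just above: with the default config (out_channels=3, so spatial_scale_factor = 2**3 = 8, tile_sample_min_size = 512, tile_overlap_factor = 0.125), each 512-pixel sample tile corresponds to a 512 // 8 = 64-latent tile.
# The _tiled_encode/_tiled_decode helpers defined below step by tile_size - int(tile_size * 0.125) (448 pixels when encoding, 56 latents when decoding), so neighbouring tiles overlap and are blended with a clamped linear ramp to hide seams.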
self.register_to_config(block_out_channels=decoder_block_out_channels) self.register_to_config(force_upcast=False) def _set_gradient_checkpointing(self, module, value: bool=False) -> None: if isinstance(module, (EncoderTiny, DecoderTiny)): module.gradient_checkpointing = value def scale_latents(self, x: torch.Tensor) -> torch.Tensor: return x.div(2 * self.latent_magnitude).add(self.latent_shift).clamp(0, 1) def unscale_latents(self, x: torch.Tensor) -> torch.Tensor: return x.sub(self.latent_shift).mul(2 * self.latent_magnitude) def enable_slicing(self) -> None: self.use_slicing = True def disable_slicing(self) -> None: self.use_slicing = False def enable_tiling(self, use_tiling: bool=True) -> None: self.use_tiling = use_tiling def disable_tiling(self) -> None: self.enable_tiling(False) def _tiled_encode(self, x: torch.Tensor) -> torch.Tensor: sf = self.spatial_scale_factor tile_size = self.tile_sample_min_size blend_size = int(tile_size * self.tile_overlap_factor) traverse_size = tile_size - blend_size ti = range(0, x.shape[-2], traverse_size) tj = range(0, x.shape[-1], traverse_size) blend_masks = torch.stack(torch.meshgrid([torch.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing='ij')) blend_masks = blend_masks.clamp(0, 1).to(x.device) out = torch.zeros(x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf, device=x.device) for i in ti: for j in tj: tile_in = x[..., i:i + tile_size, j:j + tile_size] tile_out = out[..., i // sf:(i + tile_size) // sf, j // sf:(j + tile_size) // sf] tile = self.encoder(tile_in) (h, w) = (tile.shape[-2], tile.shape[-1]) blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0] blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1] blend_mask = blend_mask_i * blend_mask_j (tile, blend_mask) = (tile[..., :h, :w], blend_mask[..., :h, :w]) tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out) return out def _tiled_decode(self, x: torch.Tensor) -> torch.Tensor: sf = self.spatial_scale_factor tile_size = self.tile_latent_min_size blend_size = int(tile_size * self.tile_overlap_factor) traverse_size = tile_size - blend_size ti = range(0, x.shape[-2], traverse_size) tj = range(0, x.shape[-1], traverse_size) blend_masks = torch.stack(torch.meshgrid([torch.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing='ij')) blend_masks = blend_masks.clamp(0, 1).to(x.device) out = torch.zeros(x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf, device=x.device) for i in ti: for j in tj: tile_in = x[..., i:i + tile_size, j:j + tile_size] tile_out = out[..., i * sf:(i + tile_size) * sf, j * sf:(j + tile_size) * sf] tile = self.decoder(tile_in) (h, w) = (tile.shape[-2], tile.shape[-1]) blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0] blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1] blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w] tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out) return out @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> Union[AutoencoderTinyOutput, Tuple[torch.Tensor]]: if self.use_slicing and x.shape[0] > 1: output = [self._tiled_encode(x_slice) if self.use_tiling else self.encoder(x_slice) for x_slice in x.split(1)] output = torch.cat(output) else: output = self._tiled_encode(x) if self.use_tiling else self.encoder(x) if not return_dict: return (output,) return AutoencoderTinyOutput(latents=output) @apply_forward_hook def decode(self, x: torch.Tensor, generator: 
Optional[torch.Generator]=None, return_dict: bool=True) -> Union[DecoderOutput, Tuple[torch.Tensor]]: if self.use_slicing and x.shape[0] > 1: output = [self._tiled_decode(x_slice) if self.use_tiling else self.decoder(x_slice) for x_slice in x.split(1)] output = torch.cat(output) else: output = self._tiled_decode(x) if self.use_tiling else self.decoder(x) if not return_dict: return (output,) return DecoderOutput(sample=output) def forward(self, sample: torch.Tensor, return_dict: bool=True) -> Union[DecoderOutput, Tuple[torch.Tensor]]: enc = self.encode(sample).latents scaled_enc = self.scale_latents(enc).mul_(255).round_().byte() unscaled_enc = self.unscale_latents(scaled_enc / 255.0) dec = self.decode(unscaled_enc).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # File: diffusers-main/src/diffusers/models/autoencoders/consistency_decoder_vae.py from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...schedulers import ConsistencyDecoderScheduler from ...utils import BaseOutput from ...utils.accelerate_utils import apply_forward_hook from ...utils.torch_utils import randn_tensor from ..attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor from ..modeling_utils import ModelMixin from ..unets.unet_2d import UNet2DModel from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class ConsistencyDecoderVAEOutput(BaseOutput): latent_dist: 'DiagonalGaussianDistribution' class ConsistencyDecoderVAE(ModelMixin, ConfigMixin): @register_to_config def __init__(self, scaling_factor: float=0.18215, latent_channels: int=4, sample_size: int=32, encoder_act_fn: str='silu', encoder_block_out_channels: Tuple[int, ...]=(128, 256, 512, 512), encoder_double_z: bool=True, encoder_down_block_types: Tuple[str, ...]=('DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'), encoder_in_channels: int=3, encoder_layers_per_block: int=2, encoder_norm_num_groups: int=32, encoder_out_channels: int=4, decoder_add_attention: bool=False, decoder_block_out_channels: Tuple[int, ...]=(320, 640, 1024, 1024), decoder_down_block_types: Tuple[str, ...]=('ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D'), decoder_downsample_padding: int=1, decoder_in_channels: int=7, decoder_layers_per_block: int=3, decoder_norm_eps: float=1e-05, decoder_norm_num_groups: int=32, decoder_num_train_timesteps: int=1024, decoder_out_channels: int=6, decoder_resnet_time_scale_shift: str='scale_shift', decoder_time_embedding_type: str='learned', decoder_up_block_types: Tuple[str, ...]=('ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D')): super().__init__() self.encoder = Encoder(act_fn=encoder_act_fn, block_out_channels=encoder_block_out_channels, double_z=encoder_double_z, down_block_types=encoder_down_block_types, in_channels=encoder_in_channels, layers_per_block=encoder_layers_per_block, norm_num_groups=encoder_norm_num_groups, out_channels=encoder_out_channels) self.decoder_unet = UNet2DModel(add_attention=decoder_add_attention, block_out_channels=decoder_block_out_channels, down_block_types=decoder_down_block_types, downsample_padding=decoder_downsample_padding, in_channels=decoder_in_channels, 
layers_per_block=decoder_layers_per_block, norm_eps=decoder_norm_eps, norm_num_groups=decoder_norm_num_groups, num_train_timesteps=decoder_num_train_timesteps, out_channels=decoder_out_channels, resnet_time_scale_shift=decoder_resnet_time_scale_shift, time_embedding_type=decoder_time_embedding_type, up_block_types=decoder_up_block_types) self.decoder_scheduler = ConsistencyDecoderScheduler() self.register_to_config(block_out_channels=encoder_block_out_channels) self.register_to_config(force_upcast=False) self.register_buffer('means', torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None], persistent=False) self.register_buffer('stds', torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) self.use_slicing = False self.use_tiling = False self.tile_sample_min_size = self.config.sample_size sample_size = self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size self.tile_latent_min_size = int(sample_size / 2 ** (len(self.config.block_out_channels) - 1)) self.tile_overlap_factor = 0.25 def enable_tiling(self, use_tiling: bool=True): self.use_tiling = use_tiling def disable_tiling(self): self.enable_tiling(False) def enable_slicing(self): self.use_slicing = True def disable_slicing(self): self.use_slicing = False @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(x, return_dict=return_dict) if self.use_slicing and x.shape[0] > 1: encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return ConsistencyDecoderVAEOutput(latent_dist=posterior) @apply_forward_hook def decode(self, z: torch.Tensor, generator: Optional[torch.Generator]=None, return_dict: bool=True, num_inference_steps: int=2) -> Union[DecoderOutput, Tuple[torch.Tensor]]: z = (z * self.config.scaling_factor - self.means) / self.stds scale_factor = 2 ** (len(self.config.block_out_channels) - 1) z = F.interpolate(z, mode='nearest', scale_factor=scale_factor) (batch_size, _, height, width) = z.shape self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device) x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor((batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device) for t in self.decoder_scheduler.timesteps: model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1) model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :] prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample x_t = prev_sample x_0 = x_t if not return_dict: return (x_0,) return DecoderOutput(sample=x_0) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[2], b.shape[2], blend_extent) for y in range(blend_extent): b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for x in range(blend_extent): b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def tiled_encode(self, x: torch.Tensor, return_dict: bool=True) -> Union[ConsistencyDecoderVAEOutput, Tuple]: overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = 
self.tile_latent_min_size - blend_extent rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i:i + self.tile_sample_min_size, j:j + self.tile_sample_min_size] tile = self.encoder(tile) tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for (i, row) in enumerate(rows): result_row = [] for (j, tile) in enumerate(row): if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) moments = torch.cat(result_rows, dim=2) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return ConsistencyDecoderVAEOutput(latent_dist=posterior) def forward(self, sample: torch.Tensor, sample_posterior: bool=False, return_dict: bool=True, generator: Optional[torch.Generator]=None) -> Union[DecoderOutput, Tuple[torch.Tensor]]: x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, generator=generator).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # File: diffusers-main/src/diffusers/models/autoencoders/vae.py from dataclasses import dataclass from typing import Optional, Tuple import numpy as np import torch import torch.nn as nn from ...utils import BaseOutput, is_torch_version from ...utils.torch_utils import randn_tensor from ..activations import get_activation from ..attention_processor import SpatialNorm from ..unets.unet_2d_blocks import AutoencoderTinyBlock, UNetMidBlock2D, get_down_block, get_up_block @dataclass class DecoderOutput(BaseOutput): sample: torch.Tensor commit_loss: Optional[torch.FloatTensor] = None class Encoder(nn.Module): def __init__(self, in_channels: int=3, out_channels: int=3, down_block_types: Tuple[str, ...]=('DownEncoderBlock2D',), block_out_channels: Tuple[int, ...]=(64,), layers_per_block: int=2, norm_num_groups: int=32, act_fn: str='silu', double_z: bool=True, mid_block_add_attention=True): super().__init__() self.layers_per_block = layers_per_block self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1) self.down_blocks = nn.ModuleList([]) output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-06, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None) self.down_blocks.append(down_block) self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-06, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default', attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, add_attention=mid_block_add_attention) self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-06) self.conv_act = nn.SiLU() conv_out_channels = 2 * out_channels if double_z else out_channels self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1) self.gradient_checkpointing = 
False def forward(self, sample: torch.Tensor) -> torch.Tensor: sample = self.conv_in(sample) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): for down_block in self.down_blocks: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample, use_reentrant=False) sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, use_reentrant=False) else: for down_block in self.down_blocks: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample) sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample) else: for down_block in self.down_blocks: sample = down_block(sample) sample = self.mid_block(sample) sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) return sample class Decoder(nn.Module): def __init__(self, in_channels: int=3, out_channels: int=3, up_block_types: Tuple[str, ...]=('UpDecoderBlock2D',), block_out_channels: Tuple[int, ...]=(64,), layers_per_block: int=2, norm_num_groups: int=32, act_fn: str='silu', norm_type: str='group', mid_block_add_attention=True): super().__init__() self.layers_per_block = layers_per_block self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1) self.up_blocks = nn.ModuleList([]) temb_channels = in_channels if norm_type == 'spatial' else None self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-06, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default' if norm_type == 'group' else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels, add_attention=mid_block_add_attention) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-06, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type) self.up_blocks.append(up_block) prev_output_channel = output_channel if norm_type == 'spatial': self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels) else: self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-06) self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) self.gradient_checkpointing = False def forward(self, sample: torch.Tensor, latent_embeds: Optional[torch.Tensor]=None) -> torch.Tensor: sample = self.conv_in(sample) upscale_dtype = next(iter(self.up_blocks.parameters())).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False) sample = sample.to(upscale_dtype) for up_block in 
self.up_blocks: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False) else: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds) sample = sample.to(upscale_dtype) for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds) else: sample = self.mid_block(sample, latent_embeds) sample = sample.to(upscale_dtype) for up_block in self.up_blocks: sample = up_block(sample, latent_embeds) if latent_embeds is None: sample = self.conv_norm_out(sample) else: sample = self.conv_norm_out(sample, latent_embeds) sample = self.conv_act(sample) sample = self.conv_out(sample) return sample class UpSample(nn.Module): def __init__(self, in_channels: int, out_channels: int) -> None: super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1) def forward(self, x: torch.Tensor) -> torch.Tensor: x = torch.relu(x) x = self.deconv(x) return x class MaskConditionEncoder(nn.Module): def __init__(self, in_ch: int, out_ch: int=192, res_ch: int=768, stride: int=16) -> None: super().__init__() channels = [] while stride > 1: stride = stride // 2 in_ch_ = out_ch * 2 if out_ch > res_ch: out_ch = res_ch if stride == 1: in_ch_ = res_ch channels.append((in_ch_, out_ch)) out_ch *= 2 out_channels = [] for (_in_ch, _out_ch) in channels: out_channels.append(_out_ch) out_channels.append(channels[-1][0]) layers = [] in_ch_ = in_ch for l in range(len(out_channels)): out_ch_ = out_channels[l] if l == 0 or l == 1: layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=3, stride=1, padding=1)) else: layers.append(nn.Conv2d(in_ch_, out_ch_, kernel_size=4, stride=2, padding=1)) in_ch_ = out_ch_ self.layers = nn.Sequential(*layers) def forward(self, x: torch.Tensor, mask=None) -> torch.Tensor: out = {} for l in range(len(self.layers)): layer = self.layers[l] x = layer(x) out[str(tuple(x.shape))] = x x = torch.relu(x) return out class MaskConditionDecoder(nn.Module): def __init__(self, in_channels: int=3, out_channels: int=3, up_block_types: Tuple[str, ...]=('UpDecoderBlock2D',), block_out_channels: Tuple[int, ...]=(64,), layers_per_block: int=2, norm_num_groups: int=32, act_fn: str='silu', norm_type: str='group'): super().__init__() self.layers_per_block = layers_per_block self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1) self.up_blocks = nn.ModuleList([]) temb_channels = in_channels if norm_type == 'spatial' else None self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-06, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default' if norm_type == 'group' else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-06, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, 
attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type) self.up_blocks.append(up_block) prev_output_channel = output_channel self.condition_encoder = MaskConditionEncoder(in_ch=out_channels, out_ch=block_out_channels[0], res_ch=block_out_channels[-1]) if norm_type == 'spatial': self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels) else: self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-06) self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1) self.gradient_checkpointing = False def forward(self, z: torch.Tensor, image: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, latent_embeds: Optional[torch.Tensor]=None) -> torch.Tensor: sample = z sample = self.conv_in(sample) upscale_dtype = next(iter(self.up_blocks.parameters())).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False) sample = sample.to(upscale_dtype) if image is not None and mask is not None: masked_image = (1 - mask) * image im_x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.condition_encoder), masked_image, mask, use_reentrant=False) for up_block in self.up_blocks: if image is not None and mask is not None: sample_ = im_x[str(tuple(sample.shape))] mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode='nearest') sample = sample * mask_ + sample_ * (1 - mask_) sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False) if image is not None and mask is not None: sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask) else: sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds) sample = sample.to(upscale_dtype) if image is not None and mask is not None: masked_image = (1 - mask) * image im_x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.condition_encoder), masked_image, mask) for up_block in self.up_blocks: if image is not None and mask is not None: sample_ = im_x[str(tuple(sample.shape))] mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode='nearest') sample = sample * mask_ + sample_ * (1 - mask_) sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds) if image is not None and mask is not None: sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask) else: sample = self.mid_block(sample, latent_embeds) sample = sample.to(upscale_dtype) if image is not None and mask is not None: masked_image = (1 - mask) * image im_x = self.condition_encoder(masked_image, mask) for up_block in self.up_blocks: if image is not None and mask is not None: sample_ = im_x[str(tuple(sample.shape))] mask_ = nn.functional.interpolate(mask, size=sample.shape[-2:], mode='nearest') sample = sample * mask_ + sample_ * (1 - mask_) sample = up_block(sample, latent_embeds) if image is not None and mask is not None: sample = sample * mask + im_x[str(tuple(sample.shape))] * (1 - mask) if latent_embeds is None: sample = self.conv_norm_out(sample) else: sample = self.conv_norm_out(sample, latent_embeds) sample = self.conv_act(sample) sample = self.conv_out(sample) return sample class 
VectorQuantizer(nn.Module): def __init__(self, n_e: int, vq_embed_dim: int, beta: float, remap=None, unknown_index: str='random', sane_index_shape: bool=False, legacy: bool=True): super().__init__() self.n_e = n_e self.vq_embed_dim = vq_embed_dim self.beta = beta self.legacy = legacy self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim) self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) self.remap = remap if self.remap is not None: self.register_buffer('used', torch.tensor(np.load(self.remap))) self.used: torch.Tensor self.re_embed = self.used.shape[0] self.unknown_index = unknown_index if self.unknown_index == 'extra': self.unknown_index = self.re_embed self.re_embed = self.re_embed + 1 print(f'Remapping {self.n_e} indices to {self.re_embed} indices. Using {self.unknown_index} for unknown indices.') else: self.re_embed = n_e self.sane_index_shape = sane_index_shape def remap_to_used(self, inds: torch.LongTensor) -> torch.LongTensor: ishape = inds.shape assert len(ishape) > 1 inds = inds.reshape(ishape[0], -1) used = self.used.to(inds) match = (inds[:, :, None] == used[None, None, ...]).long() new = match.argmax(-1) unknown = match.sum(2) < 1 if self.unknown_index == 'random': new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device) else: new[unknown] = self.unknown_index return new.reshape(ishape) def unmap_to_all(self, inds: torch.LongTensor) -> torch.LongTensor: ishape = inds.shape assert len(ishape) > 1 inds = inds.reshape(ishape[0], -1) used = self.used.to(inds) if self.re_embed > self.used.shape[0]: inds[inds >= self.used.shape[0]] = 0 back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds) return back.reshape(ishape) def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Tuple]: z = z.permute(0, 2, 3, 1).contiguous() z_flattened = z.view(-1, self.vq_embed_dim) min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1) z_q = self.embedding(min_encoding_indices).view(z.shape) perplexity = None min_encodings = None if not self.legacy: loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2) else: loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2) z_q: torch.Tensor = z + (z_q - z).detach() z_q = z_q.permute(0, 3, 1, 2).contiguous() if self.remap is not None: min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) min_encoding_indices = self.remap_to_used(min_encoding_indices) min_encoding_indices = min_encoding_indices.reshape(-1, 1) if self.sane_index_shape: min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3]) return (z_q, loss, (perplexity, min_encodings, min_encoding_indices)) def get_codebook_entry(self, indices: torch.LongTensor, shape: Tuple[int, ...]) -> torch.Tensor: if self.remap is not None: indices = indices.reshape(shape[0], -1) indices = self.unmap_to_all(indices) indices = indices.reshape(-1) z_q: torch.Tensor = self.embedding(indices) if shape is not None: z_q = z_q.view(shape) z_q = z_q.permute(0, 3, 1, 2).contiguous() return z_q class DiagonalGaussianDistribution(object): def __init__(self, parameters: torch.Tensor, deterministic: bool=False): self.parameters = parameters (self.mean, self.logvar) = torch.chunk(parameters, 2, dim=1) self.logvar = torch.clamp(self.logvar, -30.0, 20.0) self.deterministic = deterministic self.std = torch.exp(0.5 * self.logvar) self.var = torch.exp(self.logvar) if self.deterministic: self.var 
= self.std = torch.zeros_like(self.mean, device=self.parameters.device, dtype=self.parameters.dtype) def sample(self, generator: Optional[torch.Generator]=None) -> torch.Tensor: sample = randn_tensor(self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype) x = self.mean + self.std * sample return x def kl(self, other: 'DiagonalGaussianDistribution'=None) -> torch.Tensor: if self.deterministic: return torch.Tensor([0.0]) elif other is None: return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3]) else: return 0.5 * torch.sum(torch.pow(self.mean - other.mean, 2) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar, dim=[1, 2, 3]) def nll(self, sample: torch.Tensor, dims: Tuple[int, ...]=[1, 2, 3]) -> torch.Tensor: if self.deterministic: return torch.Tensor([0.0]) logtwopi = np.log(2.0 * np.pi) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims) def mode(self) -> torch.Tensor: return self.mean class EncoderTiny(nn.Module): def __init__(self, in_channels: int, out_channels: int, num_blocks: Tuple[int, ...], block_out_channels: Tuple[int, ...], act_fn: str): super().__init__() layers = [] for (i, num_block) in enumerate(num_blocks): num_channels = block_out_channels[i] if i == 0: layers.append(nn.Conv2d(in_channels, num_channels, kernel_size=3, padding=1)) else: layers.append(nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1, stride=2, bias=False)) for _ in range(num_block): layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn)) layers.append(nn.Conv2d(block_out_channels[-1], out_channels, kernel_size=3, padding=1)) self.layers = nn.Sequential(*layers) self.gradient_checkpointing = False def forward(self, x: torch.Tensor) -> torch.Tensor: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, use_reentrant=False) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x) else: x = self.layers(x.add(1).div(2)) return x class DecoderTiny(nn.Module): def __init__(self, in_channels: int, out_channels: int, num_blocks: Tuple[int, ...], block_out_channels: Tuple[int, ...], upsampling_scaling_factor: int, act_fn: str, upsample_fn: str): super().__init__() layers = [nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=1), get_activation(act_fn)] for (i, num_block) in enumerate(num_blocks): is_final_block = i == len(num_blocks) - 1 num_channels = block_out_channels[i] for _ in range(num_block): layers.append(AutoencoderTinyBlock(num_channels, num_channels, act_fn)) if not is_final_block: layers.append(nn.Upsample(scale_factor=upsampling_scaling_factor, mode=upsample_fn)) conv_out_channel = num_channels if not is_final_block else out_channels layers.append(nn.Conv2d(num_channels, conv_out_channel, kernel_size=3, padding=1, bias=is_final_block)) self.layers = nn.Sequential(*layers) self.gradient_checkpointing = False def forward(self, x: torch.Tensor) -> torch.Tensor: x = torch.tanh(x / 3) * 3 if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x, 
use_reentrant=False) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(self.layers), x) else: x = self.layers(x) return x.mul(2).sub(1) # File: diffusers-main/src/diffusers/models/autoencoders/vq_model.py from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ...utils.accelerate_utils import apply_forward_hook from ..autoencoders.vae import Decoder, DecoderOutput, Encoder, VectorQuantizer from ..modeling_utils import ModelMixin @dataclass class VQEncoderOutput(BaseOutput): latents: torch.Tensor class VQModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, in_channels: int=3, out_channels: int=3, down_block_types: Tuple[str, ...]=('DownEncoderBlock2D',), up_block_types: Tuple[str, ...]=('UpDecoderBlock2D',), block_out_channels: Tuple[int, ...]=(64,), layers_per_block: int=1, act_fn: str='silu', latent_channels: int=3, sample_size: int=32, num_vq_embeddings: int=256, norm_num_groups: int=32, vq_embed_dim: Optional[int]=None, scaling_factor: float=0.18215, norm_type: str='group', mid_block_add_attention=True, lookup_from_codebook=False, force_upcast=False): super().__init__() self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, mid_block_add_attention=mid_block_add_attention) vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1) self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False) self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1) self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, mid_block_add_attention=mid_block_add_attention) @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> VQEncoderOutput: h = self.encoder(x) h = self.quant_conv(h) if not return_dict: return (h,) return VQEncoderOutput(latents=h) @apply_forward_hook def decode(self, h: torch.Tensor, force_not_quantize: bool=False, return_dict: bool=True, shape=None) -> Union[DecoderOutput, torch.Tensor]: if not force_not_quantize: (quant, commit_loss, _) = self.quantize(h) elif self.config.lookup_from_codebook: quant = self.quantize.get_codebook_entry(h, shape) commit_loss = torch.zeros(h.shape[0]).to(h.device, dtype=h.dtype) else: quant = h commit_loss = torch.zeros(h.shape[0]).to(h.device, dtype=h.dtype) quant2 = self.post_quant_conv(quant) dec = self.decoder(quant2, quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec, commit_loss) return DecoderOutput(sample=dec, commit_loss=commit_loss) def forward(self, sample: torch.Tensor, return_dict: bool=True) -> Union[DecoderOutput, Tuple[torch.Tensor, ...]]: h = self.encode(sample).latents dec = self.decode(h) if not return_dict: return (dec.sample, dec.commit_loss) return dec # File: diffusers-main/src/diffusers/models/controlnet.py from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import functional 
as F from ..configuration_utils import ConfigMixin, register_to_config from ..loaders.single_file_model import FromOriginalModelMixin from ..utils import BaseOutput, logging from .attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor from .embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unets.unet_2d_blocks import CrossAttnDownBlock2D, DownBlock2D, UNetMidBlock2D, UNetMidBlock2DCrossAttn, get_down_block from .unets.unet_2d_condition import UNet2DConditionModel logger = logging.get_logger(__name__) @dataclass class ControlNetOutput(BaseOutput): down_block_res_samples: Tuple[torch.Tensor] mid_block_res_sample: torch.Tensor class ControlNetConditioningEmbedding(nn.Module): def __init__(self, conditioning_embedding_channels: int, conditioning_channels: int=3, block_out_channels: Tuple[int, ...]=(16, 32, 96, 256)): super().__init__() self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) self.blocks = nn.ModuleList([]) for i in range(len(block_out_channels) - 1): channel_in = block_out_channels[i] channel_out = block_out_channels[i + 1] self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) self.conv_out = zero_module(nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)) def forward(self, conditioning): embedding = self.conv_in(conditioning) embedding = F.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = F.silu(embedding) embedding = self.conv_out(embedding) return embedding class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, in_channels: int=4, conditioning_channels: int=3, flip_sin_to_cos: bool=True, freq_shift: int=0, down_block_types: Tuple[str, ...]=('CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D'), mid_block_type: Optional[str]='UNetMidBlock2DCrossAttn', only_cross_attention: Union[bool, Tuple[bool]]=False, block_out_channels: Tuple[int, ...]=(320, 640, 1280, 1280), layers_per_block: int=2, downsample_padding: int=1, mid_block_scale_factor: float=1, act_fn: str='silu', norm_num_groups: Optional[int]=32, norm_eps: float=1e-05, cross_attention_dim: int=1280, transformer_layers_per_block: Union[int, Tuple[int, ...]]=1, encoder_hid_dim: Optional[int]=None, encoder_hid_dim_type: Optional[str]=None, attention_head_dim: Union[int, Tuple[int, ...]]=8, num_attention_heads: Optional[Union[int, Tuple[int, ...]]]=None, use_linear_projection: bool=False, class_embed_type: Optional[str]=None, addition_embed_type: Optional[str]=None, addition_time_embed_dim: Optional[int]=None, num_class_embeds: Optional[int]=None, upcast_attention: bool=False, resnet_time_scale_shift: str='default', projection_class_embeddings_input_dim: Optional[int]=None, controlnet_conditioning_channel_order: str='rgb', conditioning_embedding_out_channels: Optional[Tuple[int, ...]]=(16, 32, 96, 256), global_pool_conditions: bool=False, addition_embed_type_num_heads: int=64): super().__init__() num_attention_heads = num_attention_heads or attention_head_dim if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` 
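# Illustrative usage sketch (added; not part of the original diffusers source):
# ControlNetConditioningEmbedding above maps a full-resolution hint image to the UNet's
# latent resolution. Its three stride-2 convs downsample by a factor of 8, and the
# zero-initialized conv_out means an untrained ControlNet initially contributes nothing.
import torch
from diffusers.models.controlnet import ControlNetConditioningEmbedding

cond_embed = ControlNetConditioningEmbedding(conditioning_embedding_channels=320)
hint = torch.randn(1, 3, 512, 512)
print(cond_embed(hint).shape)               # torch.Size([1, 320, 64, 64]) with the default block widths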
as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError(f'Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.') if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) conv_in_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = 'text_proj' self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError(f'`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}.') if encoder_hid_dim_type == 'text_proj': self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == 'text_image_proj': self.encoder_hid_proj = TextImageProjection(text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim) elif encoder_hid_dim_type is not None: raise ValueError(f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'.") else: self.encoder_hid_proj = None if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == 'timestep': self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == 'identity': self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == 'projection': if projection_class_embeddings_input_dim is None: raise ValueError("`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set") self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == 'text': if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding(text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads) elif addition_embed_type == 'text_image': self.add_embedding = TextImageTimeEmbedding(text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == 'text_time': self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = 
TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") self.controlnet_cond_embedding = ControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels) self.down_blocks = nn.ModuleList([]) self.controlnet_down_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) output_channel = block_out_channels[0] controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block, transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[i], attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, downsample_padding=downsample_padding, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift) self.down_blocks.append(down_block) for _ in range(layers_per_block): controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) mid_block_channel = block_out_channels[-1] controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_mid_block = controlnet_block if mid_block_type == 'UNetMidBlock2DCrossAttn': self.mid_block = UNetMidBlock2DCrossAttn(transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=mid_block_channel, temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention) elif mid_block_type == 'UNetMidBlock2D': self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], temb_channels=time_embed_dim, num_layers=0, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False) else: raise 
ValueError(f'unknown mid_block_type : {mid_block_type}') @classmethod def from_unet(cls, unet: UNet2DConditionModel, controlnet_conditioning_channel_order: str='rgb', conditioning_embedding_out_channels: Optional[Tuple[int, ...]]=(16, 32, 96, 256), load_weights_from_unet: bool=True, conditioning_channels: int=3): transformer_layers_per_block = unet.config.transformer_layers_per_block if 'transformer_layers_per_block' in unet.config else 1 encoder_hid_dim = unet.config.encoder_hid_dim if 'encoder_hid_dim' in unet.config else None encoder_hid_dim_type = unet.config.encoder_hid_dim_type if 'encoder_hid_dim_type' in unet.config else None addition_embed_type = unet.config.addition_embed_type if 'addition_embed_type' in unet.config else None addition_time_embed_dim = unet.config.addition_time_embed_dim if 'addition_time_embed_dim' in unet.config else None controlnet = cls(encoder_hid_dim=encoder_hid_dim, encoder_hid_dim_type=encoder_hid_dim_type, addition_embed_type=addition_embed_type, addition_time_embed_dim=addition_time_embed_dim, transformer_layers_per_block=transformer_layers_per_block, in_channels=unet.config.in_channels, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, downsample_padding=unet.config.downsample_padding, mid_block_scale_factor=unet.config.mid_block_scale_factor, act_fn=unet.config.act_fn, norm_num_groups=unet.config.norm_num_groups, norm_eps=unet.config.norm_eps, cross_attention_dim=unet.config.cross_attention_dim, attention_head_dim=unet.config.attention_head_dim, num_attention_heads=unet.config.num_attention_heads, use_linear_projection=unet.config.use_linear_projection, class_embed_type=unet.config.class_embed_type, num_class_embeds=unet.config.num_class_embeds, upcast_attention=unet.config.upcast_attention, resnet_time_scale_shift=unet.config.resnet_time_scale_shift, projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, mid_block_type=unet.config.mid_block_type, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, conditioning_embedding_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels) if load_weights_from_unet: controlnet.conv_in.load_state_dict(unet.conv_in.state_dict()) controlnet.time_proj.load_state_dict(unet.time_proj.state_dict()) controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict()) if controlnet.class_embedding: controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict()) if hasattr(controlnet, 'add_embedding'): controlnet.add_embedding.load_state_dict(unet.add_embedding.state_dict()) controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict()) controlnet.mid_block.load_state_dict(unet.mid_block.state_dict()) return controlnet @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def 
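# Illustrative usage sketch (added; not part of the original diffusers source): deriving a
# ControlNet from an existing UNet with the from_unet() classmethod above. conv_in, the time
# embedding and the down/mid blocks are copied from the UNet, while the 1x1 projection convs
# stay zero-initialized. The checkpoint id is only an illustrative assumption.
from diffusers import ControlNetModel, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
controlnet = ControlNetModel.from_unet(unet, conditioning_channels=3)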
set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, 'set_attention_slice'): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == 'auto': slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == 'max': slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError(f'You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different attention layers. 
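# Illustrative usage sketch (added; not part of the original diffusers source): the attention
# helpers defined above in use, assuming a default-config ControlNetModel is acceptable to
# instantiate. "auto" halves every sliceable head dimension.
from diffusers import ControlNetModel

controlnet = ControlNetModel()
controlnet.set_default_attn_processor()     # AttnProcessor / AttnAddedKVProcessor, depending on current processors
controlnet.set_attention_slice("auto")      # trade a little speed for lower attention memory
print(len(controlnet.attn_processors), "attention processors registered")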
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.') for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f'size {size} has to be smaller or equal to {dim}.') def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, 'set_attention_slice'): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value: bool=False) -> None: if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)): module.gradient_checkpointing = value def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, conditioning_scale: float=1.0, class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guess_mode: bool=False, return_dict: bool=True) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]: channel_order = self.config.controlnet_conditioning_channel_order if channel_order == 'rgb': ... elif channel_order == 'bgr': controlnet_cond = torch.flip(controlnet_cond, dims=[1]) else: raise ValueError(f'unknown `controlnet_conditioning_channel_order`: {channel_order}') if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError('class_labels should be provided when num_class_embeds > 0') if self.config.class_embed_type == 'timestep': class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb if self.config.addition_embed_type is not None: if self.config.addition_embed_type == 'text': aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == 'text_time': if 'text_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`") text_embeds = added_cond_kwargs.get('text_embeds') if 'time_ids' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`") time_ids = added_cond_kwargs.get('time_ids') time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds 
= torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) emb = emb + aug_emb if aug_emb is not None else emb sample = self.conv_in(sample) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) sample = sample + controlnet_cond down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples if self.mid_block is not None: if hasattr(self.mid_block, 'has_cross_attention') and self.mid_block.has_cross_attention: sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs) else: sample = self.mid_block(sample, emb) controlnet_down_block_res_samples = () for (down_block_res_sample, controlnet_block) in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) if guess_mode and (not self.config.global_pool_conditions): scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) scales = scales * conditioning_scale down_block_res_samples = [sample * scale for (sample, scale) in zip(down_block_res_samples, scales)] mid_block_res_sample = mid_block_res_sample * scales[-1] else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return ControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample) def zero_module(module): for p in module.parameters(): nn.init.zeros_(p) return module # File: diffusers-main/src/diffusers/models/controlnet_flax.py from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unets.unet_2d_blocks_flax import FlaxCrossAttnDownBlock2D, FlaxDownBlock2D, FlaxUNetMidBlock2DCrossAttn @flax.struct.dataclass class FlaxControlNetOutput(BaseOutput): down_block_res_samples: jnp.ndarray mid_block_res_sample: jnp.ndarray class FlaxControlNetConditioningEmbedding(nn.Module): conditioning_embedding_channels: int block_out_channels: Tuple[int, ...] 
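# Illustrative note (added; not part of the original diffusers source): the "guess mode"
# weighting used in ControlNetModel.forward above. With the default config there are 12
# down-block residuals (1 from conv_in, 3 per CrossAttnDownBlock2D, 2 from the final
# DownBlock2D), so 13 scales run geometrically from 0.1 to 1.0 and the deepest features
# are weighted most strongly.
import torch

num_down_residuals = 12                                    # assumption: default SD-style config
scales = torch.logspace(-1, 0, num_down_residuals + 1)     # tensor([0.1000, ..., 1.0000])
down_scales, mid_scale = scales[:-1], scales[-1]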
= (16, 32, 96, 256) dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype) blocks = [] for i in range(len(self.block_out_channels) - 1): channel_in = self.block_out_channels[i] channel_out = self.block_out_channels[i + 1] conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype) blocks.append(conv1) conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype) blocks.append(conv2) self.blocks = blocks self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype) def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray: embedding = self.conv_in(conditioning) embedding = nn.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = nn.silu(embedding) embedding = self.conv_out(embedding) return embedding @flax_register_to_config class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin): sample_size: int = 32 in_channels: int = 4 down_block_types: Tuple[str, ...] = ('CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D') only_cross_attention: Union[bool, Tuple[bool, ...]] = False block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 controlnet_conditioning_channel_order: str = 'rgb' conditioning_embedding_out_channels: Tuple[int, ...] 
= (16, 32, 96, 256) def init_weights(self, rng: jax.Array) -> FrozenDict: sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8) controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)['params'] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 num_attention_heads = self.num_attention_heads or self.attention_head_dim self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift) self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels) only_cross_attention = self.only_cross_attention if isinstance(only_cross_attention, bool): only_cross_attention = (only_cross_attention,) * len(self.down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(self.down_block_types) down_blocks = [] controlnet_down_blocks = [] output_channel = block_out_channels[0] controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype) controlnet_down_blocks.append(controlnet_block) for (i, down_block_type) in enumerate(self.down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == 'CrossAttnDownBlock2D': down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype) else: down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype) down_blocks.append(down_block) for _ in range(self.layers_per_block): controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype) controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype) controlnet_down_blocks.append(controlnet_block) self.down_blocks = down_blocks self.controlnet_down_blocks = controlnet_down_blocks mid_block_channel = block_out_channels[-1] self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, 
num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype) self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype) def __call__(self, sample: jnp.ndarray, timesteps: Union[jnp.ndarray, float, int], encoder_hidden_states: jnp.ndarray, controlnet_cond: jnp.ndarray, conditioning_scale: float=1.0, return_dict: bool=True, train: bool=False) -> Union[FlaxControlNetOutput, Tuple[Tuple[jnp.ndarray, ...], jnp.ndarray]]: channel_order = self.controlnet_conditioning_channel_order if channel_order == 'bgr': controlnet_cond = jnp.flip(controlnet_cond, axis=1) if not isinstance(timesteps, jnp.ndarray): timesteps = jnp.array([timesteps], dtype=jnp.int32) elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0: timesteps = timesteps.astype(dtype=jnp.float32) timesteps = jnp.expand_dims(timesteps, 0) t_emb = self.time_proj(timesteps) t_emb = self.time_embedding(t_emb) sample = jnp.transpose(sample, (0, 2, 3, 1)) sample = self.conv_in(sample) controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1)) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) sample += controlnet_cond down_block_res_samples = (sample,) for down_block in self.down_blocks: if isinstance(down_block, FlaxCrossAttnDownBlock2D): (sample, res_samples) = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train) else: (sample, res_samples) = down_block(sample, t_emb, deterministic=not train) down_block_res_samples += res_samples sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train) controlnet_down_block_res_samples = () for (down_block_res_sample, controlnet_block) in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples += (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample) # File: diffusers-main/src/diffusers/models/controlnet_flux.py from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..loaders import PeftAdapterMixin from ..models.attention_processor import AttentionProcessor from ..models.modeling_utils import ModelMixin from ..utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers from .controlnet import BaseOutput, zero_module from .embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed from .modeling_outputs import Transformer2DModelOutput from .transformers.transformer_flux import FluxSingleTransformerBlock, FluxTransformerBlock logger = logging.get_logger(__name__) @dataclass class FluxControlNetOutput(BaseOutput): controlnet_block_samples: Tuple[torch.Tensor] controlnet_single_block_samples: Tuple[torch.Tensor] class FluxControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin): _supports_gradient_checkpointing = 
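# Illustrative usage sketch (added; not part of the original diffusers source): initializing
# and calling the Flax ControlNet defined above. Inputs are NCHW and are transposed to NHWC
# internally; the conditioning image is expected at 8x the latent resolution. Running the
# full SD-sized default config is assumed to be acceptable for the example.
import jax
import jax.numpy as jnp
from diffusers.models.controlnet_flax import FlaxControlNetModel

controlnet = FlaxControlNetModel(sample_size=32)
params = controlnet.init_weights(rng=jax.random.PRNGKey(0))
sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
controlnet_cond = jnp.zeros((1, 3, 256, 256), dtype=jnp.float32)
out = controlnet.apply({"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond)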
True @register_to_config def __init__(self, patch_size: int=1, in_channels: int=64, num_layers: int=19, num_single_layers: int=38, attention_head_dim: int=128, num_attention_heads: int=24, joint_attention_dim: int=4096, pooled_projection_dim: int=768, guidance_embeds: bool=False, axes_dims_rope: List[int]=[16, 56, 56], num_mode: int=None): super().__init__() self.out_channels = in_channels self.inner_dim = num_attention_heads * attention_head_dim self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) text_time_guidance_cls = CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings self.time_text_embed = text_time_guidance_cls(embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = torch.nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList([FluxTransformerBlock(dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim) for i in range(num_layers)]) self.single_transformer_blocks = nn.ModuleList([FluxSingleTransformerBlock(dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim) for i in range(num_single_layers)]) self.controlnet_blocks = nn.ModuleList([]) for _ in range(len(self.transformer_blocks)): self.controlnet_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) self.controlnet_single_blocks = nn.ModuleList([]) for _ in range(len(self.single_transformer_blocks)): self.controlnet_single_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) self.union = num_mode is not None if self.union: self.controlnet_mode_embedder = nn.Embedding(num_mode, self.inner_dim) self.controlnet_x_embedder = zero_module(torch.nn.Linear(in_channels, self.inner_dim)) self.gradient_checkpointing = False @property def attn_processors(self): processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value @classmethod def from_transformer(cls, transformer, num_layers: int=4, num_single_layers: int=10, attention_head_dim: int=128, num_attention_heads: int=24, load_weights_from_transformer=True): config = transformer.config config['num_layers'] = num_layers config['num_single_layers'] = num_single_layers config['attention_head_dim'] = attention_head_dim config['num_attention_heads'] = num_attention_heads controlnet = cls(**config) if load_weights_from_transformer: controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict()) controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict()) controlnet.context_embedder.load_state_dict(transformer.context_embedder.state_dict()) controlnet.x_embedder.load_state_dict(transformer.x_embedder.state_dict()) controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False) controlnet.single_transformer_blocks.load_state_dict(transformer.single_transformer_blocks.state_dict(), strict=False) controlnet.controlnet_x_embedder = zero_module(controlnet.controlnet_x_embedder) return controlnet def forward(self, hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, controlnet_mode: torch.Tensor=None, conditioning_scale: float=1.0, encoder_hidden_states: torch.Tensor=None, pooled_projections: torch.Tensor=None, timestep: torch.LongTensor=None, img_ids: torch.Tensor=None, txt_ids: torch.Tensor=None, guidance: torch.Tensor=None, joint_attention_kwargs: Optional[Dict[str, Any]]=None, return_dict: bool=True) -> Union[torch.FloatTensor, Transformer2DModelOutput]: if joint_attention_kwargs is not None: joint_attention_kwargs = joint_attention_kwargs.copy() lora_scale = joint_attention_kwargs.pop('scale', 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: scale_lora_layers(self, lora_scale) elif joint_attention_kwargs is not None and joint_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective.') hidden_states = self.x_embedder(hidden_states) hidden_states = hidden_states + self.controlnet_x_embedder(controlnet_cond) timestep = timestep.to(hidden_states.dtype) * 1000 if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 else: guidance = None temb = self.time_text_embed(timestep, pooled_projections) if guidance is None else self.time_text_embed(timestep, guidance, pooled_projections) encoder_hidden_states = self.context_embedder(encoder_hidden_states) if self.union: if controlnet_mode is None: raise ValueError('`controlnet_mode` cannot be `None` when applying ControlNet-Union') controlnet_mode_emb = self.controlnet_mode_embedder(controlnet_mode) encoder_hidden_states = torch.cat([controlnet_mode_emb, encoder_hidden_states], dim=1) txt_ids = torch.cat([txt_ids[:1], txt_ids], dim=0) if txt_ids.ndim == 3: logger.warning('Passing 
`txt_ids` 3d torch.Tensor is deprecated.Please remove the batch dimension and pass it as a 2d torch Tensor') txt_ids = txt_ids[0] if img_ids.ndim == 3: logger.warning('Passing `img_ids` 3d torch.Tensor is deprecated.Please remove the batch dimension and pass it as a 2d torch Tensor') img_ids = img_ids[0] ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) block_samples = () for (index_block, block) in enumerate(self.transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} (encoder_hidden_states, hidden_states) = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, encoder_hidden_states, temb, image_rotary_emb, **ckpt_kwargs) else: (encoder_hidden_states, hidden_states) = block(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb) block_samples = block_samples + (hidden_states,) hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) single_block_samples = () for (index_block, block) in enumerate(self.single_transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, temb, image_rotary_emb, **ckpt_kwargs) else: hidden_states = block(hidden_states=hidden_states, temb=temb, image_rotary_emb=image_rotary_emb) single_block_samples = single_block_samples + (hidden_states[:, encoder_hidden_states.shape[1]:],) controlnet_block_samples = () for (block_sample, controlnet_block) in zip(block_samples, self.controlnet_blocks): block_sample = controlnet_block(block_sample) controlnet_block_samples = controlnet_block_samples + (block_sample,) controlnet_single_block_samples = () for (single_block_sample, controlnet_block) in zip(single_block_samples, self.controlnet_single_blocks): single_block_sample = controlnet_block(single_block_sample) controlnet_single_block_samples = controlnet_single_block_samples + (single_block_sample,) controlnet_block_samples = [sample * conditioning_scale for sample in controlnet_block_samples] controlnet_single_block_samples = [sample * conditioning_scale for sample in controlnet_single_block_samples] controlnet_block_samples = None if len(controlnet_block_samples) == 0 else controlnet_block_samples controlnet_single_block_samples = None if len(controlnet_single_block_samples) == 0 else controlnet_single_block_samples if USE_PEFT_BACKEND: unscale_lora_layers(self, lora_scale) if not return_dict: return (controlnet_block_samples, controlnet_single_block_samples) return FluxControlNetOutput(controlnet_block_samples=controlnet_block_samples, controlnet_single_block_samples=controlnet_single_block_samples) class FluxMultiControlNetModel(ModelMixin): def __init__(self, controlnets): super().__init__() self.nets = nn.ModuleList(controlnets) def forward(self, hidden_states: torch.FloatTensor, controlnet_cond: 
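# Illustrative usage sketch (added; not part of the original diffusers source): building a
# small FluxControlNetModel from a loaded Flux transformer with the from_transformer()
# classmethod above. The embedders and the first blocks are copied (strict=False) and
# controlnet_x_embedder stays zero-initialized. The repo id and dtype are assumptions.
import torch
from diffusers import FluxControlNetModel, FluxTransformer2DModel

transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)
controlnet = FluxControlNetModel.from_transformer(transformer, num_layers=4, num_single_layers=10)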
List[torch.tensor], controlnet_mode: List[torch.tensor], conditioning_scale: List[float], encoder_hidden_states: torch.Tensor=None, pooled_projections: torch.Tensor=None, timestep: torch.LongTensor=None, img_ids: torch.Tensor=None, txt_ids: torch.Tensor=None, guidance: torch.Tensor=None, joint_attention_kwargs: Optional[Dict[str, Any]]=None, return_dict: bool=True) -> Union[FluxControlNetOutput, Tuple]: if len(self.nets) == 1 and self.nets[0].union: controlnet = self.nets[0] for (i, (image, mode, scale)) in enumerate(zip(controlnet_cond, controlnet_mode, conditioning_scale)): (block_samples, single_block_samples) = controlnet(hidden_states=hidden_states, controlnet_cond=image, controlnet_mode=mode[:, None], conditioning_scale=scale, timestep=timestep, guidance=guidance, pooled_projections=pooled_projections, encoder_hidden_states=encoder_hidden_states, txt_ids=txt_ids, img_ids=img_ids, joint_attention_kwargs=joint_attention_kwargs, return_dict=return_dict) if i == 0: control_block_samples = block_samples control_single_block_samples = single_block_samples else: control_block_samples = [control_block_sample + block_sample for (control_block_sample, block_sample) in zip(control_block_samples, block_samples)] control_single_block_samples = [control_single_block_sample + block_sample for (control_single_block_sample, block_sample) in zip(control_single_block_samples, single_block_samples)] else: for (i, (image, mode, scale, controlnet)) in enumerate(zip(controlnet_cond, controlnet_mode, conditioning_scale, self.nets)): (block_samples, single_block_samples) = controlnet(hidden_states=hidden_states, controlnet_cond=image, controlnet_mode=mode[:, None], conditioning_scale=scale, timestep=timestep, guidance=guidance, pooled_projections=pooled_projections, encoder_hidden_states=encoder_hidden_states, txt_ids=txt_ids, img_ids=img_ids, joint_attention_kwargs=joint_attention_kwargs, return_dict=return_dict) if i == 0: control_block_samples = block_samples control_single_block_samples = single_block_samples else: control_block_samples = [control_block_sample + block_sample for (control_block_sample, block_sample) in zip(control_block_samples, block_samples)] control_single_block_samples = [control_single_block_sample + block_sample for (control_single_block_sample, block_sample) in zip(control_single_block_samples, single_block_samples)] return (control_block_samples, control_single_block_samples) # File: diffusers-main/src/diffusers/models/controlnet_hunyuan.py from dataclasses import dataclass from typing import Dict, Optional, Union import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import logging from .attention_processor import AttentionProcessor from .controlnet import BaseOutput, Tuple, zero_module from .embeddings import HunyuanCombinedTimestepTextSizeStyleEmbedding, PatchEmbed, PixArtAlphaTextProjection from .modeling_utils import ModelMixin from .transformers.hunyuan_transformer_2d import HunyuanDiTBlock logger = logging.get_logger(__name__) @dataclass class HunyuanControlNetOutput(BaseOutput): controlnet_block_samples: Tuple[torch.Tensor] class HunyuanDiT2DControlNetModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, conditioning_channels: int=3, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, patch_size: Optional[int]=None, activation_fn: str='gelu-approximate', sample_size=32, hidden_size=1152, transformer_num_layers: int=40, mlp_ratio: float=4.0, cross_attention_dim: 
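# Illustrative note (added; not part of the original diffusers source): FluxMultiControlNetModel
# above simply sums the residuals of its member ControlNets. `controlnet_a` / `controlnet_b`
# are hypothetical placeholders for FluxControlNetModel instances loaded elsewhere; forward()
# then takes controlnet_cond, controlnet_mode and conditioning_scale as per-net lists alongside
# the shared transformer inputs.
from diffusers import FluxMultiControlNetModel

multi_controlnet = FluxMultiControlNetModel([controlnet_a, controlnet_b])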
int=1024, cross_attention_dim_t5: int=2048, pooled_projection_dim: int=1024, text_len: int=77, text_len_t5: int=256, use_style_cond_and_image_meta_size: bool=True): super().__init__() self.num_heads = num_attention_heads self.inner_dim = num_attention_heads * attention_head_dim self.text_embedder = PixArtAlphaTextProjection(in_features=cross_attention_dim_t5, hidden_size=cross_attention_dim_t5 * 4, out_features=cross_attention_dim, act_fn='silu_fp32') self.text_embedding_padding = nn.Parameter(torch.randn(text_len + text_len_t5, cross_attention_dim, dtype=torch.float32)) self.pos_embed = PatchEmbed(height=sample_size, width=sample_size, in_channels=in_channels, embed_dim=hidden_size, patch_size=patch_size, pos_embed_type=None) self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding(hidden_size, pooled_projection_dim=pooled_projection_dim, seq_len=text_len_t5, cross_attention_dim=cross_attention_dim_t5, use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size) self.controlnet_blocks = nn.ModuleList([]) self.blocks = nn.ModuleList([HunyuanDiTBlock(dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, activation_fn=activation_fn, ff_inner_dim=int(self.inner_dim * mlp_ratio), cross_attention_dim=cross_attention_dim, qk_norm=True, skip=False) for layer in range(transformer_num_layers // 2 - 1)]) self.input_block = zero_module(nn.Linear(hidden_size, hidden_size)) for _ in range(len(self.blocks)): controlnet_block = nn.Linear(hidden_size, hidden_size) controlnet_block = zero_module(controlnet_block) self.controlnet_blocks.append(controlnet_block) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor(return_deprecated_lora=True) for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) @classmethod def from_transformer(cls, transformer, conditioning_channels=3, transformer_num_layers=None, load_weights_from_transformer=True): config = transformer.config activation_fn = config.activation_fn attention_head_dim = config.attention_head_dim cross_attention_dim = config.cross_attention_dim cross_attention_dim_t5 = config.cross_attention_dim_t5 hidden_size = config.hidden_size in_channels = config.in_channels mlp_ratio = config.mlp_ratio num_attention_heads = config.num_attention_heads patch_size = config.patch_size sample_size = config.sample_size text_len = config.text_len text_len_t5 = config.text_len_t5 conditioning_channels = conditioning_channels transformer_num_layers = transformer_num_layers or config.transformer_num_layers controlnet = cls(conditioning_channels=conditioning_channels, transformer_num_layers=transformer_num_layers, activation_fn=activation_fn, attention_head_dim=attention_head_dim, cross_attention_dim=cross_attention_dim, cross_attention_dim_t5=cross_attention_dim_t5, hidden_size=hidden_size, in_channels=in_channels, mlp_ratio=mlp_ratio, num_attention_heads=num_attention_heads, patch_size=patch_size, sample_size=sample_size, text_len=text_len, text_len_t5=text_len_t5) if load_weights_from_transformer: key = controlnet.load_state_dict(transformer.state_dict(), strict=False) logger.warning(f'controlnet load from Hunyuan-DiT. 
missing_keys: {key[0]}') return controlnet def forward(self, hidden_states, timestep, controlnet_cond: torch.Tensor, conditioning_scale: float=1.0, encoder_hidden_states=None, text_embedding_mask=None, encoder_hidden_states_t5=None, text_embedding_mask_t5=None, image_meta_size=None, style=None, image_rotary_emb=None, return_dict=True): (height, width) = hidden_states.shape[-2:] hidden_states = self.pos_embed(hidden_states) hidden_states = hidden_states + self.input_block(self.pos_embed(controlnet_cond)) temb = self.time_extra_emb(timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype) (batch_size, sequence_length, _) = encoder_hidden_states_t5.shape encoder_hidden_states_t5 = self.text_embedder(encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1])) encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1) encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1) text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1) text_embedding_mask = text_embedding_mask.unsqueeze(2).bool() encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding) block_res_samples = () for (layer, block) in enumerate(self.blocks): hidden_states = block(hidden_states, temb=temb, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=image_rotary_emb) block_res_samples = block_res_samples + (hidden_states,) controlnet_block_res_samples = () for (block_res_sample, controlnet_block) in zip(block_res_samples, self.controlnet_blocks): block_res_sample = controlnet_block(block_res_sample) controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,) controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples] if not return_dict: return (controlnet_block_res_samples,) return HunyuanControlNetOutput(controlnet_block_samples=controlnet_block_res_samples) class HunyuanDiT2DMultiControlNetModel(ModelMixin): def __init__(self, controlnets): super().__init__() self.nets = nn.ModuleList(controlnets) def forward(self, hidden_states, timestep, controlnet_cond: torch.Tensor, conditioning_scale: float=1.0, encoder_hidden_states=None, text_embedding_mask=None, encoder_hidden_states_t5=None, text_embedding_mask_t5=None, image_meta_size=None, style=None, image_rotary_emb=None, return_dict=True): for (i, (image, scale, controlnet)) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)): block_samples = controlnet(hidden_states=hidden_states, timestep=timestep, controlnet_cond=image, conditioning_scale=scale, encoder_hidden_states=encoder_hidden_states, text_embedding_mask=text_embedding_mask, encoder_hidden_states_t5=encoder_hidden_states_t5, text_embedding_mask_t5=text_embedding_mask_t5, image_meta_size=image_meta_size, style=style, image_rotary_emb=image_rotary_emb, return_dict=return_dict) if i == 0: control_block_samples = block_samples else: control_block_samples = [control_block_sample + block_sample for (control_block_sample, block_sample) in zip(control_block_samples[0], block_samples[0])] control_block_samples = (control_block_samples,) return control_block_samples # File: diffusers-main/src/diffusers/models/controlnet_sd3.py from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..loaders import 
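# Illustrative usage sketch (added; not part of the original diffusers source): building a
# HunyuanDiT ControlNet from the base DiT with from_transformer() above; it constructs
# transformer_num_layers // 2 - 1 HunyuanDiT blocks and loads the matching weights with
# strict=False. The repo id is an assumption for the example.
from diffusers import HunyuanDiT2DControlNetModel, HunyuanDiT2DModel

transformer = HunyuanDiT2DModel.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", subfolder="transformer"
)
controlnet = HunyuanDiT2DControlNetModel.from_transformer(transformer)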
FromOriginalModelMixin, PeftAdapterMixin from ..models.attention import JointTransformerBlock from ..models.attention_processor import Attention, AttentionProcessor, FusedJointAttnProcessor2_0 from ..models.modeling_outputs import Transformer2DModelOutput from ..models.modeling_utils import ModelMixin from ..utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers from .controlnet import BaseOutput, zero_module from .embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed logger = logging.get_logger(__name__) @dataclass class SD3ControlNetOutput(BaseOutput): controlnet_block_samples: Tuple[torch.Tensor] class SD3ControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, sample_size: int=128, patch_size: int=2, in_channels: int=16, num_layers: int=18, attention_head_dim: int=64, num_attention_heads: int=18, joint_attention_dim: int=4096, caption_projection_dim: int=1152, pooled_projection_dim: int=2048, out_channels: int=16, pos_embed_max_size: int=96, extra_conditioning_channels: int=0): super().__init__() default_out_channels = in_channels self.out_channels = out_channels if out_channels is not None else default_out_channels self.inner_dim = num_attention_heads * attention_head_dim self.pos_embed = PatchEmbed(height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=self.inner_dim, pos_embed_max_size=pos_embed_max_size) self.time_text_embed = CombinedTimestepTextProjEmbeddings(embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim) self.context_embedder = nn.Linear(joint_attention_dim, caption_projection_dim) self.transformer_blocks = nn.ModuleList([JointTransformerBlock(dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=self.config.attention_head_dim, context_pre_only=False) for i in range(num_layers)]) self.controlnet_blocks = nn.ModuleList([]) for _ in range(len(self.transformer_blocks)): controlnet_block = nn.Linear(self.inner_dim, self.inner_dim) controlnet_block = zero_module(controlnet_block) self.controlnet_blocks.append(controlnet_block) pos_embed_input = PatchEmbed(height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels + extra_conditioning_channels, embed_dim=self.inner_dim, pos_embed_type=None) self.pos_embed_input = zero_module(pos_embed_input) self.gradient_checkpointing = False def enable_forward_chunking(self, chunk_size: Optional[int]=None, dim: int=0) -> None: if dim not in [0, 1]: raise ValueError(f'Make sure to set `dim` to either 0 or 1, not {dim}') chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): 
fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedJointAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value @classmethod def from_transformer(cls, transformer, num_layers=12, num_extra_conditioning_channels=1, load_weights_from_transformer=True): config = transformer.config config['num_layers'] = num_layers or config.num_layers config['extra_conditioning_channels'] = num_extra_conditioning_channels controlnet = cls(**config) if load_weights_from_transformer: controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict()) controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict()) controlnet.context_embedder.load_state_dict(transformer.context_embedder.state_dict()) controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False) controlnet.pos_embed_input = zero_module(controlnet.pos_embed_input) return controlnet def forward(self, hidden_states: torch.FloatTensor, controlnet_cond: torch.Tensor, conditioning_scale: float=1.0, encoder_hidden_states: torch.FloatTensor=None, pooled_projections: torch.FloatTensor=None, timestep: torch.LongTensor=None, joint_attention_kwargs: Optional[Dict[str, Any]]=None, return_dict: bool=True) -> Union[torch.FloatTensor, Transformer2DModelOutput]: if joint_attention_kwargs is not None: joint_attention_kwargs = joint_attention_kwargs.copy() lora_scale = joint_attention_kwargs.pop('scale', 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: scale_lora_layers(self, lora_scale) elif joint_attention_kwargs is not None and joint_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective.') hidden_states = self.pos_embed(hidden_states) temb = self.time_text_embed(timestep, pooled_projections) encoder_hidden_states = self.context_embedder(encoder_hidden_states) hidden_states = hidden_states + 
self.pos_embed_input(controlnet_cond) block_res_samples = () for block in self.transformer_blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, encoder_hidden_states, temb, **ckpt_kwargs) else: (encoder_hidden_states, hidden_states) = block(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb) block_res_samples = block_res_samples + (hidden_states,) controlnet_block_res_samples = () for (block_res_sample, controlnet_block) in zip(block_res_samples, self.controlnet_blocks): block_res_sample = controlnet_block(block_res_sample) controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,) controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples] if USE_PEFT_BACKEND: unscale_lora_layers(self, lora_scale) if not return_dict: return (controlnet_block_res_samples,) return SD3ControlNetOutput(controlnet_block_samples=controlnet_block_res_samples) class SD3MultiControlNetModel(ModelMixin): def __init__(self, controlnets): super().__init__() self.nets = nn.ModuleList(controlnets) def forward(self, hidden_states: torch.FloatTensor, controlnet_cond: List[torch.Tensor], conditioning_scale: List[float], pooled_projections: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, timestep: torch.LongTensor=None, joint_attention_kwargs: Optional[Dict[str, Any]]=None, return_dict: bool=True) -> Union[SD3ControlNetOutput, Tuple]: for (i, (image, scale, controlnet)) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)): block_samples = controlnet(hidden_states=hidden_states, timestep=timestep, encoder_hidden_states=encoder_hidden_states, pooled_projections=pooled_projections, controlnet_cond=image, conditioning_scale=scale, joint_attention_kwargs=joint_attention_kwargs, return_dict=return_dict) if i == 0: control_block_samples = block_samples else: control_block_samples = [control_block_sample + block_sample for (control_block_sample, block_sample) in zip(control_block_samples[0], block_samples[0])] control_block_samples = (tuple(control_block_samples),) return control_block_samples # File: diffusers-main/src/diffusers/models/controlnet_sparsectrl.py from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import functional as F from ..configuration_utils import ConfigMixin, register_to_config from ..loaders import FromOriginalModelMixin from ..utils import BaseOutput, logging from .attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unets.unet_2d_blocks import UNetMidBlock2DCrossAttn from .unets.unet_2d_condition import UNet2DConditionModel from .unets.unet_motion_model import CrossAttnDownBlockMotion, DownBlockMotion logger = logging.get_logger(__name__) @dataclass class SparseControlNetOutput(BaseOutput): down_block_res_samples: Tuple[torch.Tensor] mid_block_res_sample: torch.Tensor class
SparseControlNetConditioningEmbedding(nn.Module): def __init__(self, conditioning_embedding_channels: int, conditioning_channels: int=3, block_out_channels: Tuple[int, ...]=(16, 32, 96, 256)): super().__init__() self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) self.blocks = nn.ModuleList([]) for i in range(len(block_out_channels) - 1): channel_in = block_out_channels[i] channel_out = block_out_channels[i + 1] self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) self.conv_out = zero_module(nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)) def forward(self, conditioning: torch.Tensor) -> torch.Tensor: embedding = self.conv_in(conditioning) embedding = F.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = F.silu(embedding) embedding = self.conv_out(embedding) return embedding class SparseControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, in_channels: int=4, conditioning_channels: int=4, flip_sin_to_cos: bool=True, freq_shift: int=0, down_block_types: Tuple[str, ...]=('CrossAttnDownBlockMotion', 'CrossAttnDownBlockMotion', 'CrossAttnDownBlockMotion', 'DownBlockMotion'), only_cross_attention: Union[bool, Tuple[bool]]=False, block_out_channels: Tuple[int, ...]=(320, 640, 1280, 1280), layers_per_block: int=2, downsample_padding: int=1, mid_block_scale_factor: float=1, act_fn: str='silu', norm_num_groups: Optional[int]=32, norm_eps: float=1e-05, cross_attention_dim: int=768, transformer_layers_per_block: Union[int, Tuple[int, ...]]=1, transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]]=None, temporal_transformer_layers_per_block: Union[int, Tuple[int, ...]]=1, attention_head_dim: Union[int, Tuple[int, ...]]=8, num_attention_heads: Optional[Union[int, Tuple[int, ...]]]=None, use_linear_projection: bool=False, upcast_attention: bool=False, resnet_time_scale_shift: str='default', conditioning_embedding_out_channels: Optional[Tuple[int, ...]]=(16, 32, 96, 256), global_pool_conditions: bool=False, controlnet_conditioning_channel_order: str='rgb', motion_max_seq_length: int=32, motion_num_attention_heads: int=8, concat_conditioning_mask: bool=True, use_simplified_condition_embedding: bool=True): super().__init__() self.use_simplified_condition_embedding = use_simplified_condition_embedding num_attention_heads = num_attention_heads or attention_head_dim if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError(f'Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. 
`down_block_types`: {down_block_types}.') if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = [temporal_transformer_layers_per_block] * len(down_block_types) conv_in_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) if concat_conditioning_mask: conditioning_channels = conditioning_channels + 1 self.concat_conditioning_mask = concat_conditioning_mask if use_simplified_condition_embedding: self.controlnet_cond_embedding = zero_module(nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)) else: self.controlnet_cond_embedding = SparseControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels) time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) self.down_blocks = nn.ModuleList([]) self.controlnet_down_blocks = nn.ModuleList([]) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(motion_num_attention_heads, int): motion_num_attention_heads = (motion_num_attention_heads,) * len(down_block_types) output_channel = block_out_channels[0] controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == 'CrossAttnDownBlockMotion': down_block = CrossAttnDownBlockMotion(in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, dropout=0, num_layers=layers_per_block, transformer_layers_per_block=transformer_layers_per_block[i], resnet_eps=norm_eps, resnet_time_scale_shift=resnet_time_scale_shift, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, num_attention_heads=num_attention_heads[i], cross_attention_dim=cross_attention_dim[i], add_downsample=not is_final_block, dual_cross_attention=False, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i], temporal_double_self_attention=False) elif down_block_type == 'DownBlockMotion': down_block = DownBlockMotion(in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, dropout=0, num_layers=layers_per_block, resnet_eps=norm_eps, resnet_time_scale_shift=resnet_time_scale_shift, 
resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, add_downsample=not is_final_block, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i], temporal_double_self_attention=False) else: raise ValueError('Invalid `block_type` encountered. Must be one of `CrossAttnDownBlockMotion` or `DownBlockMotion`') self.down_blocks.append(down_block) for _ in range(layers_per_block): controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_down_blocks.append(controlnet_block) mid_block_channels = block_out_channels[-1] controlnet_block = nn.Conv2d(mid_block_channels, mid_block_channels, kernel_size=1) controlnet_block = zero_module(controlnet_block) self.controlnet_mid_block = controlnet_block if transformer_layers_per_mid_block is None: transformer_layers_per_mid_block = transformer_layers_per_block[-1] if isinstance(transformer_layers_per_block[-1], int) else 1 self.mid_block = UNetMidBlock2DCrossAttn(in_channels=mid_block_channels, temb_channels=time_embed_dim, dropout=0, num_layers=1, transformer_layers_per_block=transformer_layers_per_mid_block, resnet_eps=norm_eps, resnet_time_scale_shift=resnet_time_scale_shift, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, resnet_pre_norm=True, num_attention_heads=num_attention_heads[-1], output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], dual_cross_attention=False, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type='default') @classmethod def from_unet(cls, unet: UNet2DConditionModel, controlnet_conditioning_channel_order: str='rgb', conditioning_embedding_out_channels: Optional[Tuple[int, ...]]=(16, 32, 96, 256), load_weights_from_unet: bool=True, conditioning_channels: int=3) -> 'SparseControlNetModel': transformer_layers_per_block = unet.config.transformer_layers_per_block if 'transformer_layers_per_block' in unet.config else 1 down_block_types = unet.config.down_block_types for i in range(len(down_block_types)): if 'CrossAttn' in down_block_types[i]: down_block_types[i] = 'CrossAttnDownBlockMotion' elif 'Down' in down_block_types[i]: down_block_types[i] = 'DownBlockMotion' else: raise ValueError('Invalid `block_type` encountered. 
Must be a cross-attention or down block') controlnet = cls(in_channels=unet.config.in_channels, conditioning_channels=conditioning_channels, flip_sin_to_cos=unet.config.flip_sin_to_cos, freq_shift=unet.config.freq_shift, down_block_types=unet.config.down_block_types, only_cross_attention=unet.config.only_cross_attention, block_out_channels=unet.config.block_out_channels, layers_per_block=unet.config.layers_per_block, downsample_padding=unet.config.downsample_padding, mid_block_scale_factor=unet.config.mid_block_scale_factor, act_fn=unet.config.act_fn, norm_num_groups=unet.config.norm_num_groups, norm_eps=unet.config.norm_eps, cross_attention_dim=unet.config.cross_attention_dim, transformer_layers_per_block=transformer_layers_per_block, attention_head_dim=unet.config.attention_head_dim, num_attention_heads=unet.config.num_attention_heads, use_linear_projection=unet.config.use_linear_projection, upcast_attention=unet.config.upcast_attention, resnet_time_scale_shift=unet.config.resnet_time_scale_shift, conditioning_embedding_out_channels=conditioning_embedding_out_channels, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order) if load_weights_from_unet: controlnet.conv_in.load_state_dict(unet.conv_in.state_dict(), strict=False) controlnet.time_proj.load_state_dict(unet.time_proj.state_dict(), strict=False) controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict(), strict=False) controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict(), strict=False) controlnet.mid_block.load_state_dict(unet.mid_block.state_dict(), strict=False) return controlnet @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, 'set_attention_slice'): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == 'auto': slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == 'max': slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError(f'You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.') for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f'size {size} has to be smaller or equal to {dim}.') def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, 'set_attention_slice'): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value: bool=False) -> None: if isinstance(module, (CrossAttnDownBlockMotion, DownBlockMotion, UNetMidBlock2DCrossAttn)): module.gradient_checkpointing = value def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, conditioning_scale: float=1.0, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, conditioning_mask: Optional[torch.Tensor]=None, guess_mode: bool=False, return_dict: bool=True) -> Union[SparseControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]: (sample_batch_size, sample_channels, sample_num_frames, sample_height, sample_width) = sample.shape sample = torch.zeros_like(sample) channel_order = self.config.controlnet_conditioning_channel_order if channel_order == 'rgb': ... 
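# (Editor's note, not part of the original source.) 'rgb' is the expected conditioning channel order, so the
# bare `...` in the branch above is an intentional no-op; conditioning inputs declared as 'bgr' are flipped
# along the channel dimension in the branch that follows.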
elif channel_order == 'bgr': controlnet_cond = torch.flip(controlnet_cond, dims=[1]) else: raise ValueError(f'unknown `controlnet_conditioning_channel_order`: {channel_order}') if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) emb = emb.repeat_interleave(sample_num_frames, dim=0) (batch_size, channels, num_frames, height, width) = sample.shape sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) sample = self.conv_in(sample) (batch_frames, channels, height, width) = sample.shape sample = sample[:, None].reshape(sample_batch_size, sample_num_frames, channels, height, width) if self.concat_conditioning_mask: controlnet_cond = torch.cat([controlnet_cond, conditioning_mask], dim=1) (batch_size, channels, num_frames, height, width) = controlnet_cond.shape controlnet_cond = controlnet_cond.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) (batch_frames, channels, height, width) = controlnet_cond.shape controlnet_cond = controlnet_cond[:, None].reshape(batch_size, num_frames, channels, height, width) sample = sample + controlnet_cond (batch_size, num_frames, channels, height, width) = sample.shape sample = sample.reshape(sample_batch_size * sample_num_frames, channels, height, width) down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples if self.mid_block is not None: if hasattr(self.mid_block, 'has_cross_attention') and self.mid_block.has_cross_attention: sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs) else: sample = self.mid_block(sample, emb) controlnet_down_block_res_samples = () for (down_block_res_sample, controlnet_block) in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) if guess_mode and (not self.config.global_pool_conditions): scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) scales = scales * conditioning_scale down_block_res_samples = [sample * scale for (sample, scale) in zip(down_block_res_samples, scales)] 
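# (Editor's note, not part of the original source.) In guess mode the residuals are not scaled uniformly:
# torch.logspace(-1, 0, n + 1) yields factors growing from 0.1 to 1.0, so the shallowest residual contributes
# the least and the mid-block residual (multiplied by scales[-1] just below) contributes the most, all on top
# of the user-supplied conditioning_scale.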
mid_block_res_sample = mid_block_res_sample * scales[-1] else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return SparseControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample) def zero_module(module: nn.Module) -> nn.Module: for p in module.parameters(): nn.init.zeros_(p) return module # File: diffusers-main/src/diffusers/models/controlnet_xs.py from dataclasses import dataclass from math import gcd from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, is_torch_version, logging from ..utils.torch_utils import apply_freeu from .attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0 from .controlnet import ControlNetConditioningEmbedding from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unets.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, Downsample2D, ResnetBlock2D, Transformer2DModel, UNetMidBlock2DCrossAttn, Upsample2D from .unets.unet_2d_condition import UNet2DConditionModel logger = logging.get_logger(__name__) @dataclass class ControlNetXSOutput(BaseOutput): sample: Tensor = None class DownBlockControlNetXSAdapter(nn.Module): def __init__(self, resnets: nn.ModuleList, base_to_ctrl: nn.ModuleList, ctrl_to_base: nn.ModuleList, attentions: Optional[nn.ModuleList]=None, downsampler: Optional[nn.Conv2d]=None): super().__init__() self.resnets = resnets self.base_to_ctrl = base_to_ctrl self.ctrl_to_base = ctrl_to_base self.attentions = attentions self.downsamplers = downsampler class MidBlockControlNetXSAdapter(nn.Module): def __init__(self, midblock: UNetMidBlock2DCrossAttn, base_to_ctrl: nn.ModuleList, ctrl_to_base: nn.ModuleList): super().__init__() self.midblock = midblock self.base_to_ctrl = base_to_ctrl self.ctrl_to_base = ctrl_to_base class UpBlockControlNetXSAdapter(nn.Module): def __init__(self, ctrl_to_base: nn.ModuleList): super().__init__() self.ctrl_to_base = ctrl_to_base def get_down_block_adapter(base_in_channels: int, base_out_channels: int, ctrl_in_channels: int, ctrl_out_channels: int, temb_channels: int, max_norm_num_groups: Optional[int]=32, has_crossattn=True, transformer_layers_per_block: Optional[Union[int, Tuple[int]]]=1, num_attention_heads: Optional[int]=1, cross_attention_dim: Optional[int]=1024, add_downsample: bool=True, upcast_attention: Optional[bool]=False, use_linear_projection: Optional[bool]=True): num_layers = 2 resnets = [] attentions = [] ctrl_to_base = [] base_to_ctrl = [] if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): base_in_channels = base_in_channels if i == 0 else base_out_channels ctrl_in_channels = ctrl_in_channels if i == 0 else ctrl_out_channels base_to_ctrl.append(make_zero_conv(base_in_channels, base_in_channels)) 
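# (Editor's note, not part of the original source.) This loop wires one ControlNet-XS down sub-block: the
# zero-initialized base-to-ctrl convolution appended above lets base-UNet features be concatenated into the
# control stream, which is why the control ResNet created next takes ctrl_in_channels + base_in_channels
# input channels; after the optional Transformer2DModel, a matching ctrl_to_base zero convolution routes the
# control signal back to the base UNet.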
resnets.append(ResnetBlock2D(in_channels=ctrl_in_channels + base_in_channels, out_channels=ctrl_out_channels, temb_channels=temb_channels, groups=find_largest_factor(ctrl_in_channels + base_in_channels, max_factor=max_norm_num_groups), groups_out=find_largest_factor(ctrl_out_channels, max_factor=max_norm_num_groups), eps=1e-05)) if has_crossattn: attentions.append(Transformer2DModel(num_attention_heads, ctrl_out_channels // num_attention_heads, in_channels=ctrl_out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, norm_num_groups=find_largest_factor(ctrl_out_channels, max_factor=max_norm_num_groups))) ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels)) if add_downsample: base_to_ctrl.append(make_zero_conv(base_out_channels, base_out_channels)) downsamplers = Downsample2D(ctrl_out_channels + base_out_channels, use_conv=True, out_channels=ctrl_out_channels, name='op') ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels)) else: downsamplers = None down_block_components = DownBlockControlNetXSAdapter(resnets=nn.ModuleList(resnets), base_to_ctrl=nn.ModuleList(base_to_ctrl), ctrl_to_base=nn.ModuleList(ctrl_to_base)) if has_crossattn: down_block_components.attentions = nn.ModuleList(attentions) if downsamplers is not None: down_block_components.downsamplers = downsamplers return down_block_components def get_mid_block_adapter(base_channels: int, ctrl_channels: int, temb_channels: Optional[int]=None, max_norm_num_groups: Optional[int]=32, transformer_layers_per_block: int=1, num_attention_heads: Optional[int]=1, cross_attention_dim: Optional[int]=1024, upcast_attention: bool=False, use_linear_projection: bool=True): base_to_ctrl = make_zero_conv(base_channels, base_channels) midblock = UNetMidBlock2DCrossAttn(transformer_layers_per_block=transformer_layers_per_block, in_channels=ctrl_channels + base_channels, out_channels=ctrl_channels, temb_channels=temb_channels, resnet_groups=find_largest_factor(gcd(ctrl_channels, ctrl_channels + base_channels), max_norm_num_groups), cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention) ctrl_to_base = make_zero_conv(ctrl_channels, base_channels) return MidBlockControlNetXSAdapter(base_to_ctrl=base_to_ctrl, midblock=midblock, ctrl_to_base=ctrl_to_base) def get_up_block_adapter(out_channels: int, prev_output_channel: int, ctrl_skip_channels: List[int]): ctrl_to_base = [] num_layers = 3 for i in range(num_layers): resnet_in_channels = prev_output_channel if i == 0 else out_channels ctrl_to_base.append(make_zero_conv(ctrl_skip_channels[i], resnet_in_channels)) return UpBlockControlNetXSAdapter(ctrl_to_base=nn.ModuleList(ctrl_to_base)) class ControlNetXSAdapter(ModelMixin, ConfigMixin): @register_to_config def __init__(self, conditioning_channels: int=3, conditioning_channel_order: str='rgb', conditioning_embedding_out_channels: Tuple[int]=(16, 32, 96, 256), time_embedding_mix: float=1.0, learn_time_embedding: bool=False, num_attention_heads: Union[int, Tuple[int]]=4, block_out_channels: Tuple[int]=(4, 8, 16, 16), base_block_out_channels: Tuple[int]=(320, 640, 1280, 1280), cross_attention_dim: int=1024, down_block_types: Tuple[str]=('CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D'), sample_size: Optional[int]=96, transformer_layers_per_block: Union[int, 
Tuple[int]]=1, upcast_attention: bool=True, max_norm_num_groups: int=32, use_linear_projection: bool=True): super().__init__() time_embedding_input_dim = base_block_out_channels[0] time_embedding_dim = base_block_out_channels[0] * 4 if conditioning_channel_order not in ['rgb', 'bgr']: raise ValueError(f'unknown `conditioning_channel_order`: {conditioning_channel_order}') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(transformer_layers_per_block, (list, tuple)): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if not isinstance(cross_attention_dim, (list, tuple)): cross_attention_dim = [cross_attention_dim] * len(down_block_types) if not isinstance(num_attention_heads, (list, tuple)): num_attention_heads = [num_attention_heads] * len(down_block_types) if len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.') self.controlnet_cond_embedding = ControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=conditioning_embedding_out_channels, conditioning_channels=conditioning_channels) if learn_time_embedding: self.time_embedding = TimestepEmbedding(time_embedding_input_dim, time_embedding_dim) else: self.time_embedding = None self.down_blocks = nn.ModuleList([]) self.up_connections = nn.ModuleList([]) self.conv_in = nn.Conv2d(4, block_out_channels[0], kernel_size=3, padding=1) self.control_to_base_for_conv_in = make_zero_conv(block_out_channels[0], base_block_out_channels[0]) base_out_channels = base_block_out_channels[0] ctrl_out_channels = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): base_in_channels = base_out_channels base_out_channels = base_block_out_channels[i] ctrl_in_channels = ctrl_out_channels ctrl_out_channels = block_out_channels[i] has_crossattn = 'CrossAttn' in down_block_type is_final_block = i == len(down_block_types) - 1 self.down_blocks.append(get_down_block_adapter(base_in_channels=base_in_channels, base_out_channels=base_out_channels, ctrl_in_channels=ctrl_in_channels, ctrl_out_channels=ctrl_out_channels, temb_channels=time_embedding_dim, max_norm_num_groups=max_norm_num_groups, has_crossattn=has_crossattn, transformer_layers_per_block=transformer_layers_per_block[i], num_attention_heads=num_attention_heads[i], cross_attention_dim=cross_attention_dim[i], add_downsample=not is_final_block, upcast_attention=upcast_attention, use_linear_projection=use_linear_projection)) self.mid_block = get_mid_block_adapter(base_channels=base_block_out_channels[-1], ctrl_channels=block_out_channels[-1], temb_channels=time_embedding_dim, transformer_layers_per_block=transformer_layers_per_block[-1], num_attention_heads=num_attention_heads[-1], cross_attention_dim=cross_attention_dim[-1], upcast_attention=upcast_attention, use_linear_projection=use_linear_projection) ctrl_skip_channels = [block_out_channels[0]] for (i, out_channels) in enumerate(block_out_channels): number_of_subblocks = 3 if i < len(block_out_channels) - 1 else 2 ctrl_skip_channels.extend([out_channels] * number_of_subblocks) reversed_base_block_out_channels = list(reversed(base_block_out_channels)) base_out_channels = 
reversed_base_block_out_channels[0] for i in range(len(down_block_types)): prev_base_output_channel = base_out_channels base_out_channels = reversed_base_block_out_channels[i] ctrl_skip_channels_ = [ctrl_skip_channels.pop() for _ in range(3)] self.up_connections.append(get_up_block_adapter(out_channels=base_out_channels, prev_output_channel=prev_base_output_channel, ctrl_skip_channels=ctrl_skip_channels_)) @classmethod def from_unet(cls, unet: UNet2DConditionModel, size_ratio: Optional[float]=None, block_out_channels: Optional[List[int]]=None, num_attention_heads: Optional[List[int]]=None, learn_time_embedding: bool=False, time_embedding_mix: float=1.0, conditioning_channels: int=3, conditioning_channel_order: str='rgb', conditioning_embedding_out_channels: Tuple[int]=(16, 32, 96, 256)): fixed_size = block_out_channels is not None relative_size = size_ratio is not None if not fixed_size ^ relative_size: raise ValueError('Pass exactly one of `block_out_channels` (for absolute sizing) or `size_ratio` (for relative sizing).') block_out_channels = block_out_channels or [int(b * size_ratio) for b in unet.config.block_out_channels] if num_attention_heads is None: num_attention_heads = unet.config.attention_head_dim model = cls(conditioning_channels=conditioning_channels, conditioning_channel_order=conditioning_channel_order, conditioning_embedding_out_channels=conditioning_embedding_out_channels, time_embedding_mix=time_embedding_mix, learn_time_embedding=learn_time_embedding, num_attention_heads=num_attention_heads, block_out_channels=block_out_channels, base_block_out_channels=unet.config.block_out_channels, cross_attention_dim=unet.config.cross_attention_dim, down_block_types=unet.config.down_block_types, sample_size=unet.config.sample_size, transformer_layers_per_block=unet.config.transformer_layers_per_block, upcast_attention=unet.config.upcast_attention, max_norm_num_groups=unet.config.norm_num_groups, use_linear_projection=unet.config.use_linear_projection) model.to(unet.dtype) return model def forward(self, *args, **kwargs): raise ValueError('A ControlNetXSAdapter cannot be run by itself.
Use it together with a UNet2DConditionModel to instantiate a UNetControlNetXSModel.') class UNetControlNetXSModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, sample_size: Optional[int]=96, down_block_types: Tuple[str]=('CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types: Tuple[str]=('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels: Tuple[int]=(320, 640, 1280, 1280), norm_num_groups: Optional[int]=32, cross_attention_dim: Union[int, Tuple[int]]=1024, transformer_layers_per_block: Union[int, Tuple[int]]=1, num_attention_heads: Union[int, Tuple[int]]=8, addition_embed_type: Optional[str]=None, addition_time_embed_dim: Optional[int]=None, upcast_attention: bool=True, use_linear_projection: bool=True, time_cond_proj_dim: Optional[int]=None, projection_class_embeddings_input_dim: Optional[int]=None, time_embedding_mix: float=1.0, ctrl_conditioning_channels: int=3, ctrl_conditioning_embedding_out_channels: Tuple[int]=(16, 32, 96, 256), ctrl_conditioning_channel_order: str='rgb', ctrl_learn_time_embedding: bool=False, ctrl_block_out_channels: Tuple[int]=(4, 8, 16, 16), ctrl_num_attention_heads: Union[int, Tuple[int]]=4, ctrl_max_norm_num_groups: int=32): super().__init__() if time_embedding_mix < 0 or time_embedding_mix > 1: raise ValueError('`time_embedding_mix` needs to be between 0 and 1.') if time_embedding_mix < 1 and (not ctrl_learn_time_embedding): raise ValueError('To use `time_embedding_mix` < 1, `ctrl_learn_time_embedding` must be `True`') if addition_embed_type is not None and addition_embed_type != 'text_time': raise ValueError("As `UNetControlNetXSModel` currently only supports StableDiffusion and StableDiffusion-XL, `addition_embed_type` must be `None` or `'text_time'`.") if not isinstance(transformer_layers_per_block, (list, tuple)): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if not isinstance(cross_attention_dim, (list, tuple)): cross_attention_dim = [cross_attention_dim] * len(down_block_types) if not isinstance(num_attention_heads, (list, tuple)): num_attention_heads = [num_attention_heads] * len(down_block_types) if not isinstance(ctrl_num_attention_heads, (list, tuple)): ctrl_num_attention_heads = [ctrl_num_attention_heads] * len(down_block_types) base_num_attention_heads = num_attention_heads self.in_channels = 4 self.base_conv_in = nn.Conv2d(4, block_out_channels[0], kernel_size=3, padding=1) self.controlnet_cond_embedding = ControlNetConditioningEmbedding(conditioning_embedding_channels=ctrl_block_out_channels[0], block_out_channels=ctrl_conditioning_embedding_out_channels, conditioning_channels=ctrl_conditioning_channels) self.ctrl_conv_in = nn.Conv2d(4, ctrl_block_out_channels[0], kernel_size=3, padding=1) self.control_to_base_for_conv_in = make_zero_conv(ctrl_block_out_channels[0], block_out_channels[0]) time_embed_input_dim = block_out_channels[0] time_embed_dim = block_out_channels[0] * 4 self.base_time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos=True, downscale_freq_shift=0) self.base_time_embedding = TimestepEmbedding(time_embed_input_dim, time_embed_dim, cond_proj_dim=time_cond_proj_dim) if ctrl_learn_time_embedding: self.ctrl_time_embedding = TimestepEmbedding(in_channels=time_embed_input_dim, time_embed_dim=time_embed_dim) else: self.ctrl_time_embedding = None if addition_embed_type is None: self.base_add_time_proj = None self.base_add_embedding = None 
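# (Editor's note, not part of the original source.) Only the SDXL-style 'text_time' additional embedding is
# supported (enforced by the ValueError above); when it is configured, the else branch that follows creates
# the time-ids projection (base_add_time_proj) and the embedding of the pooled text + time ids
# (base_add_embedding), which are added to the time embedding in forward().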
else: self.base_add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos=True, downscale_freq_shift=0) self.base_add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) down_blocks = [] base_out_channels = block_out_channels[0] ctrl_out_channels = ctrl_block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): base_in_channels = base_out_channels base_out_channels = block_out_channels[i] ctrl_in_channels = ctrl_out_channels ctrl_out_channels = ctrl_block_out_channels[i] has_crossattn = 'CrossAttn' in down_block_type is_final_block = i == len(down_block_types) - 1 down_blocks.append(ControlNetXSCrossAttnDownBlock2D(base_in_channels=base_in_channels, base_out_channels=base_out_channels, ctrl_in_channels=ctrl_in_channels, ctrl_out_channels=ctrl_out_channels, temb_channels=time_embed_dim, norm_num_groups=norm_num_groups, ctrl_max_norm_num_groups=ctrl_max_norm_num_groups, has_crossattn=has_crossattn, transformer_layers_per_block=transformer_layers_per_block[i], base_num_attention_heads=base_num_attention_heads[i], ctrl_num_attention_heads=ctrl_num_attention_heads[i], cross_attention_dim=cross_attention_dim[i], add_downsample=not is_final_block, upcast_attention=upcast_attention, use_linear_projection=use_linear_projection)) self.mid_block = ControlNetXSCrossAttnMidBlock2D(base_channels=block_out_channels[-1], ctrl_channels=ctrl_block_out_channels[-1], temb_channels=time_embed_dim, norm_num_groups=norm_num_groups, ctrl_max_norm_num_groups=ctrl_max_norm_num_groups, transformer_layers_per_block=transformer_layers_per_block[-1], base_num_attention_heads=base_num_attention_heads[-1], ctrl_num_attention_heads=ctrl_num_attention_heads[-1], cross_attention_dim=cross_attention_dim[-1], upcast_attention=upcast_attention, use_linear_projection=use_linear_projection) up_blocks = [] rev_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) rev_num_attention_heads = list(reversed(base_num_attention_heads)) rev_cross_attention_dim = list(reversed(cross_attention_dim)) ctrl_skip_channels = [ctrl_block_out_channels[0]] for (i, out_channels) in enumerate(ctrl_block_out_channels): number_of_subblocks = 3 if i < len(ctrl_block_out_channels) - 1 else 2 ctrl_skip_channels.extend([out_channels] * number_of_subblocks) reversed_block_out_channels = list(reversed(block_out_channels)) out_channels = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): prev_output_channel = out_channels out_channels = reversed_block_out_channels[i] in_channels = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] ctrl_skip_channels_ = [ctrl_skip_channels.pop() for _ in range(3)] has_crossattn = 'CrossAttn' in up_block_type is_final_block = i == len(block_out_channels) - 1 up_blocks.append(ControlNetXSCrossAttnUpBlock2D(in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, ctrl_skip_channels=ctrl_skip_channels_, temb_channels=time_embed_dim, resolution_idx=i, has_crossattn=has_crossattn, transformer_layers_per_block=rev_transformer_layers_per_block[i], num_attention_heads=rev_num_attention_heads[i], cross_attention_dim=rev_cross_attention_dim[i], add_upsample=not is_final_block, upcast_attention=upcast_attention, norm_num_groups=norm_num_groups, use_linear_projection=use_linear_projection)) self.down_blocks = nn.ModuleList(down_blocks) self.up_blocks = nn.ModuleList(up_blocks) self.base_conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], 
num_groups=norm_num_groups) self.base_conv_act = nn.SiLU() self.base_conv_out = nn.Conv2d(block_out_channels[0], 4, kernel_size=3, padding=1) @classmethod def from_unet(cls, unet: UNet2DConditionModel, controlnet: Optional[ControlNetXSAdapter]=None, size_ratio: Optional[float]=None, ctrl_block_out_channels: Optional[List[float]]=None, time_embedding_mix: Optional[float]=None, ctrl_optional_kwargs: Optional[Dict]=None): if controlnet is None: controlnet = ControlNetXSAdapter.from_unet(unet, size_ratio, ctrl_block_out_channels, **(ctrl_optional_kwargs or {})) elif any((o is not None for o in (size_ratio, ctrl_block_out_channels, time_embedding_mix, ctrl_optional_kwargs))): raise ValueError('When a controlnet is passed, none of these parameters should be passed: size_ratio, ctrl_block_out_channels, time_embedding_mix, ctrl_optional_kwargs.') params_for_unet = ['sample_size', 'down_block_types', 'up_block_types', 'block_out_channels', 'norm_num_groups', 'cross_attention_dim', 'transformer_layers_per_block', 'addition_embed_type', 'addition_time_embed_dim', 'upcast_attention', 'use_linear_projection', 'time_cond_proj_dim', 'projection_class_embeddings_input_dim'] params_for_unet = {k: v for (k, v) in unet.config.items() if k in params_for_unet} params_for_unet['num_attention_heads'] = unet.config.attention_head_dim params_for_controlnet = ['conditioning_channels', 'conditioning_embedding_out_channels', 'conditioning_channel_order', 'learn_time_embedding', 'block_out_channels', 'num_attention_heads', 'max_norm_num_groups'] params_for_controlnet = {'ctrl_' + k: v for (k, v) in controlnet.config.items() if k in params_for_controlnet} params_for_controlnet['time_embedding_mix'] = controlnet.config.time_embedding_mix model = cls.from_config({**params_for_unet, **params_for_controlnet}) modules_from_unet = ['time_embedding', 'conv_in', 'conv_norm_out', 'conv_out'] for m in modules_from_unet: getattr(model, 'base_' + m).load_state_dict(getattr(unet, m).state_dict()) optional_modules_from_unet = ['add_time_proj', 'add_embedding'] for m in optional_modules_from_unet: if hasattr(unet, m) and getattr(unet, m) is not None: getattr(model, 'base_' + m).load_state_dict(getattr(unet, m).state_dict()) model.controlnet_cond_embedding.load_state_dict(controlnet.controlnet_cond_embedding.state_dict()) model.ctrl_conv_in.load_state_dict(controlnet.conv_in.state_dict()) if controlnet.time_embedding is not None: model.ctrl_time_embedding.load_state_dict(controlnet.time_embedding.state_dict()) model.control_to_base_for_conv_in.load_state_dict(controlnet.control_to_base_for_conv_in.state_dict()) model.down_blocks = nn.ModuleList((ControlNetXSCrossAttnDownBlock2D.from_modules(b, c) for (b, c) in zip(unet.down_blocks, controlnet.down_blocks))) model.mid_block = ControlNetXSCrossAttnMidBlock2D.from_modules(unet.mid_block, controlnet.mid_block) model.up_blocks = nn.ModuleList((ControlNetXSCrossAttnUpBlock2D.from_modules(b, c) for (b, c) in zip(unet.up_blocks, controlnet.up_connections))) model.to(unet.dtype) return model def freeze_unet_params(self) -> None: for param in self.parameters(): param.requires_grad = True base_parts = ['base_time_proj', 'base_time_embedding', 'base_add_time_proj', 'base_add_embedding', 'base_conv_in', 'base_conv_norm_out', 'base_conv_act', 'base_conv_out'] base_parts = [getattr(self, part) for part in base_parts if getattr(self, part) is not None] for part in base_parts: for param in part.parameters(): param.requires_grad = False for d in self.down_blocks: d.freeze_base_params()
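# (Editor's note, not part of the original source.) freeze_unet_params first re-enables gradients on every
# parameter and then disables them for the base (frozen UNet) parts: the modules listed above, the base
# halves of the down blocks frozen just before this note, and the mid and up blocks frozen next, leaving only
# the ControlNet-XS branch trainable. A minimal, hedged sketch of the intended usage; the checkpoint path is
# a placeholder, not taken from this file:
#
#     from diffusers import UNet2DConditionModel
#     base_unet = UNet2DConditionModel.from_pretrained("path/to/sd-base-checkpoint", subfolder="unet")
#     adapter = ControlNetXSAdapter.from_unet(base_unet, size_ratio=0.1)
#     model = UNetControlNetXSModel.from_unet(base_unet, adapter)
#     model.freeze_unet_params()  # only the control branch keeps requires_grad=True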
self.mid_block.freeze_base_params() for u in self.up_blocks: u.freeze_base_params() def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): for (i, upsample_block) in enumerate(self.up_blocks): setattr(upsample_block, 's1', s1) setattr(upsample_block, 's2', s2) setattr(upsample_block, 'b1', b1) setattr(upsample_block, 'b2', b2) def disable_freeu(self): freeu_keys = {'s1', 's2', 'b1', 'b2'} for (i, upsample_block) in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward(self, sample: Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: Optional[torch.Tensor]=None, conditioning_scale: Optional[float]=1.0, class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, 
attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]]=None, return_dict: bool=True, apply_control: bool=True) -> Union[ControlNetXSOutput, Tuple]: if self.config.ctrl_conditioning_channel_order == 'bgr': controlnet_cond = torch.flip(controlnet_cond, dims=[1]) if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timesteps = timesteps.expand(sample.shape[0]) t_emb = self.base_time_proj(timesteps) t_emb = t_emb.to(dtype=sample.dtype) if self.config.ctrl_learn_time_embedding and apply_control: ctrl_temb = self.ctrl_time_embedding(t_emb, timestep_cond) base_temb = self.base_time_embedding(t_emb, timestep_cond) interpolation_param = self.config.time_embedding_mix ** 0.3 temb = ctrl_temb * interpolation_param + base_temb * (1 - interpolation_param) else: temb = self.base_time_embedding(t_emb) aug_emb = None if self.config.addition_embed_type is None: pass elif self.config.addition_embed_type == 'text_time': if 'text_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`") text_embeds = added_cond_kwargs.get('text_embeds') if 'time_ids' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`") time_ids = added_cond_kwargs.get('time_ids') time_embeds = self.base_add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(temb.dtype) aug_emb = self.base_add_embedding(add_embeds) else: raise ValueError(f'ControlNet-XS currently only supports StableDiffusion and StableDiffusion-XL, so addition_embed_type = {self.config.addition_embed_type} is currently not supported.') temb = temb + aug_emb if aug_emb is not None else temb cemb = encoder_hidden_states h_ctrl = h_base = sample (hs_base, hs_ctrl) = ([], []) guided_hint = self.controlnet_cond_embedding(controlnet_cond) h_base = self.base_conv_in(h_base) h_ctrl = self.ctrl_conv_in(h_ctrl) if guided_hint is not None: h_ctrl += guided_hint if apply_control: h_base = h_base + self.control_to_base_for_conv_in(h_ctrl) * conditioning_scale hs_base.append(h_base) hs_ctrl.append(h_ctrl) for down in self.down_blocks: (h_base, h_ctrl, residual_hb, residual_hc) = down(hidden_states_base=h_base, hidden_states_ctrl=h_ctrl, temb=temb, encoder_hidden_states=cemb, conditioning_scale=conditioning_scale, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, apply_control=apply_control) hs_base.extend(residual_hb) hs_ctrl.extend(residual_hc) (h_base, h_ctrl) = self.mid_block(hidden_states_base=h_base, hidden_states_ctrl=h_ctrl, temb=temb, encoder_hidden_states=cemb, conditioning_scale=conditioning_scale, cross_attention_kwargs=cross_attention_kwargs, 
attention_mask=attention_mask, apply_control=apply_control) for up in self.up_blocks: n_resnets = len(up.resnets) skips_hb = hs_base[-n_resnets:] skips_hc = hs_ctrl[-n_resnets:] hs_base = hs_base[:-n_resnets] hs_ctrl = hs_ctrl[:-n_resnets] h_base = up(hidden_states=h_base, res_hidden_states_tuple_base=skips_hb, res_hidden_states_tuple_ctrl=skips_hc, temb=temb, encoder_hidden_states=cemb, conditioning_scale=conditioning_scale, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, apply_control=apply_control) h_base = self.base_conv_norm_out(h_base) h_base = self.base_conv_act(h_base) h_base = self.base_conv_out(h_base) if not return_dict: return (h_base,) return ControlNetXSOutput(sample=h_base) class ControlNetXSCrossAttnDownBlock2D(nn.Module): def __init__(self, base_in_channels: int, base_out_channels: int, ctrl_in_channels: int, ctrl_out_channels: int, temb_channels: int, norm_num_groups: int=32, ctrl_max_norm_num_groups: int=32, has_crossattn=True, transformer_layers_per_block: Optional[Union[int, Tuple[int]]]=1, base_num_attention_heads: Optional[int]=1, ctrl_num_attention_heads: Optional[int]=1, cross_attention_dim: Optional[int]=1024, add_downsample: bool=True, upcast_attention: Optional[bool]=False, use_linear_projection: Optional[bool]=True): super().__init__() base_resnets = [] base_attentions = [] ctrl_resnets = [] ctrl_attentions = [] ctrl_to_base = [] base_to_ctrl = [] num_layers = 2 if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): base_in_channels = base_in_channels if i == 0 else base_out_channels ctrl_in_channels = ctrl_in_channels if i == 0 else ctrl_out_channels base_to_ctrl.append(make_zero_conv(base_in_channels, base_in_channels)) base_resnets.append(ResnetBlock2D(in_channels=base_in_channels, out_channels=base_out_channels, temb_channels=temb_channels, groups=norm_num_groups)) ctrl_resnets.append(ResnetBlock2D(in_channels=ctrl_in_channels + base_in_channels, out_channels=ctrl_out_channels, temb_channels=temb_channels, groups=find_largest_factor(ctrl_in_channels + base_in_channels, max_factor=ctrl_max_norm_num_groups), groups_out=find_largest_factor(ctrl_out_channels, max_factor=ctrl_max_norm_num_groups), eps=1e-05)) if has_crossattn: base_attentions.append(Transformer2DModel(base_num_attention_heads, base_out_channels // base_num_attention_heads, in_channels=base_out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, norm_num_groups=norm_num_groups)) ctrl_attentions.append(Transformer2DModel(ctrl_num_attention_heads, ctrl_out_channels // ctrl_num_attention_heads, in_channels=ctrl_out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, norm_num_groups=find_largest_factor(ctrl_out_channels, max_factor=ctrl_max_norm_num_groups))) ctrl_to_base.append(make_zero_conv(ctrl_out_channels, base_out_channels)) if add_downsample: base_to_ctrl.append(make_zero_conv(base_out_channels, base_out_channels)) self.base_downsamplers = Downsample2D(base_out_channels, use_conv=True, out_channels=base_out_channels, name='op') self.ctrl_downsamplers = Downsample2D(ctrl_out_channels + base_out_channels, use_conv=True, out_channels=ctrl_out_channels, name='op') ctrl_to_base.append(make_zero_conv(ctrl_out_channels, 
base_out_channels)) else: self.base_downsamplers = None self.ctrl_downsamplers = None self.base_resnets = nn.ModuleList(base_resnets) self.ctrl_resnets = nn.ModuleList(ctrl_resnets) self.base_attentions = nn.ModuleList(base_attentions) if has_crossattn else [None] * num_layers self.ctrl_attentions = nn.ModuleList(ctrl_attentions) if has_crossattn else [None] * num_layers self.base_to_ctrl = nn.ModuleList(base_to_ctrl) self.ctrl_to_base = nn.ModuleList(ctrl_to_base) self.gradient_checkpointing = False @classmethod def from_modules(cls, base_downblock: CrossAttnDownBlock2D, ctrl_downblock: DownBlockControlNetXSAdapter): def get_first_cross_attention(block): return block.attentions[0].transformer_blocks[0].attn2 base_in_channels = base_downblock.resnets[0].in_channels base_out_channels = base_downblock.resnets[0].out_channels ctrl_in_channels = ctrl_downblock.resnets[0].in_channels - base_in_channels ctrl_out_channels = ctrl_downblock.resnets[0].out_channels temb_channels = base_downblock.resnets[0].time_emb_proj.in_features num_groups = base_downblock.resnets[0].norm1.num_groups ctrl_num_groups = ctrl_downblock.resnets[0].norm1.num_groups if hasattr(base_downblock, 'attentions'): has_crossattn = True transformer_layers_per_block = len(base_downblock.attentions[0].transformer_blocks) base_num_attention_heads = get_first_cross_attention(base_downblock).heads ctrl_num_attention_heads = get_first_cross_attention(ctrl_downblock).heads cross_attention_dim = get_first_cross_attention(base_downblock).cross_attention_dim upcast_attention = get_first_cross_attention(base_downblock).upcast_attention use_linear_projection = base_downblock.attentions[0].use_linear_projection else: has_crossattn = False transformer_layers_per_block = None base_num_attention_heads = None ctrl_num_attention_heads = None cross_attention_dim = None upcast_attention = None use_linear_projection = None add_downsample = base_downblock.downsamplers is not None model = cls(base_in_channels=base_in_channels, base_out_channels=base_out_channels, ctrl_in_channels=ctrl_in_channels, ctrl_out_channels=ctrl_out_channels, temb_channels=temb_channels, norm_num_groups=num_groups, ctrl_max_norm_num_groups=ctrl_num_groups, has_crossattn=has_crossattn, transformer_layers_per_block=transformer_layers_per_block, base_num_attention_heads=base_num_attention_heads, ctrl_num_attention_heads=ctrl_num_attention_heads, cross_attention_dim=cross_attention_dim, add_downsample=add_downsample, upcast_attention=upcast_attention, use_linear_projection=use_linear_projection) model.base_resnets.load_state_dict(base_downblock.resnets.state_dict()) model.ctrl_resnets.load_state_dict(ctrl_downblock.resnets.state_dict()) if has_crossattn: model.base_attentions.load_state_dict(base_downblock.attentions.state_dict()) model.ctrl_attentions.load_state_dict(ctrl_downblock.attentions.state_dict()) if add_downsample: model.base_downsamplers.load_state_dict(base_downblock.downsamplers[0].state_dict()) model.ctrl_downsamplers.load_state_dict(ctrl_downblock.downsamplers.state_dict()) model.base_to_ctrl.load_state_dict(ctrl_downblock.base_to_ctrl.state_dict()) model.ctrl_to_base.load_state_dict(ctrl_downblock.ctrl_to_base.state_dict()) return model def freeze_base_params(self) -> None: for param in self.parameters(): param.requires_grad = True base_parts = [self.base_resnets] if isinstance(self.base_attentions, nn.ModuleList): base_parts.append(self.base_attentions) if self.base_downsamplers is not None: base_parts.append(self.base_downsamplers) for part in base_parts: for 
param in part.parameters(): param.requires_grad = False def forward(self, hidden_states_base: Tensor, temb: Tensor, encoder_hidden_states: Optional[Tensor]=None, hidden_states_ctrl: Optional[Tensor]=None, conditioning_scale: Optional[float]=1.0, attention_mask: Optional[Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[Tensor]=None, apply_control: bool=True) -> Tuple[Tensor, Tensor, Tuple[Tensor, ...], Tuple[Tensor, ...]]: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') h_base = hidden_states_base h_ctrl = hidden_states_ctrl base_output_states = () ctrl_output_states = () base_blocks = list(zip(self.base_resnets, self.base_attentions)) ctrl_blocks = list(zip(self.ctrl_resnets, self.ctrl_attentions)) def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward for ((b_res, b_attn), (c_res, c_attn), b2c, c2b) in zip(base_blocks, ctrl_blocks, self.base_to_ctrl, self.ctrl_to_base): if apply_control: h_ctrl = torch.cat([h_ctrl, b2c(h_base)], dim=1) if self.training and self.gradient_checkpointing: ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} h_base = torch.utils.checkpoint.checkpoint(create_custom_forward(b_res), h_base, temb, **ckpt_kwargs) else: h_base = b_res(h_base, temb) if b_attn is not None: h_base = b_attn(h_base, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] if apply_control: if self.training and self.gradient_checkpointing: ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} h_ctrl = torch.utils.checkpoint.checkpoint(create_custom_forward(c_res), h_ctrl, temb, **ckpt_kwargs) else: h_ctrl = c_res(h_ctrl, temb) if c_attn is not None: h_ctrl = c_attn(h_ctrl, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] if apply_control: h_base = h_base + c2b(h_ctrl) * conditioning_scale base_output_states = base_output_states + (h_base,) ctrl_output_states = ctrl_output_states + (h_ctrl,) if self.base_downsamplers is not None: b2c = self.base_to_ctrl[-1] c2b = self.ctrl_to_base[-1] if apply_control: h_ctrl = torch.cat([h_ctrl, b2c(h_base)], dim=1) h_base = self.base_downsamplers(h_base) if apply_control: h_ctrl = self.ctrl_downsamplers(h_ctrl) if apply_control: h_base = h_base + c2b(h_ctrl) * conditioning_scale base_output_states = base_output_states + (h_base,) ctrl_output_states = ctrl_output_states + (h_ctrl,) return (h_base, h_ctrl, base_output_states, ctrl_output_states) class ControlNetXSCrossAttnMidBlock2D(nn.Module): def __init__(self, base_channels: int, ctrl_channels: int, temb_channels: Optional[int]=None, norm_num_groups: int=32, ctrl_max_norm_num_groups: int=32, transformer_layers_per_block: int=1, base_num_attention_heads: Optional[int]=1, ctrl_num_attention_heads: Optional[int]=1, cross_attention_dim: Optional[int]=1024, upcast_attention: bool=False, use_linear_projection: Optional[bool]=True): super().__init__() self.base_to_ctrl = 
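# --- Illustrative sketch (editor addition, not library source) ---------------
# The down/up-block forwards above wrap each resnet in `create_custom_forward`
# so it can be run through activation checkpointing during training. The same
# pattern with a toy module, assuming torch >= 1.11 for `use_reentrant=False`:
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

layer = nn.Linear(16, 16)
x = torch.randn(4, 16, requires_grad=True)
out = checkpoint(create_custom_forward(layer), x, use_reentrant=False)
out.sum().backward()   # activations are recomputed during the backward pass
# ------------------------------------------------------------------------------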
make_zero_conv(base_channels, base_channels) self.base_midblock = UNetMidBlock2DCrossAttn(transformer_layers_per_block=transformer_layers_per_block, in_channels=base_channels, temb_channels=temb_channels, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=base_num_attention_heads, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention) self.ctrl_midblock = UNetMidBlock2DCrossAttn(transformer_layers_per_block=transformer_layers_per_block, in_channels=ctrl_channels + base_channels, out_channels=ctrl_channels, temb_channels=temb_channels, resnet_groups=find_largest_factor(gcd(ctrl_channels, ctrl_channels + base_channels), ctrl_max_norm_num_groups), cross_attention_dim=cross_attention_dim, num_attention_heads=ctrl_num_attention_heads, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention) self.ctrl_to_base = make_zero_conv(ctrl_channels, base_channels) self.gradient_checkpointing = False @classmethod def from_modules(cls, base_midblock: UNetMidBlock2DCrossAttn, ctrl_midblock: MidBlockControlNetXSAdapter): base_to_ctrl = ctrl_midblock.base_to_ctrl ctrl_to_base = ctrl_midblock.ctrl_to_base ctrl_midblock = ctrl_midblock.midblock def get_first_cross_attention(midblock): return midblock.attentions[0].transformer_blocks[0].attn2 base_channels = ctrl_to_base.out_channels ctrl_channels = ctrl_to_base.in_channels transformer_layers_per_block = len(base_midblock.attentions[0].transformer_blocks) temb_channels = base_midblock.resnets[0].time_emb_proj.in_features num_groups = base_midblock.resnets[0].norm1.num_groups ctrl_num_groups = ctrl_midblock.resnets[0].norm1.num_groups base_num_attention_heads = get_first_cross_attention(base_midblock).heads ctrl_num_attention_heads = get_first_cross_attention(ctrl_midblock).heads cross_attention_dim = get_first_cross_attention(base_midblock).cross_attention_dim upcast_attention = get_first_cross_attention(base_midblock).upcast_attention use_linear_projection = base_midblock.attentions[0].use_linear_projection model = cls(base_channels=base_channels, ctrl_channels=ctrl_channels, temb_channels=temb_channels, norm_num_groups=num_groups, ctrl_max_norm_num_groups=ctrl_num_groups, transformer_layers_per_block=transformer_layers_per_block, base_num_attention_heads=base_num_attention_heads, ctrl_num_attention_heads=ctrl_num_attention_heads, cross_attention_dim=cross_attention_dim, upcast_attention=upcast_attention, use_linear_projection=use_linear_projection) model.base_to_ctrl.load_state_dict(base_to_ctrl.state_dict()) model.base_midblock.load_state_dict(base_midblock.state_dict()) model.ctrl_midblock.load_state_dict(ctrl_midblock.state_dict()) model.ctrl_to_base.load_state_dict(ctrl_to_base.state_dict()) return model def freeze_base_params(self) -> None: for param in self.parameters(): param.requires_grad = True for param in self.base_midblock.parameters(): param.requires_grad = False def forward(self, hidden_states_base: Tensor, temb: Tensor, encoder_hidden_states: Tensor, hidden_states_ctrl: Optional[Tensor]=None, conditioning_scale: Optional[float]=1.0, cross_attention_kwargs: Optional[Dict[str, Any]]=None, attention_mask: Optional[Tensor]=None, encoder_attention_mask: Optional[Tensor]=None, apply_control: bool=True) -> Tuple[Tensor, Tensor]: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') h_base = hidden_states_base h_ctrl = hidden_states_ctrl joint_args = {'temb': temb, 'encoder_hidden_states': encoder_hidden_states, 'attention_mask': attention_mask, 'cross_attention_kwargs': cross_attention_kwargs, 'encoder_attention_mask': encoder_attention_mask} if apply_control: h_ctrl = torch.cat([h_ctrl, self.base_to_ctrl(h_base)], dim=1) h_base = self.base_midblock(h_base, **joint_args) if apply_control: h_ctrl = self.ctrl_midblock(h_ctrl, **joint_args) h_base = h_base + self.ctrl_to_base(h_ctrl) * conditioning_scale return (h_base, h_ctrl) class ControlNetXSCrossAttnUpBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, ctrl_skip_channels: List[int], temb_channels: int, norm_num_groups: int=32, resolution_idx: Optional[int]=None, has_crossattn=True, transformer_layers_per_block: int=1, num_attention_heads: int=1, cross_attention_dim: int=1024, add_upsample: bool=True, upcast_attention: bool=False, use_linear_projection: Optional[bool]=True): super().__init__() resnets = [] attentions = [] ctrl_to_base = [] num_layers = 3 self.has_cross_attention = has_crossattn self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels ctrl_to_base.append(make_zero_conv(ctrl_skip_channels[i], resnet_in_channels)) resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, groups=norm_num_groups)) if has_crossattn: attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, norm_num_groups=norm_num_groups)) self.resnets = nn.ModuleList(resnets) self.attentions = nn.ModuleList(attentions) if has_crossattn else [None] * num_layers self.ctrl_to_base = nn.ModuleList(ctrl_to_base) if add_upsample: self.upsamplers = Upsample2D(out_channels, use_conv=True, out_channels=out_channels) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx @classmethod def from_modules(cls, base_upblock: CrossAttnUpBlock2D, ctrl_upblock: UpBlockControlNetXSAdapter): ctrl_to_base_skip_connections = ctrl_upblock.ctrl_to_base def get_first_cross_attention(block): return block.attentions[0].transformer_blocks[0].attn2 out_channels = base_upblock.resnets[0].out_channels in_channels = base_upblock.resnets[-1].in_channels - out_channels prev_output_channels = base_upblock.resnets[0].in_channels - out_channels ctrl_skip_channelss = [c.in_channels for c in ctrl_to_base_skip_connections] temb_channels = base_upblock.resnets[0].time_emb_proj.in_features num_groups = base_upblock.resnets[0].norm1.num_groups resolution_idx = base_upblock.resolution_idx if hasattr(base_upblock, 'attentions'): has_crossattn = True transformer_layers_per_block = len(base_upblock.attentions[0].transformer_blocks) num_attention_heads = get_first_cross_attention(base_upblock).heads cross_attention_dim = get_first_cross_attention(base_upblock).cross_attention_dim upcast_attention = get_first_cross_attention(base_upblock).upcast_attention use_linear_projection = 
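# --- Illustrative sketch (editor addition, not library source) ---------------
# Channel bookkeeping in `ControlNetXSCrossAttnUpBlock2D.__init__`: each of the
# three resnets concatenates the running hidden states with one skip
# connection, so its input width is `resnet_in_channels + res_skip_channels`.
# Worked out for hypothetical SD-like widths (not taken from the source):
in_channels, out_channels, prev_output_channel = 320, 640, 1280
num_layers = 3
for i in range(num_layers):
    res_skip_channels = in_channels if i == num_layers - 1 else out_channels
    resnet_in_channels = prev_output_channel if i == 0 else out_channels
    print(i, resnet_in_channels + res_skip_channels, "->", out_channels)
# i=0: 1280 + 640 = 1920 -> 640
# i=1:  640 + 640 = 1280 -> 640
# i=2:  640 + 320 =  960 -> 640
# ------------------------------------------------------------------------------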
base_upblock.attentions[0].use_linear_projection else: has_crossattn = False transformer_layers_per_block = None num_attention_heads = None cross_attention_dim = None upcast_attention = None use_linear_projection = None add_upsample = base_upblock.upsamplers is not None model = cls(in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channels, ctrl_skip_channels=ctrl_skip_channelss, temb_channels=temb_channels, norm_num_groups=num_groups, resolution_idx=resolution_idx, has_crossattn=has_crossattn, transformer_layers_per_block=transformer_layers_per_block, num_attention_heads=num_attention_heads, cross_attention_dim=cross_attention_dim, add_upsample=add_upsample, upcast_attention=upcast_attention, use_linear_projection=use_linear_projection) model.resnets.load_state_dict(base_upblock.resnets.state_dict()) if has_crossattn: model.attentions.load_state_dict(base_upblock.attentions.state_dict()) if add_upsample: model.upsamplers.load_state_dict(base_upblock.upsamplers[0].state_dict()) model.ctrl_to_base.load_state_dict(ctrl_to_base_skip_connections.state_dict()) return model def freeze_base_params(self) -> None: for param in self.parameters(): param.requires_grad = True base_parts = [self.resnets] if isinstance(self.attentions, nn.ModuleList): base_parts.append(self.attentions) if self.upsamplers is not None: base_parts.append(self.upsamplers) for part in base_parts: for param in part.parameters(): param.requires_grad = False def forward(self, hidden_states: Tensor, res_hidden_states_tuple_base: Tuple[Tensor, ...], res_hidden_states_tuple_ctrl: Tuple[Tensor, ...], temb: Tensor, encoder_hidden_states: Optional[Tensor]=None, conditioning_scale: Optional[float]=1.0, cross_attention_kwargs: Optional[Dict[str, Any]]=None, attention_mask: Optional[Tensor]=None, upsample_size: Optional[int]=None, encoder_attention_mask: Optional[Tensor]=None, apply_control: bool=True) -> Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and getattr(self, 'b2', None) def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward def maybe_apply_freeu_to_subblock(hidden_states, res_h_base): if is_freeu_enabled: return apply_freeu(self.resolution_idx, hidden_states, res_h_base, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) else: return (hidden_states, res_h_base) for (resnet, attn, c2b, res_h_base, res_h_ctrl) in zip(self.resnets, self.attentions, self.ctrl_to_base, reversed(res_hidden_states_tuple_base), reversed(res_hidden_states_tuple_ctrl)): if apply_control: hidden_states += c2b(res_h_ctrl) * conditioning_scale (hidden_states, res_h_base) = maybe_apply_freeu_to_subblock(hidden_states, res_h_base) hidden_states = torch.cat([hidden_states, res_h_base], dim=1) if self.training and self.gradient_checkpointing: ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) else: hidden_states = resnet(hidden_states, temb) if attn is not None: hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] if self.upsamplers is not None: hidden_states = self.upsamplers(hidden_states, upsample_size) return hidden_states def make_zero_conv(in_channels, out_channels=None): return zero_module(nn.Conv2d(in_channels, out_channels, 1, padding=0)) def zero_module(module): for p in module.parameters(): nn.init.zeros_(p) return module def find_largest_factor(number, max_factor): factor = max_factor if factor >= number: return number while factor != 0: residual = number % factor if residual == 0: return factor factor -= 1 # File: diffusers-main/src/diffusers/models/downsampling.py from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from ..utils import deprecate from .normalization import RMSNorm from .upsampling import upfirdn2d_native class Downsample1D(nn.Module): def __init__(self, channels: int, use_conv: bool=False, out_channels: Optional[int]=None, padding: int=1, name: str='conv'): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.padding = padding stride = 2 self.name = name if use_conv: self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding) else: assert self.channels == self.out_channels self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride) def forward(self, inputs: torch.Tensor) -> torch.Tensor: assert inputs.shape[1] == self.channels return self.conv(inputs) class Downsample2D(nn.Module): def __init__(self, channels: int, use_conv: bool=False, out_channels: Optional[int]=None, padding: int=1, name: str='conv', kernel_size=3, norm_type=None, eps=None, elementwise_affine=None, bias=True): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.padding = padding stride = 2 self.name = name if norm_type == 'ln_norm': self.norm = nn.LayerNorm(channels, eps, elementwise_affine) elif norm_type == 'rms_norm': self.norm = 
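# --- Illustrative sketch (editor addition, not library source) ---------------
# `zero_module` zero-initializes every parameter of a layer, so the zero
# convolutions contribute nothing to the base stream until trained;
# `find_largest_factor` picks a GroupNorm group count that divides the channel
# count while staying at or below `max_factor`. The helpers are re-stated here
# so the check runs on its own:
import torch.nn as nn

def zero_module(module):
    for p in module.parameters():
        nn.init.zeros_(p)
    return module

def find_largest_factor(number, max_factor):
    factor = max_factor
    if factor >= number:
        return number
    while factor != 0:
        if number % factor == 0:
            return factor
        factor -= 1

conv = zero_module(nn.Conv2d(4, 8, 1, padding=0))
assert all((p == 0).all() for p in conv.parameters())
assert find_largest_factor(96, max_factor=32) == 32   # 96 is divisible by 32
assert find_largest_factor(100, max_factor=32) == 25  # largest divisor of 100 that is <= 32
# ------------------------------------------------------------------------------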
RMSNorm(channels, eps, elementwise_affine) elif norm_type is None: self.norm = None else: raise ValueError(f'unknown norm_type: {norm_type}') if use_conv: conv = nn.Conv2d(self.channels, self.out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias) else: assert self.channels == self.out_channels conv = nn.AvgPool2d(kernel_size=stride, stride=stride) if name == 'conv': self.Conv2d_0 = conv self.conv = conv elif name == 'Conv2d_0': self.conv = conv else: self.conv = conv def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) assert hidden_states.shape[1] == self.channels if self.norm is not None: hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) if self.use_conv and self.padding == 0: pad = (0, 1, 0, 1) hidden_states = F.pad(hidden_states, pad, mode='constant', value=0) assert hidden_states.shape[1] == self.channels hidden_states = self.conv(hidden_states) return hidden_states class FirDownsample2D(nn.Module): def __init__(self, channels: Optional[int]=None, out_channels: Optional[int]=None, use_conv: bool=False, fir_kernel: Tuple[int, int, int, int]=(1, 3, 3, 1)): super().__init__() out_channels = out_channels if out_channels else channels if use_conv: self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) self.fir_kernel = fir_kernel self.use_conv = use_conv self.out_channels = out_channels def _downsample_2d(self, hidden_states: torch.Tensor, weight: Optional[torch.Tensor]=None, kernel: Optional[torch.Tensor]=None, factor: int=2, gain: float=1) -> torch.Tensor: assert isinstance(factor, int) and factor >= 1 if kernel is None: kernel = [1] * factor kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * gain if self.use_conv: (_, _, convH, convW) = weight.shape pad_value = kernel.shape[0] - factor + (convW - 1) stride_value = [factor, factor] upfirdn_input = upfirdn2d_native(hidden_states, torch.tensor(kernel, device=hidden_states.device), pad=((pad_value + 1) // 2, pad_value // 2)) output = F.conv2d(upfirdn_input, weight, stride=stride_value, padding=0) else: pad_value = kernel.shape[0] - factor output = upfirdn2d_native(hidden_states, torch.tensor(kernel, device=hidden_states.device), down=factor, pad=((pad_value + 1) // 2, pad_value // 2)) return output def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.use_conv: downsample_input = self._downsample_2d(hidden_states, weight=self.Conv2d_0.weight, kernel=self.fir_kernel) hidden_states = downsample_input + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: hidden_states = self._downsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) return hidden_states class KDownsample2D(nn.Module): def __init__(self, pad_mode: str='reflect'): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) self.pad = kernel_1d.shape[1] // 2 - 1 self.register_buffer('kernel', kernel_1d.T @ kernel_1d, persistent=False) def forward(self, inputs: torch.Tensor) -> torch.Tensor: inputs = F.pad(inputs, (self.pad,) * 4, 
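# --- Illustrative sketch (editor addition, not library source) ---------------
# `Downsample2D` with `use_conv=True` is a stride-2 convolution, so the spatial
# resolution is halved; `KDownsample2D` blurs with the separable binomial
# kernel [1, 3, 3, 1] / 8 before its stride-2 step. Quick shape/kernel check:
import torch
import torch.nn as nn

conv_down = nn.Conv2d(8, 8, kernel_size=3, stride=2, padding=1)
x = torch.randn(1, 8, 64, 64)
assert conv_down(x).shape == (1, 8, 32, 32)

kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]])
kernel_2d = kernel_1d.T @ kernel_1d               # 4x4 blur kernel
assert torch.isclose(kernel_2d.sum(), torch.tensor(1.0))
# ------------------------------------------------------------------------------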
self.pad_mode) weight = inputs.new_zeros([inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) indices = torch.arange(inputs.shape[1], device=inputs.device) kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) weight[indices, indices] = kernel return F.conv2d(inputs, weight, stride=2) class CogVideoXDownsample3D(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: int=3, stride: int=2, padding: int=0, compress_time: bool=False): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.compress_time = compress_time def forward(self, x: torch.Tensor) -> torch.Tensor: if self.compress_time: (batch_size, channels, frames, height, width) = x.shape x = x.permute(0, 3, 4, 1, 2).reshape(batch_size * height * width, channels, frames) if x.shape[-1] % 2 == 1: (x_first, x_rest) = (x[..., 0], x[..., 1:]) if x_rest.shape[-1] > 0: x_rest = F.avg_pool1d(x_rest, kernel_size=2, stride=2) x = torch.cat([x_first[..., None], x_rest], dim=-1) x = x.reshape(batch_size, height, width, channels, x.shape[-1]).permute(0, 3, 4, 1, 2) else: x = F.avg_pool1d(x, kernel_size=2, stride=2) x = x.reshape(batch_size, height, width, channels, x.shape[-1]).permute(0, 3, 4, 1, 2) pad = (0, 1, 0, 1) x = F.pad(x, pad, mode='constant', value=0) (batch_size, channels, frames, height, width) = x.shape x = x.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channels, height, width) x = self.conv(x) x = x.reshape(batch_size, frames, x.shape[1], x.shape[2], x.shape[3]).permute(0, 2, 1, 3, 4) return x def downsample_2d(hidden_states: torch.Tensor, kernel: Optional[torch.Tensor]=None, factor: int=2, gain: float=1) -> torch.Tensor: assert isinstance(factor, int) and factor >= 1 if kernel is None: kernel = [1] * factor kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * gain pad_value = kernel.shape[0] - factor output = upfirdn2d_native(hidden_states, kernel.to(device=hidden_states.device), down=factor, pad=((pad_value + 1) // 2, pad_value // 2)) return output # File: diffusers-main/src/diffusers/models/embeddings.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from torch import nn from ..utils import deprecate from .activations import FP32SiLU, get_activation from .attention_processor import Attention def get_timestep_embedding(timesteps: torch.Tensor, embedding_dim: int, flip_sin_to_cos: bool=False, downscale_freq_shift: float=1, scale: float=1, max_period: int=10000): assert len(timesteps.shape) == 1, 'Timesteps should be a 1d-array' half_dim = embedding_dim // 2 exponent = -math.log(max_period) * torch.arange(start=0, end=half_dim, dtype=torch.float32, device=timesteps.device) exponent = exponent / (half_dim - downscale_freq_shift) emb = torch.exp(exponent) emb = timesteps[:, None].float() * emb[None, :] emb = scale * emb emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) if flip_sin_to_cos: emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) return emb def get_3d_sincos_pos_embed(embed_dim: int, spatial_size: Union[int, Tuple[int, int]], temporal_size: int, spatial_interpolation_scale: float=1.0, temporal_interpolation_scale: float=1.0) -> np.ndarray: if embed_dim % 4 != 0: raise ValueError('`embed_dim` must be 
divisible by 4') if isinstance(spatial_size, int): spatial_size = (spatial_size, spatial_size) embed_dim_spatial = 3 * embed_dim // 4 embed_dim_temporal = embed_dim // 4 grid_h = np.arange(spatial_size[1], dtype=np.float32) / spatial_interpolation_scale grid_w = np.arange(spatial_size[0], dtype=np.float32) / spatial_interpolation_scale grid = np.meshgrid(grid_w, grid_h) grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, spatial_size[1], spatial_size[0]]) pos_embed_spatial = get_2d_sincos_pos_embed_from_grid(embed_dim_spatial, grid) grid_t = np.arange(temporal_size, dtype=np.float32) / temporal_interpolation_scale pos_embed_temporal = get_1d_sincos_pos_embed_from_grid(embed_dim_temporal, grid_t) pos_embed_spatial = pos_embed_spatial[np.newaxis, :, :] pos_embed_spatial = np.repeat(pos_embed_spatial, temporal_size, axis=0) pos_embed_temporal = pos_embed_temporal[:, np.newaxis, :] pos_embed_temporal = np.repeat(pos_embed_temporal, spatial_size[0] * spatial_size[1], axis=1) pos_embed = np.concatenate([pos_embed_temporal, pos_embed_spatial], axis=-1) return pos_embed def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16): if isinstance(grid_size, int): grid_size = (grid_size, grid_size) grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale grid = np.meshgrid(grid_w, grid_h) grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, grid_size[1], grid_size[0]]) pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) if cls_token and extra_tokens > 0: pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0) return pos_embed def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): if embed_dim % 2 != 0: raise ValueError('embed_dim must be divisible by 2') emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) emb = np.concatenate([emb_h, emb_w], axis=1) return emb def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): if embed_dim % 2 != 0: raise ValueError('embed_dim must be divisible by 2') omega = np.arange(embed_dim // 2, dtype=np.float64) omega /= embed_dim / 2.0 omega = 1.0 / 10000 ** omega pos = pos.reshape(-1) out = np.einsum('m,d->md', pos, omega) emb_sin = np.sin(out) emb_cos = np.cos(out) emb = np.concatenate([emb_sin, emb_cos], axis=1) return emb class PatchEmbed(nn.Module): def __init__(self, height=224, width=224, patch_size=16, in_channels=3, embed_dim=768, layer_norm=False, flatten=True, bias=True, interpolation_scale=1, pos_embed_type='sincos', pos_embed_max_size=None): super().__init__() num_patches = height // patch_size * (width // patch_size) self.flatten = flatten self.layer_norm = layer_norm self.pos_embed_max_size = pos_embed_max_size self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias) if layer_norm: self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-06) else: self.norm = None self.patch_size = patch_size (self.height, self.width) = (height // patch_size, width // patch_size) self.base_size = height // patch_size self.interpolation_scale = interpolation_scale if pos_embed_max_size: grid_size = pos_embed_max_size else: grid_size = int(num_patches ** 0.5) if pos_embed_type is None: self.pos_embed = None elif pos_embed_type == 'sincos': pos_embed = 
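# --- Illustrative sketch (editor addition, not library source) ---------------
# Shape bookkeeping for the sin/cos position embeddings above: a 1-D grid of
# `n` positions and an even `embed_dim` produce an (n, embed_dim) table, half
# sine and half cosine. Stand-alone re-derivation of the same formula:
import numpy as np

embed_dim, n_positions = 8, 5
omega = np.arange(embed_dim // 2, dtype=np.float64)
omega = 1.0 / 10000 ** (omega / (embed_dim / 2.0))
pos = np.arange(n_positions, dtype=np.float64)
out = np.einsum('m,d->md', pos, omega)
emb = np.concatenate([np.sin(out), np.cos(out)], axis=1)
assert emb.shape == (n_positions, embed_dim)
# get_2d_sincos_pos_embed then splits embed_dim between the height and width
# axes, giving a (grid_h * grid_w, embed_dim) table.
# ------------------------------------------------------------------------------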
get_2d_sincos_pos_embed(embed_dim, grid_size, base_size=self.base_size, interpolation_scale=self.interpolation_scale) persistent = True if pos_embed_max_size else False self.register_buffer('pos_embed', torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=persistent) else: raise ValueError(f'Unsupported pos_embed_type: {pos_embed_type}') def cropped_pos_embed(self, height, width): if self.pos_embed_max_size is None: raise ValueError('`pos_embed_max_size` must be set for cropping.') height = height // self.patch_size width = width // self.patch_size if height > self.pos_embed_max_size: raise ValueError(f'Height ({height}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}.') if width > self.pos_embed_max_size: raise ValueError(f'Width ({width}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}.') top = (self.pos_embed_max_size - height) // 2 left = (self.pos_embed_max_size - width) // 2 spatial_pos_embed = self.pos_embed.reshape(1, self.pos_embed_max_size, self.pos_embed_max_size, -1) spatial_pos_embed = spatial_pos_embed[:, top:top + height, left:left + width, :] spatial_pos_embed = spatial_pos_embed.reshape(1, -1, spatial_pos_embed.shape[-1]) return spatial_pos_embed def forward(self, latent): if self.pos_embed_max_size is not None: (height, width) = latent.shape[-2:] else: (height, width) = (latent.shape[-2] // self.patch_size, latent.shape[-1] // self.patch_size) latent = self.proj(latent) if self.flatten: latent = latent.flatten(2).transpose(1, 2) if self.layer_norm: latent = self.norm(latent) if self.pos_embed is None: return latent.to(latent.dtype) if self.pos_embed_max_size: pos_embed = self.cropped_pos_embed(height, width) elif self.height != height or self.width != width: pos_embed = get_2d_sincos_pos_embed(embed_dim=self.pos_embed.shape[-1], grid_size=(height, width), base_size=self.base_size, interpolation_scale=self.interpolation_scale) pos_embed = torch.from_numpy(pos_embed).float().unsqueeze(0).to(latent.device) else: pos_embed = self.pos_embed return (latent + pos_embed).to(latent.dtype) class LuminaPatchEmbed(nn.Module): def __init__(self, patch_size=2, in_channels=4, embed_dim=768, bias=True): super().__init__() self.patch_size = patch_size self.proj = nn.Linear(in_features=patch_size * patch_size * in_channels, out_features=embed_dim, bias=bias) def forward(self, x, freqs_cis): freqs_cis = freqs_cis.to(x[0].device) patch_height = patch_width = self.patch_size (batch_size, channel, height, width) = x.size() (height_tokens, width_tokens) = (height // patch_height, width // patch_width) x = x.view(batch_size, channel, height_tokens, patch_height, width_tokens, patch_width).permute(0, 2, 4, 1, 3, 5) x = x.flatten(3) x = self.proj(x) x = x.flatten(1, 2) mask = torch.ones(x.shape[0], x.shape[1], dtype=torch.int32, device=x.device) return (x, mask, [(height, width)] * batch_size, freqs_cis[:height_tokens, :width_tokens].flatten(0, 1).unsqueeze(0)) class CogVideoXPatchEmbed(nn.Module): def __init__(self, patch_size: int=2, in_channels: int=16, embed_dim: int=1920, text_embed_dim: int=4096, bias: bool=True, sample_width: int=90, sample_height: int=60, sample_frames: int=49, temporal_compression_ratio: int=4, max_text_seq_length: int=226, spatial_interpolation_scale: float=1.875, temporal_interpolation_scale: float=1.0, use_positional_embeddings: bool=True) -> None: super().__init__() self.patch_size = patch_size self.embed_dim = embed_dim self.sample_height = sample_height self.sample_width = sample_width self.sample_frames = 
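# --- Illustrative sketch (editor addition, not library source) ---------------
# `PatchEmbed` turns an image latent into a token sequence: a Conv2d with
# kernel = stride = patch_size yields (H / p) * (W / p) patches, which are then
# flattened and transposed to (batch, num_patches, embed_dim). Toy numbers:
import torch
import torch.nn as nn

patch_size, in_channels, embed_dim = 2, 4, 32
latent = torch.randn(1, in_channels, 64, 64)
proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
tokens = proj(latent).flatten(2).transpose(1, 2)
assert tokens.shape == (1, (64 // patch_size) ** 2, embed_dim)   # (1, 1024, 32)
# ------------------------------------------------------------------------------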
sample_frames self.temporal_compression_ratio = temporal_compression_ratio self.max_text_seq_length = max_text_seq_length self.spatial_interpolation_scale = spatial_interpolation_scale self.temporal_interpolation_scale = temporal_interpolation_scale self.use_positional_embeddings = use_positional_embeddings self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias) self.text_proj = nn.Linear(text_embed_dim, embed_dim) if use_positional_embeddings: pos_embedding = self._get_positional_embeddings(sample_height, sample_width, sample_frames) self.register_buffer('pos_embedding', pos_embedding, persistent=False) def _get_positional_embeddings(self, sample_height: int, sample_width: int, sample_frames: int) -> torch.Tensor: post_patch_height = sample_height // self.patch_size post_patch_width = sample_width // self.patch_size post_time_compression_frames = (sample_frames - 1) // self.temporal_compression_ratio + 1 num_patches = post_patch_height * post_patch_width * post_time_compression_frames pos_embedding = get_3d_sincos_pos_embed(self.embed_dim, (post_patch_width, post_patch_height), post_time_compression_frames, self.spatial_interpolation_scale, self.temporal_interpolation_scale) pos_embedding = torch.from_numpy(pos_embedding).flatten(0, 1) joint_pos_embedding = torch.zeros(1, self.max_text_seq_length + num_patches, self.embed_dim, requires_grad=False) joint_pos_embedding.data[:, self.max_text_seq_length:].copy_(pos_embedding) return joint_pos_embedding def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): text_embeds = self.text_proj(text_embeds) (batch, num_frames, channels, height, width) = image_embeds.shape image_embeds = image_embeds.reshape(-1, channels, height, width) image_embeds = self.proj(image_embeds) image_embeds = image_embeds.view(batch, num_frames, *image_embeds.shape[1:]) image_embeds = image_embeds.flatten(3).transpose(2, 3) image_embeds = image_embeds.flatten(1, 2) embeds = torch.cat([text_embeds, image_embeds], dim=1).contiguous() if self.use_positional_embeddings: pre_time_compression_frames = (num_frames - 1) * self.temporal_compression_ratio + 1 if self.sample_height != height or self.sample_width != width or self.sample_frames != pre_time_compression_frames: pos_embedding = self._get_positional_embeddings(height, width, pre_time_compression_frames) pos_embedding = pos_embedding.to(embeds.device, dtype=embeds.dtype) else: pos_embedding = self.pos_embedding embeds = embeds + pos_embedding return embeds def get_3d_rotary_pos_embed(embed_dim, crops_coords, grid_size, temporal_size, theta: int=10000, use_real: bool=True) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: if use_real is not True: raise ValueError(' `use_real = False` is not currently supported for get_3d_rotary_pos_embed') (start, stop) = crops_coords (grid_size_h, grid_size_w) = grid_size grid_h = np.linspace(start[0], stop[0], grid_size_h, endpoint=False, dtype=np.float32) grid_w = np.linspace(start[1], stop[1], grid_size_w, endpoint=False, dtype=np.float32) grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32) dim_t = embed_dim // 4 dim_h = embed_dim // 8 * 3 dim_w = embed_dim // 8 * 3 freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, use_real=True) freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, use_real=True) freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, use_real=True) def combine_time_height_width(freqs_t, freqs_h, freqs_w): freqs_t = freqs_t[:, None, None, :].expand(-1, 
grid_size_h, grid_size_w, -1) freqs_h = freqs_h[None, :, None, :].expand(temporal_size, -1, grid_size_w, -1) freqs_w = freqs_w[None, None, :, :].expand(temporal_size, grid_size_h, -1, -1) freqs = torch.cat([freqs_t, freqs_h, freqs_w], dim=-1) freqs = freqs.view(temporal_size * grid_size_h * grid_size_w, -1) return freqs (t_cos, t_sin) = freqs_t (h_cos, h_sin) = freqs_h (w_cos, w_sin) = freqs_w cos = combine_time_height_width(t_cos, h_cos, w_cos) sin = combine_time_height_width(t_sin, h_sin, w_sin) return (cos, sin) def get_2d_rotary_pos_embed(embed_dim, crops_coords, grid_size, use_real=True): (start, stop) = crops_coords grid_h = np.linspace(start[0], stop[0], grid_size[0], endpoint=False, dtype=np.float32) grid_w = np.linspace(start[1], stop[1], grid_size[1], endpoint=False, dtype=np.float32) grid = np.meshgrid(grid_w, grid_h) grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, *grid.shape[1:]]) pos_embed = get_2d_rotary_pos_embed_from_grid(embed_dim, grid, use_real=use_real) return pos_embed def get_2d_rotary_pos_embed_from_grid(embed_dim, grid, use_real=False): assert embed_dim % 4 == 0 emb_h = get_1d_rotary_pos_embed(embed_dim // 2, grid[0].reshape(-1), use_real=use_real) emb_w = get_1d_rotary_pos_embed(embed_dim // 2, grid[1].reshape(-1), use_real=use_real) if use_real: cos = torch.cat([emb_h[0], emb_w[0]], dim=1) sin = torch.cat([emb_h[1], emb_w[1]], dim=1) return (cos, sin) else: emb = torch.cat([emb_h, emb_w], dim=1) return emb def get_2d_rotary_pos_embed_lumina(embed_dim, len_h, len_w, linear_factor=1.0, ntk_factor=1.0): assert embed_dim % 4 == 0 emb_h = get_1d_rotary_pos_embed(embed_dim // 2, len_h, linear_factor=linear_factor, ntk_factor=ntk_factor) emb_w = get_1d_rotary_pos_embed(embed_dim // 2, len_w, linear_factor=linear_factor, ntk_factor=ntk_factor) emb_h = emb_h.view(len_h, 1, embed_dim // 4, 1).repeat(1, len_w, 1, 1) emb_w = emb_w.view(1, len_w, embed_dim // 4, 1).repeat(len_h, 1, 1, 1) emb = torch.cat([emb_h, emb_w], dim=-1).flatten(2) return emb def get_1d_rotary_pos_embed(dim: int, pos: Union[np.ndarray, int], theta: float=10000.0, use_real=False, linear_factor=1.0, ntk_factor=1.0, repeat_interleave_real=True, freqs_dtype=torch.float32): assert dim % 2 == 0 if isinstance(pos, int): pos = torch.arange(pos) if isinstance(pos, np.ndarray): pos = torch.from_numpy(pos) theta = theta * ntk_factor freqs = 1.0 / theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype, device=pos.device)[:dim // 2] / dim) / linear_factor freqs = torch.outer(pos, freqs) if use_real and repeat_interleave_real: freqs_cos = freqs.cos().repeat_interleave(2, dim=1).float() freqs_sin = freqs.sin().repeat_interleave(2, dim=1).float() return (freqs_cos, freqs_sin) elif use_real: freqs_cos = torch.cat([freqs.cos(), freqs.cos()], dim=-1).float() freqs_sin = torch.cat([freqs.sin(), freqs.sin()], dim=-1).float() return (freqs_cos, freqs_sin) else: freqs_cis = torch.polar(torch.ones_like(freqs), freqs) return freqs_cis def apply_rotary_emb(x: torch.Tensor, freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]], use_real: bool=True, use_real_unbind_dim: int=-1) -> Tuple[torch.Tensor, torch.Tensor]: if use_real: (cos, sin) = freqs_cis cos = cos[None, None] sin = sin[None, None] (cos, sin) = (cos.to(x.device), sin.to(x.device)) if use_real_unbind_dim == -1: (x_real, x_imag) = x.reshape(*x.shape[:-1], -1, 2).unbind(-1) x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) elif use_real_unbind_dim == -2: (x_real, x_imag) = x.reshape(*x.shape[:-1], 2, -1).unbind(-2) x_rotated = torch.cat([-x_imag, x_real], 
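# --- Illustrative sketch (editor addition, not library source) ---------------
# The rotary embedding above (use_real=True, repeat_interleave_real=True)
# returns per-position cos/sin tables of width `dim`, and `apply_rotary_emb`
# combines them with the pairwise-rotated features. Minimal stand-alone version:
import torch

dim, seq_len = 8, 4
pos = torch.arange(seq_len)
freqs = 1.0 / 10000 ** (torch.arange(0, dim, 2, dtype=torch.float32)[: dim // 2] / dim)
freqs = torch.outer(pos.float(), freqs)                 # (seq_len, dim // 2)
cos = freqs.cos().repeat_interleave(2, dim=1)           # (seq_len, dim)
sin = freqs.sin().repeat_interleave(2, dim=1)

x = torch.randn(1, 1, seq_len, dim)                     # (batch, heads, seq, dim)
x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1)
x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3)
out = x * cos[None, None] + x_rotated * sin[None, None]
assert out.shape == x.shape
# ------------------------------------------------------------------------------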
dim=-1) else: raise ValueError(f'`use_real_unbind_dim={use_real_unbind_dim}` but should be -1 or -2.') out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype) return out else: x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2)) freqs_cis = freqs_cis.unsqueeze(2) x_out = torch.view_as_real(x_rotated * freqs_cis).flatten(3) return x_out.type_as(x) class FluxPosEmbed(nn.Module): def __init__(self, theta: int, axes_dim: List[int]): super().__init__() self.theta = theta self.axes_dim = axes_dim def forward(self, ids: torch.Tensor) -> torch.Tensor: n_axes = ids.shape[-1] cos_out = [] sin_out = [] pos = ids.float() is_mps = ids.device.type == 'mps' freqs_dtype = torch.float32 if is_mps else torch.float64 for i in range(n_axes): (cos, sin) = get_1d_rotary_pos_embed(self.axes_dim[i], pos[:, i], repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype) cos_out.append(cos) sin_out.append(sin) freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device) freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device) return (freqs_cos, freqs_sin) class TimestepEmbedding(nn.Module): def __init__(self, in_channels: int, time_embed_dim: int, act_fn: str='silu', out_dim: int=None, post_act_fn: Optional[str]=None, cond_proj_dim=None, sample_proj_bias=True): super().__init__() self.linear_1 = nn.Linear(in_channels, time_embed_dim, sample_proj_bias) if cond_proj_dim is not None: self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False) else: self.cond_proj = None self.act = get_activation(act_fn) if out_dim is not None: time_embed_dim_out = out_dim else: time_embed_dim_out = time_embed_dim self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out, sample_proj_bias) if post_act_fn is None: self.post_act = None else: self.post_act = get_activation(post_act_fn) def forward(self, sample, condition=None): if condition is not None: sample = sample + self.cond_proj(condition) sample = self.linear_1(sample) if self.act is not None: sample = self.act(sample) sample = self.linear_2(sample) if self.post_act is not None: sample = self.post_act(sample) return sample class Timesteps(nn.Module): def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float, scale: int=1): super().__init__() self.num_channels = num_channels self.flip_sin_to_cos = flip_sin_to_cos self.downscale_freq_shift = downscale_freq_shift self.scale = scale def forward(self, timesteps): t_emb = get_timestep_embedding(timesteps, self.num_channels, flip_sin_to_cos=self.flip_sin_to_cos, downscale_freq_shift=self.downscale_freq_shift, scale=self.scale) return t_emb class GaussianFourierProjection(nn.Module): def __init__(self, embedding_size: int=256, scale: float=1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False): super().__init__() self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) self.log = log self.flip_sin_to_cos = flip_sin_to_cos if set_W_to_weight: del self.weight self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) self.weight = self.W del self.W def forward(self, x): if self.log: x = torch.log(x) x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi if self.flip_sin_to_cos: out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1) else: out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) return out class SinusoidalPositionalEmbedding(nn.Module): def __init__(self, embed_dim: int, max_seq_length: int=32): super().__init__() position = torch.arange(max_seq_length).unsqueeze(1) div_term = 
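# --- Illustrative sketch (editor addition, not library source) ---------------
# Typical pairing of the two modules above: `Timesteps` produces the fixed
# sinusoidal features and `TimestepEmbedding` maps them through a small MLP.
# Assumes a local diffusers install; the constructor arguments mirror the
# definitions in this file:
import torch
from diffusers.models.embeddings import TimestepEmbedding, Timesteps

time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0)
time_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=1024)

timesteps = torch.tensor([10, 500])            # one timestep per batch element
t_emb = time_proj(timesteps)                   # (2, 256) sinusoidal features
assert time_embedder(t_emb).shape == (2, 1024)
# ------------------------------------------------------------------------------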
torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim)) pe = torch.zeros(1, max_seq_length, embed_dim) pe[0, :, 0::2] = torch.sin(position * div_term) pe[0, :, 1::2] = torch.cos(position * div_term) self.register_buffer('pe', pe) def forward(self, x): (_, seq_length, _) = x.shape x = x + self.pe[:, :seq_length] return x class ImagePositionalEmbeddings(nn.Module): def __init__(self, num_embed: int, height: int, width: int, embed_dim: int): super().__init__() self.height = height self.width = width self.num_embed = num_embed self.embed_dim = embed_dim self.emb = nn.Embedding(self.num_embed, embed_dim) self.height_emb = nn.Embedding(self.height, embed_dim) self.width_emb = nn.Embedding(self.width, embed_dim) def forward(self, index): emb = self.emb(index) height_emb = self.height_emb(torch.arange(self.height, device=index.device).view(1, self.height)) height_emb = height_emb.unsqueeze(2) width_emb = self.width_emb(torch.arange(self.width, device=index.device).view(1, self.width)) width_emb = width_emb.unsqueeze(1) pos_emb = height_emb + width_emb pos_emb = pos_emb.view(1, self.height * self.width, -1) emb = emb + pos_emb[:, :emb.shape[1], :] return emb class LabelEmbedding(nn.Module): def __init__(self, num_classes, hidden_size, dropout_prob): super().__init__() use_cfg_embedding = dropout_prob > 0 self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size) self.num_classes = num_classes self.dropout_prob = dropout_prob def token_drop(self, labels, force_drop_ids=None): if force_drop_ids is None: drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob else: drop_ids = torch.tensor(force_drop_ids == 1) labels = torch.where(drop_ids, self.num_classes, labels) return labels def forward(self, labels: torch.LongTensor, force_drop_ids=None): use_dropout = self.dropout_prob > 0 if self.training and use_dropout or force_drop_ids is not None: labels = self.token_drop(labels, force_drop_ids) embeddings = self.embedding_table(labels) return embeddings class TextImageProjection(nn.Module): def __init__(self, text_embed_dim: int=1024, image_embed_dim: int=768, cross_attention_dim: int=768, num_image_text_embeds: int=10): super().__init__() self.num_image_text_embeds = num_image_text_embeds self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim) self.text_proj = nn.Linear(text_embed_dim, cross_attention_dim) def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): batch_size = text_embeds.shape[0] image_text_embeds = self.image_embeds(image_embeds) image_text_embeds = image_text_embeds.reshape(batch_size, self.num_image_text_embeds, -1) text_embeds = self.text_proj(text_embeds) return torch.cat([image_text_embeds, text_embeds], dim=1) class ImageProjection(nn.Module): def __init__(self, image_embed_dim: int=768, cross_attention_dim: int=768, num_image_text_embeds: int=32): super().__init__() self.num_image_text_embeds = num_image_text_embeds self.image_embeds = nn.Linear(image_embed_dim, self.num_image_text_embeds * cross_attention_dim) self.norm = nn.LayerNorm(cross_attention_dim) def forward(self, image_embeds: torch.Tensor): batch_size = image_embeds.shape[0] image_embeds = self.image_embeds(image_embeds) image_embeds = image_embeds.reshape(batch_size, self.num_image_text_embeds, -1) image_embeds = self.norm(image_embeds) return image_embeds class IPAdapterFullImageProjection(nn.Module): def __init__(self, image_embed_dim=1024, cross_attention_dim=1024): super().__init__() from 
.attention import FeedForward self.ff = FeedForward(image_embed_dim, cross_attention_dim, mult=1, activation_fn='gelu') self.norm = nn.LayerNorm(cross_attention_dim) def forward(self, image_embeds: torch.Tensor): return self.norm(self.ff(image_embeds)) class IPAdapterFaceIDImageProjection(nn.Module): def __init__(self, image_embed_dim=1024, cross_attention_dim=1024, mult=1, num_tokens=1): super().__init__() from .attention import FeedForward self.num_tokens = num_tokens self.cross_attention_dim = cross_attention_dim self.ff = FeedForward(image_embed_dim, cross_attention_dim * num_tokens, mult=mult, activation_fn='gelu') self.norm = nn.LayerNorm(cross_attention_dim) def forward(self, image_embeds: torch.Tensor): x = self.ff(image_embeds) x = x.reshape(-1, self.num_tokens, self.cross_attention_dim) return self.norm(x) class CombinedTimestepLabelEmbeddings(nn.Module): def __init__(self, num_classes, embedding_dim, class_dropout_prob=0.1): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=1) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.class_embedder = LabelEmbedding(num_classes, embedding_dim, class_dropout_prob) def forward(self, timestep, class_labels, hidden_dtype=None): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) class_labels = self.class_embedder(class_labels) conditioning = timesteps_emb + class_labels return conditioning class CombinedTimestepTextProjEmbeddings(nn.Module): def __init__(self, embedding_dim, pooled_projection_dim): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn='silu') def forward(self, timestep, pooled_projection): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) pooled_projections = self.text_embedder(pooled_projection) conditioning = timesteps_emb + pooled_projections return conditioning class CombinedTimestepGuidanceTextProjEmbeddings(nn.Module): def __init__(self, embedding_dim, pooled_projection_dim): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn='silu') def forward(self, timestep, guidance, pooled_projection): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) guidance_proj = self.time_proj(guidance) guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) time_guidance_emb = timesteps_emb + guidance_emb pooled_projections = self.text_embedder(pooled_projection) conditioning = time_guidance_emb + pooled_projections return conditioning class HunyuanDiTAttentionPool(nn.Module): def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, 
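# --- Illustrative sketch (editor addition, not library source) ---------------
# `CombinedTimestepLabelEmbeddings` relies on `LabelEmbedding`, which reserves
# one extra row of the embedding table as the "null" class for classifier-free
# guidance: dropped labels are remapped to index `num_classes`. A stand-alone
# rendition of that `token_drop` step with made-up numbers:
import torch

num_classes, dropout_prob = 10, 0.1
labels = torch.tensor([3, 7, 9])
drop_ids = torch.rand(labels.shape[0]) < dropout_prob
labels = torch.where(drop_ids, torch.tensor(num_classes), labels)
table = torch.nn.Embedding(num_classes + 1, 16)   # +1 row for the null class
assert table(labels).shape == (3, 16)
# ------------------------------------------------------------------------------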
embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.permute(1, 0, 2) x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) x = x + self.positional_embedding[:, None, :].to(x.dtype) (x, _) = F.multi_head_attention_forward(query=x[:1], key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=False, dropout_p=0, out_proj_weight=self.c_proj.weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False) return x.squeeze(0) class HunyuanCombinedTimestepTextSizeStyleEmbedding(nn.Module): def __init__(self, embedding_dim, pooled_projection_dim=1024, seq_len=256, cross_attention_dim=2048, use_style_cond_and_image_meta_size=True): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.size_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.pooler = HunyuanDiTAttentionPool(seq_len, cross_attention_dim, num_heads=8, output_dim=pooled_projection_dim) self.use_style_cond_and_image_meta_size = use_style_cond_and_image_meta_size if use_style_cond_and_image_meta_size: self.style_embedder = nn.Embedding(1, embedding_dim) extra_in_dim = 256 * 6 + embedding_dim + pooled_projection_dim else: extra_in_dim = pooled_projection_dim self.extra_embedder = PixArtAlphaTextProjection(in_features=extra_in_dim, hidden_size=embedding_dim * 4, out_features=embedding_dim, act_fn='silu_fp32') def forward(self, timestep, encoder_hidden_states, image_meta_size, style, hidden_dtype=None): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) pooled_projections = self.pooler(encoder_hidden_states) if self.use_style_cond_and_image_meta_size: image_meta_size = self.size_proj(image_meta_size.view(-1)) image_meta_size = image_meta_size.to(dtype=hidden_dtype) image_meta_size = image_meta_size.view(-1, 6 * 256) style_embedding = self.style_embedder(style) extra_cond = torch.cat([pooled_projections, image_meta_size, style_embedding], dim=1) else: extra_cond = torch.cat([pooled_projections], dim=1) conditioning = timesteps_emb + self.extra_embedder(extra_cond) return conditioning class LuminaCombinedTimestepCaptionEmbedding(nn.Module): def __init__(self, hidden_size=4096, cross_attention_dim=2048, frequency_embedding_size=256): super().__init__() self.time_proj = Timesteps(num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0.0) self.timestep_embedder = TimestepEmbedding(in_channels=frequency_embedding_size, time_embed_dim=hidden_size) self.caption_embedder = nn.Sequential(nn.LayerNorm(cross_attention_dim), nn.Linear(cross_attention_dim, hidden_size, bias=True)) def forward(self, timestep, caption_feat, caption_mask): time_freq = self.time_proj(timestep) time_embed = self.timestep_embedder(time_freq.to(dtype=self.timestep_embedder.linear_1.weight.dtype)) caption_mask_float = caption_mask.float().unsqueeze(-1) caption_feats_pool = (caption_feat * caption_mask_float).sum(dim=1) / 
caption_mask_float.sum(dim=1) caption_feats_pool = caption_feats_pool.to(caption_feat) caption_embed = self.caption_embedder(caption_feats_pool) conditioning = time_embed + caption_embed return conditioning class TextTimeEmbedding(nn.Module): def __init__(self, encoder_dim: int, time_embed_dim: int, num_heads: int=64): super().__init__() self.norm1 = nn.LayerNorm(encoder_dim) self.pool = AttentionPooling(num_heads, encoder_dim) self.proj = nn.Linear(encoder_dim, time_embed_dim) self.norm2 = nn.LayerNorm(time_embed_dim) def forward(self, hidden_states): hidden_states = self.norm1(hidden_states) hidden_states = self.pool(hidden_states) hidden_states = self.proj(hidden_states) hidden_states = self.norm2(hidden_states) return hidden_states class TextImageTimeEmbedding(nn.Module): def __init__(self, text_embed_dim: int=768, image_embed_dim: int=768, time_embed_dim: int=1536): super().__init__() self.text_proj = nn.Linear(text_embed_dim, time_embed_dim) self.text_norm = nn.LayerNorm(time_embed_dim) self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): time_text_embeds = self.text_proj(text_embeds) time_text_embeds = self.text_norm(time_text_embeds) time_image_embeds = self.image_proj(image_embeds) return time_image_embeds + time_text_embeds class ImageTimeEmbedding(nn.Module): def __init__(self, image_embed_dim: int=768, time_embed_dim: int=1536): super().__init__() self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) self.image_norm = nn.LayerNorm(time_embed_dim) def forward(self, image_embeds: torch.Tensor): time_image_embeds = self.image_proj(image_embeds) time_image_embeds = self.image_norm(time_image_embeds) return time_image_embeds class ImageHintTimeEmbedding(nn.Module): def __init__(self, image_embed_dim: int=768, time_embed_dim: int=1536): super().__init__() self.image_proj = nn.Linear(image_embed_dim, time_embed_dim) self.image_norm = nn.LayerNorm(time_embed_dim) self.input_hint_block = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.SiLU(), nn.Conv2d(16, 16, 3, padding=1), nn.SiLU(), nn.Conv2d(16, 32, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(32, 32, 3, padding=1), nn.SiLU(), nn.Conv2d(32, 96, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(96, 96, 3, padding=1), nn.SiLU(), nn.Conv2d(96, 256, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(256, 4, 3, padding=1)) def forward(self, image_embeds: torch.Tensor, hint: torch.Tensor): time_image_embeds = self.image_proj(image_embeds) time_image_embeds = self.image_norm(time_image_embeds) hint = self.input_hint_block(hint) return (time_image_embeds, hint) class AttentionPooling(nn.Module): def __init__(self, num_heads, embed_dim, dtype=None): super().__init__() self.dtype = dtype self.positional_embedding = nn.Parameter(torch.randn(1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) self.q_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) self.v_proj = nn.Linear(embed_dim, embed_dim, dtype=self.dtype) self.num_heads = num_heads self.dim_per_head = embed_dim // self.num_heads def forward(self, x): (bs, length, width) = x.size() def shape(x): x = x.view(bs, -1, self.num_heads, self.dim_per_head) x = x.transpose(1, 2) x = x.reshape(bs * self.num_heads, -1, self.dim_per_head) x = x.transpose(1, 2) return x class_token = x.mean(dim=1, keepdim=True) + self.positional_embedding.to(x.dtype) x = torch.cat([class_token, x], dim=1) q = shape(self.q_proj(class_token)) k = shape(self.k_proj(x)) v = 
shape(self.v_proj(x)) scale = 1 / math.sqrt(math.sqrt(self.dim_per_head)) weight = torch.einsum('bct,bcs->bts', q * scale, k * scale) weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) a = torch.einsum('bts,bcs->bct', weight, v) a = a.reshape(bs, -1, 1).transpose(1, 2) return a[:, 0, :] def get_fourier_embeds_from_boundingbox(embed_dim, box): (batch_size, num_boxes) = box.shape[:2] emb = 100 ** (torch.arange(embed_dim) / embed_dim) emb = emb[None, None, None].to(device=box.device, dtype=box.dtype) emb = emb * box.unsqueeze(-1) emb = torch.stack((emb.sin(), emb.cos()), dim=-1) emb = emb.permute(0, 1, 3, 4, 2).reshape(batch_size, num_boxes, embed_dim * 2 * 4) return emb class GLIGENTextBoundingboxProjection(nn.Module): def __init__(self, positive_len, out_dim, feature_type='text-only', fourier_freqs=8): super().__init__() self.positive_len = positive_len self.out_dim = out_dim self.fourier_embedder_dim = fourier_freqs self.position_dim = fourier_freqs * 2 * 4 if isinstance(out_dim, tuple): out_dim = out_dim[0] if feature_type == 'text-only': self.linears = nn.Sequential(nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim)) self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) elif feature_type == 'text-image': self.linears_text = nn.Sequential(nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim)) self.linears_image = nn.Sequential(nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim)) self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim])) def forward(self, boxes, masks, positive_embeddings=None, phrases_masks=None, image_masks=None, phrases_embeddings=None, image_embeddings=None): masks = masks.unsqueeze(-1) xyxy_embedding = get_fourier_embeds_from_boundingbox(self.fourier_embedder_dim, boxes) xyxy_null = self.null_position_feature.view(1, 1, -1) xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null if positive_embeddings is not None: positive_null = self.null_positive_feature.view(1, 1, -1) positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1)) else: phrases_masks = phrases_masks.unsqueeze(-1) image_masks = image_masks.unsqueeze(-1) text_null = self.null_text_feature.view(1, 1, -1) image_null = self.null_image_feature.view(1, 1, -1) phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1)) objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1)) objs = torch.cat([objs_text, objs_image], dim=1) return objs class PixArtAlphaCombinedTimestepSizeEmbeddings(nn.Module): def __init__(self, embedding_dim, size_emb_dim, use_additional_conditions: bool=False): super().__init__() self.outdim = size_emb_dim self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.use_additional_conditions = 
use_additional_conditions if use_additional_conditions: self.additional_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.resolution_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim) self.aspect_ratio_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=size_emb_dim) def forward(self, timestep, resolution, aspect_ratio, batch_size, hidden_dtype): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) if self.use_additional_conditions: resolution_emb = self.additional_condition_proj(resolution.flatten()).to(hidden_dtype) resolution_emb = self.resolution_embedder(resolution_emb).reshape(batch_size, -1) aspect_ratio_emb = self.additional_condition_proj(aspect_ratio.flatten()).to(hidden_dtype) aspect_ratio_emb = self.aspect_ratio_embedder(aspect_ratio_emb).reshape(batch_size, -1) conditioning = timesteps_emb + torch.cat([resolution_emb, aspect_ratio_emb], dim=1) else: conditioning = timesteps_emb return conditioning class PixArtAlphaTextProjection(nn.Module): def __init__(self, in_features, hidden_size, out_features=None, act_fn='gelu_tanh'): super().__init__() if out_features is None: out_features = hidden_size self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True) if act_fn == 'gelu_tanh': self.act_1 = nn.GELU(approximate='tanh') elif act_fn == 'silu': self.act_1 = nn.SiLU() elif act_fn == 'silu_fp32': self.act_1 = FP32SiLU() else: raise ValueError(f'Unknown activation function: {act_fn}') self.linear_2 = nn.Linear(in_features=hidden_size, out_features=out_features, bias=True) def forward(self, caption): hidden_states = self.linear_1(caption) hidden_states = self.act_1(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class IPAdapterPlusImageProjectionBlock(nn.Module): def __init__(self, embed_dims: int=768, dim_head: int=64, heads: int=16, ffn_ratio: float=4) -> None: super().__init__() from .attention import FeedForward self.ln0 = nn.LayerNorm(embed_dims) self.ln1 = nn.LayerNorm(embed_dims) self.attn = Attention(query_dim=embed_dims, dim_head=dim_head, heads=heads, out_bias=False) self.ff = nn.Sequential(nn.LayerNorm(embed_dims), FeedForward(embed_dims, embed_dims, activation_fn='gelu', mult=ffn_ratio, bias=False)) def forward(self, x, latents, residual): encoder_hidden_states = self.ln0(x) latents = self.ln1(latents) encoder_hidden_states = torch.cat([encoder_hidden_states, latents], dim=-2) latents = self.attn(latents, encoder_hidden_states) + residual latents = self.ff(latents) + latents return latents class IPAdapterPlusImageProjection(nn.Module): def __init__(self, embed_dims: int=768, output_dims: int=1024, hidden_dims: int=1280, depth: int=4, dim_head: int=64, heads: int=16, num_queries: int=8, ffn_ratio: float=4) -> None: super().__init__() self.latents = nn.Parameter(torch.randn(1, num_queries, hidden_dims) / hidden_dims ** 0.5) self.proj_in = nn.Linear(embed_dims, hidden_dims) self.proj_out = nn.Linear(hidden_dims, output_dims) self.norm_out = nn.LayerNorm(output_dims) self.layers = nn.ModuleList([IPAdapterPlusImageProjectionBlock(hidden_dims, dim_head, heads, ffn_ratio) for _ in range(depth)]) def forward(self, x: torch.Tensor) -> torch.Tensor: latents = self.latents.repeat(x.size(0), 1, 1) x = self.proj_in(x) for block in self.layers: residual = latents latents = block(x, latents, residual) latents = self.proj_out(latents) return self.norm_out(latents) class 
IPAdapterFaceIDPlusImageProjection(nn.Module): def __init__(self, embed_dims: int=768, output_dims: int=768, hidden_dims: int=1280, id_embeddings_dim: int=512, depth: int=4, dim_head: int=64, heads: int=16, num_tokens: int=4, num_queries: int=8, ffn_ratio: float=4, ffproj_ratio: int=2) -> None: super().__init__() from .attention import FeedForward self.num_tokens = num_tokens self.embed_dim = embed_dims self.clip_embeds = None self.shortcut = False self.shortcut_scale = 1.0 self.proj = FeedForward(id_embeddings_dim, embed_dims * num_tokens, activation_fn='gelu', mult=ffproj_ratio) self.norm = nn.LayerNorm(embed_dims) self.proj_in = nn.Linear(hidden_dims, embed_dims) self.proj_out = nn.Linear(embed_dims, output_dims) self.norm_out = nn.LayerNorm(output_dims) self.layers = nn.ModuleList([IPAdapterPlusImageProjectionBlock(embed_dims, dim_head, heads, ffn_ratio) for _ in range(depth)]) def forward(self, id_embeds: torch.Tensor) -> torch.Tensor: id_embeds = id_embeds.to(self.clip_embeds.dtype) id_embeds = self.proj(id_embeds) id_embeds = id_embeds.reshape(-1, self.num_tokens, self.embed_dim) id_embeds = self.norm(id_embeds) latents = id_embeds clip_embeds = self.proj_in(self.clip_embeds) x = clip_embeds.reshape(-1, clip_embeds.shape[2], clip_embeds.shape[3]) for block in self.layers: residual = latents latents = block(x, latents, residual) latents = self.proj_out(latents) out = self.norm_out(latents) if self.shortcut: out = id_embeds + self.shortcut_scale * out return out class MultiIPAdapterImageProjection(nn.Module): def __init__(self, IPAdapterImageProjectionLayers: Union[List[nn.Module], Tuple[nn.Module]]): super().__init__() self.image_projection_layers = nn.ModuleList(IPAdapterImageProjectionLayers) def forward(self, image_embeds: List[torch.Tensor]): projected_image_embeds = [] if not isinstance(image_embeds, list): deprecation_message = 'You have passed a tensor as `image_embeds`. This is deprecated and will be removed in a future release. Please make sure to update your script to pass `image_embeds` as a list of tensors to suppress this warning.' 
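# Example (illustrative sketch, not part of the original file): a quick shape check for the
# IPAdapterPlusImageProjection resampler defined above. The feature sizes below are assumptions
# chosen only for demonstration; the class is assumed to be importable from diffusers.models.embeddings.
import torch
from diffusers.models.embeddings import IPAdapterPlusImageProjection
proj = IPAdapterPlusImageProjection(embed_dims=768, output_dims=1024, hidden_dims=1280, depth=2, num_queries=8)
image_feats = torch.randn(2, 257, 768)  # 2 images, 257 CLIP-like patch tokens, 768-dim features
tokens = proj(image_feats)              # learned latents are repeated per image, projected in, refined by the blocks, projected out
print(tokens.shape)                     # -> torch.Size([2, 8, 1024]): num_queries output tokens per image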
deprecate('image_embeds not a list', '1.0.0', deprecation_message, standard_warn=False) image_embeds = [image_embeds.unsqueeze(1)] if len(image_embeds) != len(self.image_projection_layers): raise ValueError(f'image_embeds must have the same length as image_projection_layers, got {len(image_embeds)} and {len(self.image_projection_layers)}') for (image_embed, image_projection_layer) in zip(image_embeds, self.image_projection_layers): (batch_size, num_images) = (image_embed.shape[0], image_embed.shape[1]) image_embed = image_embed.reshape((batch_size * num_images,) + image_embed.shape[2:]) image_embed = image_projection_layer(image_embed) image_embed = image_embed.reshape((batch_size, num_images) + image_embed.shape[1:]) projected_image_embeds.append(image_embed) return projected_image_embeds # File: diffusers-main/src/diffusers/models/embeddings_flax.py import math import flax.linen as nn import jax.numpy as jnp def get_sinusoidal_embeddings(timesteps: jnp.ndarray, embedding_dim: int, freq_shift: float=1, min_timescale: float=1, max_timescale: float=10000.0, flip_sin_to_cos: bool=False, scale: float=1.0) -> jnp.ndarray: assert timesteps.ndim == 1, 'Timesteps should be a 1d-array' assert embedding_dim % 2 == 0, f'Embedding dimension {embedding_dim} should be even' num_timescales = float(embedding_dim // 2) log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift) inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment) emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0) scaled_time = scale * emb if flip_sin_to_cos: signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1) else: signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1) signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim]) return signal class FlaxTimestepEmbedding(nn.Module): time_embed_dim: int = 32 dtype: jnp.dtype = jnp.float32 @nn.compact def __call__(self, temb): temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb) temb = nn.silu(temb) temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb) return temb class FlaxTimesteps(nn.Module): dim: int = 32 flip_sin_to_cos: bool = False freq_shift: float = 1 @nn.compact def __call__(self, timesteps): return get_sinusoidal_embeddings(timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift) # File: diffusers-main/src/diffusers/models/lora.py from typing import Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ..utils import deprecate, logging from ..utils.import_utils import is_transformers_available if is_transformers_available(): from transformers import CLIPTextModel, CLIPTextModelWithProjection logger = logging.get_logger(__name__) def text_encoder_attn_modules(text_encoder): attn_modules = [] if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): for (i, layer) in enumerate(text_encoder.text_model.encoder.layers): name = f'text_model.encoder.layers.{i}.self_attn' mod = layer.self_attn attn_modules.append((name, mod)) else: raise ValueError(f'do not know how to get attention modules for: {text_encoder.__class__.__name__}') return attn_modules def text_encoder_mlp_modules(text_encoder): mlp_modules = [] if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): for (i, layer) in enumerate(text_encoder.text_model.encoder.layers): mlp_mod = 
layer.mlp name = f'text_model.encoder.layers.{i}.mlp' mlp_modules.append((name, mlp_mod)) else: raise ValueError(f'do not know how to get mlp modules for: {text_encoder.__class__.__name__}') return mlp_modules def adjust_lora_scale_text_encoder(text_encoder, lora_scale: float=1.0): for (_, attn_module) in text_encoder_attn_modules(text_encoder): if isinstance(attn_module.q_proj, PatchedLoraProjection): attn_module.q_proj.lora_scale = lora_scale attn_module.k_proj.lora_scale = lora_scale attn_module.v_proj.lora_scale = lora_scale attn_module.out_proj.lora_scale = lora_scale for (_, mlp_module) in text_encoder_mlp_modules(text_encoder): if isinstance(mlp_module.fc1, PatchedLoraProjection): mlp_module.fc1.lora_scale = lora_scale mlp_module.fc2.lora_scale = lora_scale class PatchedLoraProjection(torch.nn.Module): def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None): deprecation_message = 'Use of `PatchedLoraProjection` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.' deprecate('PatchedLoraProjection', '1.0.0', deprecation_message) super().__init__() from ..models.lora import LoRALinearLayer self.regular_linear_layer = regular_linear_layer device = self.regular_linear_layer.weight.device if dtype is None: dtype = self.regular_linear_layer.weight.dtype self.lora_linear_layer = LoRALinearLayer(self.regular_linear_layer.in_features, self.regular_linear_layer.out_features, network_alpha=network_alpha, device=device, dtype=dtype, rank=rank) self.lora_scale = lora_scale def state_dict(self, *args, destination=None, prefix='', keep_vars=False): if self.lora_linear_layer is None: return self.regular_linear_layer.state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars) return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars) def _fuse_lora(self, lora_scale=1.0, safe_fusing=False): if self.lora_linear_layer is None: return (dtype, device) = (self.regular_linear_layer.weight.data.dtype, self.regular_linear_layer.weight.data.device) w_orig = self.regular_linear_layer.weight.data.float() w_up = self.lora_linear_layer.up.weight.data.float() w_down = self.lora_linear_layer.down.weight.data.float() if self.lora_linear_layer.network_alpha is not None: w_up = w_up * self.lora_linear_layer.network_alpha / self.lora_linear_layer.rank fused_weight = w_orig + lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0] if safe_fusing and torch.isnan(fused_weight).any().item(): raise ValueError(f'This LoRA weight seems to be broken. 
Encountered NaN values when trying to fuse LoRA weights for {self}. LoRA weights will not be fused.') self.regular_linear_layer.weight.data = fused_weight.to(device=device, dtype=dtype) self.lora_linear_layer = None self.w_up = w_up.cpu() self.w_down = w_down.cpu() self.lora_scale = lora_scale def _unfuse_lora(self): if not (getattr(self, 'w_up', None) is not None and getattr(self, 'w_down', None) is not None): return fused_weight = self.regular_linear_layer.weight.data (dtype, device) = (fused_weight.dtype, fused_weight.device) w_up = self.w_up.to(device=device).float() w_down = self.w_down.to(device).float() unfused_weight = fused_weight.float() - self.lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0] self.regular_linear_layer.weight.data = unfused_weight.to(device=device, dtype=dtype) self.w_up = None self.w_down = None def forward(self, input): if self.lora_scale is None: self.lora_scale = 1.0 if self.lora_linear_layer is None: return self.regular_linear_layer(input) return self.regular_linear_layer(input) + self.lora_scale * self.lora_linear_layer(input) class LoRALinearLayer(nn.Module): def __init__(self, in_features: int, out_features: int, rank: int=4, network_alpha: Optional[float]=None, device: Optional[Union[torch.device, str]]=None, dtype: Optional[torch.dtype]=None): super().__init__() deprecation_message = 'Use of `LoRALinearLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.' deprecate('LoRALinearLayer', '1.0.0', deprecation_message) self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype) self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype) self.network_alpha = network_alpha self.rank = rank self.out_features = out_features self.in_features = in_features nn.init.normal_(self.down.weight, std=1 / rank) nn.init.zeros_(self.up.weight) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: orig_dtype = hidden_states.dtype dtype = self.down.weight.dtype down_hidden_states = self.down(hidden_states.to(dtype)) up_hidden_states = self.up(down_hidden_states) if self.network_alpha is not None: up_hidden_states *= self.network_alpha / self.rank return up_hidden_states.to(orig_dtype) class LoRAConv2dLayer(nn.Module): def __init__(self, in_features: int, out_features: int, rank: int=4, kernel_size: Union[int, Tuple[int, int]]=(1, 1), stride: Union[int, Tuple[int, int]]=(1, 1), padding: Union[int, Tuple[int, int], str]=0, network_alpha: Optional[float]=None): super().__init__() deprecation_message = 'Use of `LoRAConv2dLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.' 
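# Example (illustrative sketch, not part of the original file): how the deprecated LoRALinearLayer
# defined above adds a rank-r update on top of a frozen nn.Linear. The dimensions are arbitrary
# demo values; the layer is assumed to be importable from diffusers.models.lora.
import torch
from torch import nn
from diffusers.models.lora import LoRALinearLayer
base = nn.Linear(64, 64)                                               # frozen base projection
lora = LoRALinearLayer(in_features=64, out_features=64, rank=4, network_alpha=8)
x = torch.randn(1, 64)
out = base(x) + 1.0 * lora(x)  # lora_scale * low-rank delta; the delta is zero at init because `up` is zero-initialized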
deprecate('LoRAConv2dLayer', '1.0.0', deprecation_message) self.down = nn.Conv2d(in_features, rank, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) self.up = nn.Conv2d(rank, out_features, kernel_size=(1, 1), stride=(1, 1), bias=False) self.network_alpha = network_alpha self.rank = rank nn.init.normal_(self.down.weight, std=1 / rank) nn.init.zeros_(self.up.weight) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: orig_dtype = hidden_states.dtype dtype = self.down.weight.dtype down_hidden_states = self.down(hidden_states.to(dtype)) up_hidden_states = self.up(down_hidden_states) if self.network_alpha is not None: up_hidden_states *= self.network_alpha / self.rank return up_hidden_states.to(orig_dtype) class LoRACompatibleConv(nn.Conv2d): def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer]=None, **kwargs): deprecation_message = 'Use of `LoRACompatibleConv` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.' deprecate('LoRACompatibleConv', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) self.lora_layer = lora_layer def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]): deprecation_message = 'Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.' deprecate('set_lora_layer', '1.0.0', deprecation_message) self.lora_layer = lora_layer def _fuse_lora(self, lora_scale: float=1.0, safe_fusing: bool=False): if self.lora_layer is None: return (dtype, device) = (self.weight.data.dtype, self.weight.data.device) w_orig = self.weight.data.float() w_up = self.lora_layer.up.weight.data.float() w_down = self.lora_layer.down.weight.data.float() if self.lora_layer.network_alpha is not None: w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank fusion = torch.mm(w_up.flatten(start_dim=1), w_down.flatten(start_dim=1)) fusion = fusion.reshape(w_orig.shape) fused_weight = w_orig + lora_scale * fusion if safe_fusing and torch.isnan(fused_weight).any().item(): raise ValueError(f'This LoRA weight seems to be broken. 
Encountered NaN values when trying to fuse LoRA weights for {self}.LoRA weights will not be fused.') self.weight.data = fused_weight.to(device=device, dtype=dtype) self.lora_layer = None self.w_up = w_up.cpu() self.w_down = w_down.cpu() self._lora_scale = lora_scale def _unfuse_lora(self): if not (getattr(self, 'w_up', None) is not None and getattr(self, 'w_down', None) is not None): return fused_weight = self.weight.data (dtype, device) = (fused_weight.data.dtype, fused_weight.data.device) self.w_up = self.w_up.to(device=device).float() self.w_down = self.w_down.to(device).float() fusion = torch.mm(self.w_up.flatten(start_dim=1), self.w_down.flatten(start_dim=1)) fusion = fusion.reshape(fused_weight.shape) unfused_weight = fused_weight.float() - self._lora_scale * fusion self.weight.data = unfused_weight.to(device=device, dtype=dtype) self.w_up = None self.w_down = None def forward(self, hidden_states: torch.Tensor, scale: float=1.0) -> torch.Tensor: if self.padding_mode != 'zeros': hidden_states = F.pad(hidden_states, self._reversed_padding_repeated_twice, mode=self.padding_mode) padding = (0, 0) else: padding = self.padding original_outputs = F.conv2d(hidden_states, self.weight, self.bias, self.stride, padding, self.dilation, self.groups) if self.lora_layer is None: return original_outputs else: return original_outputs + scale * self.lora_layer(hidden_states) class LoRACompatibleLinear(nn.Linear): def __init__(self, *args, lora_layer: Optional[LoRALinearLayer]=None, **kwargs): deprecation_message = 'Use of `LoRACompatibleLinear` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.' deprecate('LoRACompatibleLinear', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) self.lora_layer = lora_layer def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]): deprecation_message = 'Use of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.' deprecate('set_lora_layer', '1.0.0', deprecation_message) self.lora_layer = lora_layer def _fuse_lora(self, lora_scale: float=1.0, safe_fusing: bool=False): if self.lora_layer is None: return (dtype, device) = (self.weight.data.dtype, self.weight.data.device) w_orig = self.weight.data.float() w_up = self.lora_layer.up.weight.data.float() w_down = self.lora_layer.down.weight.data.float() if self.lora_layer.network_alpha is not None: w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank fused_weight = w_orig + lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0] if safe_fusing and torch.isnan(fused_weight).any().item(): raise ValueError(f'This LoRA weight seems to be broken. 
Encountered NaN values when trying to fuse LoRA weights for {self}.LoRA weights will not be fused.') self.weight.data = fused_weight.to(device=device, dtype=dtype) self.lora_layer = None self.w_up = w_up.cpu() self.w_down = w_down.cpu() self._lora_scale = lora_scale def _unfuse_lora(self): if not (getattr(self, 'w_up', None) is not None and getattr(self, 'w_down', None) is not None): return fused_weight = self.weight.data (dtype, device) = (fused_weight.dtype, fused_weight.device) w_up = self.w_up.to(device=device).float() w_down = self.w_down.to(device).float() unfused_weight = fused_weight.float() - self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0] self.weight.data = unfused_weight.to(device=device, dtype=dtype) self.w_up = None self.w_down = None def forward(self, hidden_states: torch.Tensor, scale: float=1.0) -> torch.Tensor: if self.lora_layer is None: out = super().forward(hidden_states) return out else: out = super().forward(hidden_states) + scale * self.lora_layer(hidden_states) return out # File: diffusers-main/src/diffusers/models/model_loading_utils.py import importlib import inspect import os from collections import OrderedDict from pathlib import Path from typing import List, Optional, Union import safetensors import torch from huggingface_hub.utils import EntryNotFoundError from ..utils import SAFE_WEIGHTS_INDEX_NAME, SAFETENSORS_FILE_EXTENSION, WEIGHTS_INDEX_NAME, _add_variant, _get_model_file, is_accelerate_available, is_torch_version, logging logger = logging.get_logger(__name__) _CLASS_REMAPPING_DICT = {'Transformer2DModel': {'ada_norm_zero': 'DiTTransformer2DModel', 'ada_norm_single': 'PixArtTransformer2DModel'}} if is_accelerate_available(): from accelerate import infer_auto_device_map from accelerate.utils import get_balanced_memory, get_max_memory, set_module_tensor_to_device def _determine_device_map(model: torch.nn.Module, device_map, max_memory, torch_dtype): if isinstance(device_map, str): no_split_modules = model._get_no_split_modules(device_map) device_map_kwargs = {'no_split_module_classes': no_split_modules} if device_map != 'sequential': max_memory = get_balanced_memory(model, dtype=torch_dtype, low_zero=device_map == 'balanced_low_0', max_memory=max_memory, **device_map_kwargs) else: max_memory = get_max_memory(max_memory) device_map_kwargs['max_memory'] = max_memory device_map = infer_auto_device_map(model, dtype=torch_dtype, **device_map_kwargs) return device_map def _fetch_remapped_cls_from_config(config, old_class): previous_class_name = old_class.__name__ remapped_class_name = _CLASS_REMAPPING_DICT.get(previous_class_name).get(config['norm_type'], None) if remapped_class_name: diffusers_library = importlib.import_module(__name__.split('.')[0]) remapped_class = getattr(diffusers_library, remapped_class_name) logger.info(f"Changing class object to be of `{remapped_class_name}` type from `{previous_class_name}` type.This is because `{previous_class_name}` is scheduled to be deprecated in a future version. 
Note that this DOESN'T affect the final results.") return remapped_class else: return old_class def load_state_dict(checkpoint_file: Union[str, os.PathLike], variant: Optional[str]=None): try: file_extension = os.path.basename(checkpoint_file).split('.')[-1] if file_extension == SAFETENSORS_FILE_EXTENSION: return safetensors.torch.load_file(checkpoint_file, device='cpu') else: weights_only_kwarg = {'weights_only': True} if is_torch_version('>=', '1.13') else {} return torch.load(checkpoint_file, map_location='cpu', **weights_only_kwarg) except Exception as e: try: with open(checkpoint_file) as f: if f.read().startswith('version'): raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError(f'Unable to locate the file {checkpoint_file} which is necessary to load this pretrained model. Make sure you have saved the model properly.') from e except (UnicodeDecodeError, ValueError): raise OSError(f"Unable to load weights from checkpoint file for '{checkpoint_file}' at '{checkpoint_file}'. ") def load_model_dict_into_meta(model, state_dict: OrderedDict, device: Optional[Union[str, torch.device]]=None, dtype: Optional[Union[str, torch.dtype]]=None, model_name_or_path: Optional[str]=None) -> List[str]: device = device or torch.device('cpu') dtype = dtype or torch.float32 accepts_dtype = 'dtype' in set(inspect.signature(set_module_tensor_to_device).parameters.keys()) unexpected_keys = [] empty_state_dict = model.state_dict() for (param_name, param) in state_dict.items(): if param_name not in empty_state_dict: unexpected_keys.append(param_name) continue if empty_state_dict[param_name].shape != param.shape: model_name_or_path_str = f'{model_name_or_path} ' if model_name_or_path is not None else '' raise ValueError(f'Cannot load {model_name_or_path_str}because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. 
For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example.') if accepts_dtype: set_module_tensor_to_device(model, param_name, device, value=param, dtype=dtype) else: set_module_tensor_to_device(model, param_name, device, value=param) return unexpected_keys def _load_state_dict_into_model(model_to_load, state_dict: OrderedDict) -> List[str]: state_dict = state_dict.copy() error_msgs = [] def load(module: torch.nn.Module, prefix: str=''): args = (state_dict, prefix, {}, True, [], [], error_msgs) module._load_from_state_dict(*args) for (name, child) in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model_to_load) return error_msgs def _fetch_index_file(is_local, pretrained_model_name_or_path, subfolder, use_safetensors, cache_dir, variant, force_download, proxies, local_files_only, token, revision, user_agent, commit_hash): if is_local: index_file = Path(pretrained_model_name_or_path, subfolder or '', _add_variant(SAFE_WEIGHTS_INDEX_NAME if use_safetensors else WEIGHTS_INDEX_NAME, variant)) else: index_file_in_repo = Path(subfolder or '', _add_variant(SAFE_WEIGHTS_INDEX_NAME if use_safetensors else WEIGHTS_INDEX_NAME, variant)).as_posix() try: index_file = _get_model_file(pretrained_model_name_or_path, weights_name=index_file_in_repo, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=None, user_agent=user_agent, commit_hash=commit_hash) index_file = Path(index_file) except (EntryNotFoundError, EnvironmentError): index_file = None return index_file # File: diffusers-main/src/diffusers/models/modeling_flax_pytorch_utils.py """""" import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging logger = logging.get_logger(__name__) def rename_key(key): regex = '\\w+[.]\\d+' pats = re.findall(regex, key) for pat in pats: key = key.replace(pat, '_'.join(pat.split('.'))) return key def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict): renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',) if len(pt_tuple_key) > 1: for (rename_from, rename_to) in (('to_out_0', 'proj_attn'), ('to_k', 'key'), ('to_v', 'value'), ('to_q', 'query')): if pt_tuple_key[-2] == rename_from: weight_name = pt_tuple_key[-1] weight_name = 'kernel' if weight_name == 'weight' else weight_name renamed_pt_tuple_key = pt_tuple_key[:-2] + (rename_to, weight_name) if renamed_pt_tuple_key in random_flax_state_dict: assert random_flax_state_dict[renamed_pt_tuple_key].shape == pt_tensor.T.shape return (renamed_pt_tuple_key, pt_tensor.T) if any(('norm' in str_ for str_ in pt_tuple_key)) and pt_tuple_key[-1] == 'bias' and (pt_tuple_key[:-1] + ('bias',) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ('scale',) in random_flax_state_dict): renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',) return (renamed_pt_tuple_key, pt_tensor) elif pt_tuple_key[-1] in ['weight', 'gamma'] and pt_tuple_key[:-1] + ('scale',) in random_flax_state_dict: renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',) return (renamed_pt_tuple_key, pt_tensor) if pt_tuple_key[-1] == 'weight' and pt_tuple_key[:-1] + ('embedding',) in random_flax_state_dict: pt_tuple_key = pt_tuple_key[:-1] + ('embedding',) return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == 'weight' and pt_tensor.ndim == 4: 
pt_tensor = pt_tensor.transpose(2, 3, 1, 0) return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == 'weight': pt_tensor = pt_tensor.T return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == 'gamma': return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == 'beta': return (renamed_pt_tuple_key, pt_tensor) return (pt_tuple_key, pt_tensor) def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42): pt_state_dict = {k: v.numpy() for (k, v) in pt_state_dict.items()} random_flax_params = flax_model.init_weights(PRNGKey(init_key)) random_flax_state_dict = flatten_dict(random_flax_params) flax_state_dict = {} for (pt_key, pt_tensor) in pt_state_dict.items(): renamed_pt_key = rename_key(pt_key) pt_tuple_key = tuple(renamed_pt_key.split('.')) (flax_key, flax_tensor) = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError(f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape {random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.') flax_state_dict[flax_key] = jnp.asarray(flax_tensor) return unflatten_dict(flax_state_dict) # File: diffusers-main/src/diffusers/models/modeling_flax_utils.py import os from pickle import UnpicklingError from typing import Any, Dict, Union import jax import jax.numpy as jnp import msgpack.exceptions from flax.core.frozen_dict import FrozenDict, unfreeze from flax.serialization import from_bytes, to_bytes from flax.traverse_util import flatten_dict, unflatten_dict from huggingface_hub import create_repo, hf_hub_download from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, validate_hf_hub_args from requests import HTTPError from .. 
import __version__, is_torch_available from ..utils import CONFIG_NAME, FLAX_WEIGHTS_NAME, HUGGINGFACE_CO_RESOLVE_ENDPOINT, WEIGHTS_NAME, PushToHubMixin, logging from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax logger = logging.get_logger(__name__) class FlaxModelMixin(PushToHubMixin): config_name = CONFIG_NAME _automatically_saved_args = ['_diffusers_version', '_class_name', '_name_or_path'] _flax_internal_args = ['name', 'parent', 'dtype'] @classmethod def _from_config(cls, config, **kwargs): return cls(config, **kwargs) def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any=None) -> Any: def conditional_cast(param): if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating): param = param.astype(dtype) return param if mask is None: return jax.tree_map(conditional_cast, params) flat_params = flatten_dict(params) (flat_mask, _) = jax.tree_flatten(mask) for (masked, key) in zip(flat_mask, flat_params.keys()): if masked: param = flat_params[key] flat_params[key] = conditional_cast(param) return unflatten_dict(flat_params) def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any=None): return self._cast_floating_to(params, jnp.bfloat16, mask) def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any=None): return self._cast_floating_to(params, jnp.float32, mask) def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any=None): return self._cast_floating_to(params, jnp.float16, mask) def init_weights(self, rng: jax.Array) -> Dict: raise NotImplementedError(f'init_weights method has to be implemented for {self}') @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], dtype: jnp.dtype=jnp.float32, *model_args, **kwargs): config = kwargs.pop('config', None) cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) from_pt = kwargs.pop('from_pt', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', False) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) subfolder = kwargs.pop('subfolder', None) user_agent = {'diffusers': __version__, 'file_type': 'model', 'framework': 'flax'} if config is None: (config, unused_kwargs) = cls.load_config(pretrained_model_name_or_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, **kwargs) (model, model_kwargs) = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs) pretrained_path_with_subfolder = pretrained_model_name_or_path if subfolder is None else os.path.join(pretrained_model_name_or_path, subfolder) if os.path.isdir(pretrained_path_with_subfolder): if from_pt: if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): raise EnvironmentError(f'Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} ') model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)): model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): raise EnvironmentError(f'{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. 
Please load the model using `from_pt=True`.') else: raise EnvironmentError(f'Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder}.') else: try: model_file = hf_hub_download(pretrained_model_name_or_path, filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision) except RepositoryNotFoundError: raise EnvironmentError(f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo with `token` or log in with `huggingface-cli login`.") except RevisionNotFoundError: raise EnvironmentError(f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.") except EntryNotFoundError: raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}.') except HTTPError as err: raise EnvironmentError(f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}') except ValueError: raise EnvironmentError(f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.") except EnvironmentError: raise EnvironmentError(f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.") if from_pt: if is_torch_available(): from .modeling_utils import load_state_dict else: raise EnvironmentError("Can't load the model in PyTorch format because PyTorch is not installed. Please, install PyTorch or use native Flax weights.") pytorch_model_file = load_state_dict(model_file) state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model) else: try: with open(model_file, 'rb') as state_f: state = from_bytes(cls, state_f.read()) except (UnpicklingError, msgpack.exceptions.ExtraData) as e: try: with open(model_file) as f: if f.read().startswith('version'): raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. 
') state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend='cpu')[0]), state) state = flatten_dict(state) params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0)) required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) shape_state = flatten_dict(unfreeze(params_shape_tree)) missing_keys = required_params - set(state.keys()) unexpected_keys = set(state.keys()) - required_params if missing_keys: logger.warning(f'The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. Make sure to call model.init_weights to initialize the missing weights.') cls._missing_keys = missing_keys for key in state.keys(): if key in shape_state and state[key].shape != shape_state[key].shape: raise ValueError(f'Trying to load the pretrained weight for {key} failed: checkpoint has shape {state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. ') for unexpected_key in unexpected_keys: del state[unexpected_key] if len(unexpected_keys) > 0: logger.warning(f'Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture.') else: logger.info(f'All model checkpoint weights were used when initializing {model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') else: logger.info(f'All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.') return (model, unflatten_dict(state)) def save_pretrained(self, save_directory: Union[str, os.PathLike], params: Union[Dict, FrozenDict], is_main_process: bool=True, push_to_hub: bool=False, **kwargs): if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop('commit_message', None) private = kwargs.pop('private', False) create_pr = kwargs.pop('create_pr', False) token = kwargs.pop('token', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id model_to_save = self if is_main_process: model_to_save.save_config(save_directory) output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME) with open(output_model_file, 'wb') as f: model_bytes = to_bytes(params) f.write(model_bytes) logger.info(f'Model weights saved in {output_model_file}') if push_to_hub: self._upload_folder(save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr) # File: diffusers-main/src/diffusers/models/modeling_outputs.py from dataclasses import dataclass from ..utils import BaseOutput @dataclass class AutoencoderKLOutput(BaseOutput): latent_dist: 'DiagonalGaussianDistribution' @dataclass class Transformer2DModelOutput(BaseOutput): sample: 
'torch.Tensor' # File: diffusers-main/src/diffusers/models/modeling_pytorch_flax_utils.py """""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging logger = logging.get_logger(__name__) def load_flax_checkpoint_in_pytorch_model(pt_model, model_file): try: with open(model_file, 'rb') as flax_state_f: flax_state = from_bytes(None, flax_state_f.read()) except UnpicklingError as e: try: with open(model_file) as f: if f.read().startswith('version'): raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ') return load_flax_weights_in_pytorch_model(pt_model, flax_state) def load_flax_weights_in_pytorch_model(pt_model, flax_state): try: import torch except ImportError: logger.error('Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions.') raise is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values() if any(is_type_bf16): logger.warning('Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` before loading those in PyTorch model.') flax_state = jax.tree_util.tree_map(lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state) pt_model.base_model_prefix = '' flax_state_dict = flatten_dict(flax_state, sep='.') pt_model_dict = pt_model.state_dict() unexpected_keys = [] missing_keys = set(pt_model_dict.keys()) for (flax_key_tuple, flax_tensor) in flax_state_dict.items(): flax_key_tuple_array = flax_key_tuple.split('.') if flax_key_tuple_array[-1] == 'kernel' and flax_tensor.ndim == 4: flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight'] flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1)) elif flax_key_tuple_array[-1] == 'kernel': flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight'] flax_tensor = flax_tensor.T elif flax_key_tuple_array[-1] == 'scale': flax_key_tuple_array = flax_key_tuple_array[:-1] + ['weight'] if 'time_embedding' not in flax_key_tuple_array: for (i, flax_key_tuple_string) in enumerate(flax_key_tuple_array): flax_key_tuple_array[i] = flax_key_tuple_string.replace('_0', '.0').replace('_1', '.1').replace('_2', '.2').replace('_3', '.3').replace('_4', '.4').replace('_5', '.5').replace('_6', '.6').replace('_7', '.7').replace('_8', '.8').replace('_9', '.9') flax_key = '.'.join(flax_key_tuple_array) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError(f'Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.') else: flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor pt_model_dict[flax_key] = torch.from_numpy(flax_tensor) missing_keys.remove(flax_key) else: unexpected_keys.append(flax_key) pt_model.load_state_dict(pt_model_dict) missing_keys = list(missing_keys) if len(unexpected_keys) > 0: logger.warning(f'Some weights of the Flax model were not used when initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect to be exactly identical (e.g. initializing a BertForSequenceClassification model from a FlaxBertForSequenceClassification model).') if len(missing_keys) > 0: logger.warning(f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') return pt_model # File: diffusers-main/src/diffusers/models/modeling_utils.py import inspect import itertools import json import os import re from collections import OrderedDict from functools import partial from pathlib import Path from typing import Any, Callable, List, Optional, Tuple, Union import safetensors import torch from huggingface_hub import create_repo, split_torch_state_dict_into_shards from huggingface_hub.utils import validate_hf_hub_args from torch import Tensor, nn from .. 
import __version__ from ..utils import CONFIG_NAME, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, _add_variant, _get_checkpoint_shard_files, _get_model_file, deprecate, is_accelerate_available, is_torch_version, logging from ..utils.hub_utils import PushToHubMixin, load_or_create_model_card, populate_model_card from .model_loading_utils import _determine_device_map, _fetch_index_file, _load_state_dict_into_model, load_model_dict_into_meta, load_state_dict logger = logging.get_logger(__name__) _REGEX_SHARD = re.compile('(.*?)-\\d{5}-of-\\d{5}') if is_torch_version('>=', '1.9.0'): _LOW_CPU_MEM_USAGE_DEFAULT = True else: _LOW_CPU_MEM_USAGE_DEFAULT = False if is_accelerate_available(): import accelerate def get_parameter_device(parameter: torch.nn.Module) -> torch.device: try: parameters_and_buffers = itertools.chain(parameter.parameters(), parameter.buffers()) return next(parameters_and_buffers).device except StopIteration: def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device def get_parameter_dtype(parameter: torch.nn.Module) -> torch.dtype: try: params = tuple(parameter.parameters()) if len(params) > 0: return params[0].dtype buffers = tuple(parameter.buffers()) if len(buffers) > 0: return buffers[0].dtype except StopIteration: def find_tensor_attributes(module: torch.nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype class ModelMixin(torch.nn.Module, PushToHubMixin): config_name = CONFIG_NAME _automatically_saved_args = ['_diffusers_version', '_class_name', '_name_or_path'] _supports_gradient_checkpointing = False _keys_to_ignore_on_load_unexpected = None _no_split_modules = None def __init__(self): super().__init__() def __getattr__(self, name: str) -> Any: is_in_config = '_internal_dict' in self.__dict__ and hasattr(self.__dict__['_internal_dict'], name) is_attribute = name in self.__dict__ if is_in_config and (not is_attribute): deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'unet.config.{name}'." 
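# Example (illustrative sketch, not part of the original file): the ModelMixin save/load round trip
# implemented by the from_pretrained and save_pretrained methods of this class, exercised through a
# concrete model. The Hub repo id and "unet" subfolder follow the usual diffusers layout but are
# assumptions for demonstration only.
from diffusers import UNet2DModel
unet = UNet2DModel.from_pretrained("google/ddpm-cat-256", subfolder="unet")
unet.save_pretrained("./unet-local", safe_serialization=True)  # writes config.json plus safetensors weight file(s)
reloaded = UNet2DModel.from_pretrained("./unet-local")          # loads back from the local directory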
deprecate('direct config name access', '1.0.0', deprecation_message, standard_warn=False, stacklevel=3) return self._internal_dict[name] return super().__getattr__(name) @property def is_gradient_checkpointing(self) -> bool: return any((hasattr(m, 'gradient_checkpointing') and m.gradient_checkpointing for m in self.modules())) def enable_gradient_checkpointing(self) -> None: if not self._supports_gradient_checkpointing: raise ValueError(f'{self.__class__.__name__} does not support gradient checkpointing.') self.apply(partial(self._set_gradient_checkpointing, value=True)) def disable_gradient_checkpointing(self) -> None: if self._supports_gradient_checkpointing: self.apply(partial(self._set_gradient_checkpointing, value=False)) def set_use_npu_flash_attention(self, valid: bool) -> None: def fn_recursive_set_npu_flash_attention(module: torch.nn.Module): if hasattr(module, 'set_use_npu_flash_attention'): module.set_use_npu_flash_attention(valid) for child in module.children(): fn_recursive_set_npu_flash_attention(child) for module in self.children(): if isinstance(module, torch.nn.Module): fn_recursive_set_npu_flash_attention(module) def enable_npu_flash_attention(self) -> None: self.set_use_npu_flash_attention(True) def disable_npu_flash_attention(self) -> None: self.set_use_npu_flash_attention(False) def set_use_memory_efficient_attention_xformers(self, valid: bool, attention_op: Optional[Callable]=None) -> None: def fn_recursive_set_mem_eff(module: torch.nn.Module): if hasattr(module, 'set_use_memory_efficient_attention_xformers'): module.set_use_memory_efficient_attention_xformers(valid, attention_op) for child in module.children(): fn_recursive_set_mem_eff(child) for module in self.children(): if isinstance(module, torch.nn.Module): fn_recursive_set_mem_eff(module) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None) -> None: self.set_use_memory_efficient_attention_xformers(True, attention_op) def disable_xformers_memory_efficient_attention(self) -> None: self.set_use_memory_efficient_attention_xformers(False) def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool=True, save_function: Optional[Callable]=None, safe_serialization: bool=True, variant: Optional[str]=None, max_shard_size: Union[int, str]='10GB', push_to_hub: bool=False, **kwargs): if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME weights_name = _add_variant(weights_name, variant) weight_name_split = weights_name.split('.') if len(weight_name_split) in [2, 3]: weights_name_pattern = weight_name_split[0] + '{suffix}.' 
+ '.'.join(weight_name_split[1:]) else: raise ValueError(f'Invalid {weights_name} provided.') os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop('commit_message', None) private = kwargs.pop('private', False) create_pr = kwargs.pop('create_pr', False) token = kwargs.pop('token', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id model_to_save = self if is_main_process: model_to_save.save_config(save_directory) state_dict = model_to_save.state_dict() state_dict_split = split_torch_state_dict_into_shards(state_dict, max_shard_size=max_shard_size, filename_pattern=weights_name_pattern) if is_main_process: for filename in os.listdir(save_directory): if filename in state_dict_split.filename_to_tensors.keys(): continue full_filename = os.path.join(save_directory, filename) if not os.path.isfile(full_filename): continue weights_without_ext = weights_name_pattern.replace('.bin', '').replace('.safetensors', '') weights_without_ext = weights_without_ext.replace('{suffix}', '') filename_without_ext = filename.replace('.bin', '').replace('.safetensors', '') if filename.startswith(weights_without_ext) and _REGEX_SHARD.fullmatch(filename_without_ext) is not None: os.remove(full_filename) for (filename, tensors) in state_dict_split.filename_to_tensors.items(): shard = {tensor: state_dict[tensor] for tensor in tensors} filepath = os.path.join(save_directory, filename) if safe_serialization: safetensors.torch.save_file(shard, filepath, metadata={'format': 'pt'}) else: torch.save(shard, filepath) if state_dict_split.is_sharded: index = {'metadata': state_dict_split.metadata, 'weight_map': state_dict_split.tensor_to_filename} save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant)) with open(save_index_file, 'w', encoding='utf-8') as f: content = json.dumps(index, indent=2, sort_keys=True) + '\n' f.write(content) logger.info(f'The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. 
You can find where each parameters has been saved in the index located at {save_index_file}.') else: path_to_weights = os.path.join(save_directory, weights_name) logger.info(f'Model weights saved in {path_to_weights}') if push_to_hub: model_card = load_or_create_model_card(repo_id, token=token) model_card = populate_model_card(model_card) model_card.save(Path(save_directory, 'README.md').as_posix()) self._upload_folder(save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr) @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): cache_dir = kwargs.pop('cache_dir', None) ignore_mismatched_sizes = kwargs.pop('ignore_mismatched_sizes', False) force_download = kwargs.pop('force_download', False) from_flax = kwargs.pop('from_flax', False) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) torch_dtype = kwargs.pop('torch_dtype', None) subfolder = kwargs.pop('subfolder', None) device_map = kwargs.pop('device_map', None) max_memory = kwargs.pop('max_memory', None) offload_folder = kwargs.pop('offload_folder', None) offload_state_dict = kwargs.pop('offload_state_dict', False) low_cpu_mem_usage = kwargs.pop('low_cpu_mem_usage', _LOW_CPU_MEM_USAGE_DEFAULT) variant = kwargs.pop('variant', None) use_safetensors = kwargs.pop('use_safetensors', None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True if low_cpu_mem_usage and (not is_accelerate_available()): low_cpu_mem_usage = False logger.warning('Cannot initialize model with low cpu memory usage because `accelerate` was not found in the environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip install accelerate\n```\n.') if device_map is not None and (not is_accelerate_available()): raise NotImplementedError('Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set `device_map=None`. You can install accelerate with `pip install accelerate`.') if device_map is not None and (not is_torch_version('>=', '1.9.0')): raise NotImplementedError('Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set `device_map=None`.') if low_cpu_mem_usage is True and (not is_torch_version('>=', '1.9.0')): raise NotImplementedError('Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set `low_cpu_mem_usage=False`.') if low_cpu_mem_usage is False and device_map is not None: raise ValueError(f'You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and dispatching. Please make sure to set `low_cpu_mem_usage=True`.') if isinstance(device_map, torch.device): device_map = {'': device_map} elif isinstance(device_map, str) and device_map not in ['auto', 'balanced', 'balanced_low_0', 'sequential']: try: device_map = {'': torch.device(device_map)} except RuntimeError: raise ValueError(f"When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or 'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}.") elif isinstance(device_map, int): if device_map < 0: raise ValueError("You can't pass device_map as a negative int. 
If you want to put the model on the cpu, pass device_map = 'cpu' ") else: device_map = {'': device_map} if device_map is not None: if low_cpu_mem_usage is None: low_cpu_mem_usage = True elif not low_cpu_mem_usage: raise ValueError('Passing along a `device_map` requires `low_cpu_mem_usage=True`') if low_cpu_mem_usage: if device_map is not None and (not is_torch_version('>=', '1.10')): raise ValueError('`low_cpu_mem_usage` and `device_map` require PyTorch >= 1.10.') config_path = pretrained_model_name_or_path user_agent = {'diffusers': __version__, 'file_type': 'model', 'framework': 'pytorch'} (config, unused_kwargs, commit_hash) = cls.load_config(config_path, cache_dir=cache_dir, return_unused_kwargs=True, return_commit_hash=True, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, **kwargs) is_sharded = False index_file = None is_local = os.path.isdir(pretrained_model_name_or_path) index_file = _fetch_index_file(is_local=is_local, pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder or '', use_safetensors=use_safetensors, cache_dir=cache_dir, variant=variant, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, user_agent=user_agent, commit_hash=commit_hash) if index_file is not None and index_file.is_file(): is_sharded = True if is_sharded and from_flax: raise ValueError('Loading of sharded checkpoints is not supported when `from_flax=True`.') model_file = None if from_flax: model_file = _get_model_file(pretrained_model_name_or_path, weights_name=FLAX_WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash) model = cls.from_config(config, **unused_kwargs) from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model model = load_flax_checkpoint_in_pytorch_model(model, model_file) else: if is_sharded: (sharded_ckpt_cached_folder, sharded_metadata) = _get_checkpoint_shard_files(pretrained_model_name_or_path, index_file, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder or '') elif use_safetensors and (not is_sharded): try: model_file = _get_model_file(pretrained_model_name_or_path, weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant), cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash) except IOError as e: logger.error(f'An error occurred while trying to fetch {pretrained_model_name_or_path}: {e}') if not allow_pickle: raise logger.warning('Defaulting to unsafe serialization. 
Pass `allow_pickle=False` to raise an error instead.') if model_file is None and (not is_sharded): model_file = _get_model_file(pretrained_model_name_or_path, weights_name=_add_variant(WEIGHTS_NAME, variant), cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, commit_hash=commit_hash) if low_cpu_mem_usage: with accelerate.init_empty_weights(): model = cls.from_config(config, **unused_kwargs) if device_map is None and (not is_sharded): param_device = 'cpu' state_dict = load_state_dict(model_file, variant=variant) model._convert_deprecated_attention_blocks(state_dict) missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) if len(missing_keys) > 0: raise ValueError(f"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are missing: \n {', '.join(missing_keys)}. \n Please make sure to pass `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize those weights or else make sure your checkpoint file is correct.") unexpected_keys = load_model_dict_into_meta(model, state_dict, device=param_device, dtype=torch_dtype, model_name_or_path=pretrained_model_name_or_path) if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning(f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}") else: force_hook = True device_map = _determine_device_map(model, device_map, max_memory, torch_dtype) if device_map is None and is_sharded: device_map = {'': 'cpu'} force_hook = False try: accelerate.load_checkpoint_and_dispatch(model, model_file if not is_sharded else index_file, device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, force_hooks=force_hook, strict=True) except AttributeError as e: if "'Attention' object has no attribute" in str(e): logger.warning(f"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path} was saved with deprecated attention block weight names. We will load it with the deprecated attention block names and convert them on the fly to the new attention block format. Please re-save the model after this conversion, so we don't have to do the on the fly renaming in the future. 
If the model is from a hub checkpoint, please also re-upload it or open a PR on the original repository.") model._temp_convert_self_to_deprecated_attention_blocks() accelerate.load_checkpoint_and_dispatch(model, model_file if not is_sharded else index_file, device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, force_hooks=force_hook, strict=True) model._undo_temp_convert_self_to_deprecated_attention_blocks() else: raise e loading_info = {'missing_keys': [], 'unexpected_keys': [], 'mismatched_keys': [], 'error_msgs': []} else: model = cls.from_config(config, **unused_kwargs) state_dict = load_state_dict(model_file, variant=variant) model._convert_deprecated_attention_blocks(state_dict) (model, missing_keys, unexpected_keys, mismatched_keys, error_msgs) = cls._load_pretrained_model(model, state_dict, model_file, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes) loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys, 'mismatched_keys': mismatched_keys, 'error_msgs': error_msgs} if torch_dtype is not None and (not isinstance(torch_dtype, torch.dtype)): raise ValueError(f'{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}.') elif torch_dtype is not None: model = model.to(torch_dtype) model.register_to_config(_name_or_path=pretrained_model_name_or_path) model.eval() if output_loading_info: return (model, loading_info) return model @classmethod def _load_pretrained_model(cls, model, state_dict: OrderedDict, resolved_archive_file, pretrained_model_name_or_path: Union[str, os.PathLike], ignore_mismatched_sizes: bool=False): model_state_dict = model.state_dict() loaded_keys = list(state_dict.keys()) expected_keys = list(model_state_dict.keys()) original_loaded_keys = loaded_keys missing_keys = list(set(expected_keys) - set(loaded_keys)) unexpected_keys = list(set(loaded_keys) - set(expected_keys)) model_to_load = model def _find_mismatched_keys(state_dict, model_state_dict, loaded_keys, ignore_mismatched_sizes): mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key in loaded_keys: model_key = checkpoint_key if model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape: mismatched_keys.append((checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)) del state_dict[checkpoint_key] return mismatched_keys if state_dict is not None: mismatched_keys = _find_mismatched_keys(state_dict, model_state_dict, original_loaded_keys, ignore_mismatched_sizes) error_msgs = _load_state_dict_into_model(model_to_load, state_dict) if len(error_msgs) > 0: error_msg = '\n\t'.join(error_msgs) if 'size mismatch' in error_msg: error_msg += '\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.' raise RuntimeError(f'Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}') if len(unexpected_keys) > 0: logger.warning(f'Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).') else: logger.info(f'All model checkpoint weights were used when initializing {model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') elif len(mismatched_keys) == 0: logger.info(f'All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.') if len(mismatched_keys) > 0: mismatched_warning = '\n'.join([f'- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated' for (key, shape1, shape2) in mismatched_keys]) logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized because the shapes did not match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') return (model, missing_keys, unexpected_keys, mismatched_keys, error_msgs) @classmethod def _get_signature_keys(cls, obj): parameters = inspect.signature(obj.__init__).parameters required_parameters = {k: v for (k, v) in parameters.items() if v.default == inspect._empty} optional_parameters = set({k for (k, v) in parameters.items() if v.default != inspect._empty}) expected_modules = set(required_parameters.keys()) - {'self'} return (expected_modules, optional_parameters) def _get_no_split_modules(self, device_map: str): _no_split_modules = set() modules_to_check = [self] while len(modules_to_check) > 0: module = modules_to_check.pop(-1) if module.__class__.__name__ not in _no_split_modules: if isinstance(module, ModelMixin): if module._no_split_modules is None: raise ValueError(f"{module.__class__.__name__} does not support `device_map='{device_map}'`. 
To implement support, the model class needs to implement the `_no_split_modules` attribute.") else: _no_split_modules = _no_split_modules | set(module._no_split_modules) modules_to_check += list(module.children()) return list(_no_split_modules) @property def device(self) -> torch.device: return get_parameter_device(self) @property def dtype(self) -> torch.dtype: return get_parameter_dtype(self) def num_parameters(self, only_trainable: bool=False, exclude_embeddings: bool=False) -> int: if exclude_embeddings: embedding_param_names = [f'{name}.weight' for (name, module_type) in self.named_modules() if isinstance(module_type, torch.nn.Embedding)] non_embedding_parameters = [parameter for (name, parameter) in self.named_parameters() if name not in embedding_param_names] return sum((p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)) else: return sum((p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)) def _convert_deprecated_attention_blocks(self, state_dict: OrderedDict) -> None: deprecated_attention_block_paths = [] def recursive_find_attn_block(name, module): if hasattr(module, '_from_deprecated_attn_block') and module._from_deprecated_attn_block: deprecated_attention_block_paths.append(name) for (sub_name, sub_module) in module.named_children(): sub_name = sub_name if name == '' else f'{name}.{sub_name}' recursive_find_attn_block(sub_name, sub_module) recursive_find_attn_block('', self) for path in deprecated_attention_block_paths: if f'{path}.query.weight' in state_dict: state_dict[f'{path}.to_q.weight'] = state_dict.pop(f'{path}.query.weight') if f'{path}.query.bias' in state_dict: state_dict[f'{path}.to_q.bias'] = state_dict.pop(f'{path}.query.bias') if f'{path}.key.weight' in state_dict: state_dict[f'{path}.to_k.weight'] = state_dict.pop(f'{path}.key.weight') if f'{path}.key.bias' in state_dict: state_dict[f'{path}.to_k.bias'] = state_dict.pop(f'{path}.key.bias') if f'{path}.value.weight' in state_dict: state_dict[f'{path}.to_v.weight'] = state_dict.pop(f'{path}.value.weight') if f'{path}.value.bias' in state_dict: state_dict[f'{path}.to_v.bias'] = state_dict.pop(f'{path}.value.bias') if f'{path}.proj_attn.weight' in state_dict: state_dict[f'{path}.to_out.0.weight'] = state_dict.pop(f'{path}.proj_attn.weight') if f'{path}.proj_attn.bias' in state_dict: state_dict[f'{path}.to_out.0.bias'] = state_dict.pop(f'{path}.proj_attn.bias') def _temp_convert_self_to_deprecated_attention_blocks(self) -> None: deprecated_attention_block_modules = [] def recursive_find_attn_block(module): if hasattr(module, '_from_deprecated_attn_block') and module._from_deprecated_attn_block: deprecated_attention_block_modules.append(module) for sub_module in module.children(): recursive_find_attn_block(sub_module) recursive_find_attn_block(self) for module in deprecated_attention_block_modules: module.query = module.to_q module.key = module.to_k module.value = module.to_v module.proj_attn = module.to_out[0] del module.to_q del module.to_k del module.to_v del module.to_out def _undo_temp_convert_self_to_deprecated_attention_blocks(self) -> None: deprecated_attention_block_modules = [] def recursive_find_attn_block(module) -> None: if hasattr(module, '_from_deprecated_attn_block') and module._from_deprecated_attn_block: deprecated_attention_block_modules.append(module) for sub_module in module.children(): recursive_find_attn_block(sub_module) recursive_find_attn_block(self) for module in deprecated_attention_block_modules: module.to_q = module.query 
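# --- Illustrative sketch (not part of the library source): the deprecated attention-block
# handling above boils down to a pure key renaming on the state dict
# (query -> to_q, key -> to_k, value -> to_v, proj_attn -> to_out.0).
# A minimal standalone version of that mapping on a toy checkpoint; the
# `mid_block.attentions.0` path below is an arbitrary example, not taken from the library.
import torch

_OLD_TO_NEW = {
    'query': 'to_q',
    'key': 'to_k',
    'value': 'to_v',
    'proj_attn': 'to_out.0',
}

def _rename_deprecated_attn_keys(state_dict, attn_path='mid_block.attentions.0'):
    # Rename old-style attention weights under `attn_path` in place.
    for old, new in _OLD_TO_NEW.items():
        for suffix in ('weight', 'bias'):
            old_key = f'{attn_path}.{old}.{suffix}'
            if old_key in state_dict:
                state_dict[f'{attn_path}.{new}.{suffix}'] = state_dict.pop(old_key)
    return state_dict

# Toy usage: two old-style tensors get remapped to the new names.
toy = {
    'mid_block.attentions.0.query.weight': torch.zeros(4, 4),
    'mid_block.attentions.0.proj_attn.bias': torch.zeros(4),
}
assert 'mid_block.attentions.0.to_q.weight' in _rename_deprecated_attn_keys(toy)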
module.to_k = module.key module.to_v = module.value module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)]) del module.query del module.key del module.value del module.proj_attn class LegacyModelMixin(ModelMixin): @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): from .model_loading_utils import _fetch_remapped_cls_from_config kwargs_copy = kwargs.copy() cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) subfolder = kwargs.pop('subfolder', None) config_path = pretrained_model_name_or_path user_agent = {'diffusers': __version__, 'file_type': 'model', 'framework': 'pytorch'} (config, _, _) = cls.load_config(config_path, cache_dir=cache_dir, return_unused_kwargs=True, return_commit_hash=True, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, **kwargs) remapped_class = _fetch_remapped_cls_from_config(config, cls) return remapped_class.from_pretrained(pretrained_model_name_or_path, **kwargs_copy) # File: diffusers-main/src/diffusers/models/normalization.py import numbers from typing import Dict, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from ..utils import is_torch_version from .activations import get_activation from .embeddings import CombinedTimestepLabelEmbeddings, PixArtAlphaCombinedTimestepSizeEmbeddings class AdaLayerNorm(nn.Module): def __init__(self, embedding_dim: int, num_embeddings: Optional[int]=None, output_dim: Optional[int]=None, norm_elementwise_affine: bool=False, norm_eps: float=1e-05, chunk_dim: int=0): super().__init__() self.chunk_dim = chunk_dim output_dim = output_dim or embedding_dim * 2 if num_embeddings is not None: self.emb = nn.Embedding(num_embeddings, embedding_dim) else: self.emb = None self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, output_dim) self.norm = nn.LayerNorm(output_dim // 2, norm_eps, norm_elementwise_affine) def forward(self, x: torch.Tensor, timestep: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None) -> torch.Tensor: if self.emb is not None: temb = self.emb(timestep) temb = self.linear(self.silu(temb)) if self.chunk_dim == 1: (shift, scale) = temb.chunk(2, dim=1) shift = shift[:, None, :] scale = scale[:, None, :] else: (scale, shift) = temb.chunk(2, dim=0) x = self.norm(x) * (1 + scale) + shift return x class FP32LayerNorm(nn.LayerNorm): def forward(self, inputs: torch.Tensor) -> torch.Tensor: origin_dtype = inputs.dtype return F.layer_norm(inputs.float(), self.normalized_shape, self.weight.float() if self.weight is not None else None, self.bias.float() if self.bias is not None else None, self.eps).to(origin_dtype) class AdaLayerNormZero(nn.Module): def __init__(self, embedding_dim: int, num_embeddings: Optional[int]=None, norm_type='layer_norm', bias=True): super().__init__() if num_embeddings is not None: self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim) else: self.emb = None self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=bias) if norm_type == 'layer_norm': self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-06) elif norm_type == 'fp32_layer_norm': self.norm = 
FP32LayerNorm(embedding_dim, elementwise_affine=False, bias=False) else: raise ValueError(f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'.") def forward(self, x: torch.Tensor, timestep: Optional[torch.Tensor]=None, class_labels: Optional[torch.LongTensor]=None, hidden_dtype: Optional[torch.dtype]=None, emb: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: if self.emb is not None: emb = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype) emb = self.linear(self.silu(emb)) (shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp) = emb.chunk(6, dim=1) x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] return (x, gate_msa, shift_mlp, scale_mlp, gate_mlp) class AdaLayerNormZeroSingle(nn.Module): def __init__(self, embedding_dim: int, norm_type='layer_norm', bias=True): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 3 * embedding_dim, bias=bias) if norm_type == 'layer_norm': self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-06) else: raise ValueError(f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'.") def forward(self, x: torch.Tensor, emb: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: emb = self.linear(self.silu(emb)) (shift_msa, scale_msa, gate_msa) = emb.chunk(3, dim=1) x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] return (x, gate_msa) class LuminaRMSNormZero(nn.Module): def __init__(self, embedding_dim: int, norm_eps: float, norm_elementwise_affine: bool): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(min(embedding_dim, 1024), 4 * embedding_dim, bias=True) self.norm = RMSNorm(embedding_dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) def forward(self, x: torch.Tensor, emb: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: emb = self.linear(self.silu(emb)) (scale_msa, gate_msa, scale_mlp, gate_mlp) = emb.chunk(4, dim=1) x = self.norm(x) * (1 + scale_msa[:, None]) return (x, gate_msa, scale_mlp, gate_mlp) class AdaLayerNormSingle(nn.Module): def __init__(self, embedding_dim: int, use_additional_conditions: bool=False): super().__init__() self.emb = PixArtAlphaCombinedTimestepSizeEmbeddings(embedding_dim, size_emb_dim=embedding_dim // 3, use_additional_conditions=use_additional_conditions) self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True) def forward(self, timestep: torch.Tensor, added_cond_kwargs: Optional[Dict[str, torch.Tensor]]=None, batch_size: Optional[int]=None, hidden_dtype: Optional[torch.dtype]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: embedded_timestep = self.emb(timestep, **added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_dtype) return (self.linear(self.silu(embedded_timestep)), embedded_timestep) class AdaGroupNorm(nn.Module): def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str]=None, eps: float=1e-05): super().__init__() self.num_groups = num_groups self.eps = eps if act_fn is None: self.act = None else: self.act = get_activation(act_fn) self.linear = nn.Linear(embedding_dim, out_dim * 2) def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: if self.act: emb = self.act(emb) emb = self.linear(emb) emb = emb[:, :, None, None] 
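# --- Illustrative sketch (not library code): the adaptive-norm classes above all share one
# pattern -- project a conditioning embedding with SiLU + Linear, chunk the result into
# shift/scale (and optionally gates), then modulate the normalized activations as
# `norm(x) * (1 + scale) + shift`. A minimal standalone version of that pattern:
import torch
import torch.nn as nn

class TinyAdaLayerNorm(nn.Module):
    def __init__(self, dim: int, cond_dim: int):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(cond_dim, 2 * dim)            # projects to (shift, scale)
        self.norm = nn.LayerNorm(dim, elementwise_affine=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        shift, scale = self.linear(self.silu(cond)).chunk(2, dim=1)
        # Broadcast over the sequence dimension, as the diffusers classes do.
        return self.norm(x) * (1 + scale[:, None]) + shift[:, None]

# Usage on dummy data: batch of 2, sequence of 5 tokens, hidden size 8.
x = torch.randn(2, 5, 8)
cond = torch.randn(2, 16)
out = TinyAdaLayerNorm(8, 16)(x, cond)
assert out.shape == (2, 5, 8)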
(scale, shift) = emb.chunk(2, dim=1) x = F.group_norm(x, self.num_groups, eps=self.eps) x = x * (1 + scale) + shift return x class AdaLayerNormContinuous(nn.Module): def __init__(self, embedding_dim: int, conditioning_embedding_dim: int, elementwise_affine=True, eps=1e-05, bias=True, norm_type='layer_norm'): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias) if norm_type == 'layer_norm': self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias) elif norm_type == 'rms_norm': self.norm = RMSNorm(embedding_dim, eps, elementwise_affine) else: raise ValueError(f'unknown norm_type {norm_type}') def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) (scale, shift) = torch.chunk(emb, 2, dim=1) x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] return x class LuminaLayerNormContinuous(nn.Module): def __init__(self, embedding_dim: int, conditioning_embedding_dim: int, elementwise_affine=True, eps=1e-05, bias=True, norm_type='layer_norm', out_dim: Optional[int]=None): super().__init__() self.silu = nn.SiLU() self.linear_1 = nn.Linear(conditioning_embedding_dim, embedding_dim, bias=bias) if norm_type == 'layer_norm': self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias) else: raise ValueError(f'unknown norm_type {norm_type}') if out_dim is not None: self.linear_2 = nn.Linear(embedding_dim, out_dim, bias=bias) def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: emb = self.linear_1(self.silu(conditioning_embedding).to(x.dtype)) scale = emb x = self.norm(x) * (1 + scale)[:, None, :] if self.linear_2 is not None: x = self.linear_2(x) return x class CogVideoXLayerNormZero(nn.Module): def __init__(self, conditioning_dim: int, embedding_dim: int, elementwise_affine: bool=True, eps: float=1e-05, bias: bool=True) -> None: super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(conditioning_dim, 6 * embedding_dim, bias=bias) self.norm = nn.LayerNorm(embedding_dim, eps=eps, elementwise_affine=elementwise_affine) def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: (shift, scale, gate, enc_shift, enc_scale, enc_gate) = self.linear(self.silu(temb)).chunk(6, dim=1) hidden_states = self.norm(hidden_states) * (1 + scale)[:, None, :] + shift[:, None, :] encoder_hidden_states = self.norm(encoder_hidden_states) * (1 + enc_scale)[:, None, :] + enc_shift[:, None, :] return (hidden_states, encoder_hidden_states, gate[:, None, :], enc_gate[:, None, :]) if is_torch_version('>=', '2.1.0'): LayerNorm = nn.LayerNorm else: class LayerNorm(nn.Module): def __init__(self, dim, eps: float=1e-05, elementwise_affine: bool=True, bias: bool=True): super().__init__() self.eps = eps if isinstance(dim, numbers.Integral): dim = (dim,) self.dim = torch.Size(dim) if elementwise_affine: self.weight = nn.Parameter(torch.ones(dim)) self.bias = nn.Parameter(torch.zeros(dim)) if bias else None else: self.weight = None self.bias = None def forward(self, input): return F.layer_norm(input, self.dim, self.weight, self.bias, self.eps) class RMSNorm(nn.Module): def __init__(self, dim, eps: float, elementwise_affine: bool=True): super().__init__() self.eps = eps if isinstance(dim, numbers.Integral): dim = (dim,) self.dim = torch.Size(dim) if elementwise_affine: self.weight = nn.Parameter(torch.ones(dim)) else: 
self.weight = None def forward(self, hidden_states): input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.eps) if self.weight is not None: if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) hidden_states = hidden_states * self.weight else: hidden_states = hidden_states.to(input_dtype) return hidden_states class GlobalResponseNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True) nx = gx / (gx.mean(dim=-1, keepdim=True) + 1e-06) return self.gamma * (x * nx) + self.beta + x # File: diffusers-main/src/diffusers/models/resnet.py from functools import partial from typing import Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from ..utils import deprecate from .activations import get_activation from .attention_processor import SpatialNorm from .downsampling import Downsample1D, Downsample2D, FirDownsample2D, KDownsample2D, downsample_2d from .normalization import AdaGroupNorm from .upsampling import FirUpsample2D, KUpsample2D, Upsample1D, Upsample2D, upfirdn2d_native, upsample_2d class ResnetBlockCondNorm2D(nn.Module): def __init__(self, *, in_channels: int, out_channels: Optional[int]=None, conv_shortcut: bool=False, dropout: float=0.0, temb_channels: int=512, groups: int=32, groups_out: Optional[int]=None, eps: float=1e-06, non_linearity: str='swish', time_embedding_norm: str='ada_group', output_scale_factor: float=1.0, use_in_shortcut: Optional[bool]=None, up: bool=False, down: bool=False, conv_shortcut_bias: bool=True, conv_2d_out_channels: Optional[int]=None): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.up = up self.down = down self.output_scale_factor = output_scale_factor self.time_embedding_norm = time_embedding_norm if groups_out is None: groups_out = groups if self.time_embedding_norm == 'ada_group': self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps) elif self.time_embedding_norm == 'spatial': self.norm1 = SpatialNorm(in_channels, temb_channels) else: raise ValueError(f' unsupported time_embedding_norm: {self.time_embedding_norm}') self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.time_embedding_norm == 'ada_group': self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps) elif self.time_embedding_norm == 'spatial': self.norm2 = SpatialNorm(out_channels, temb_channels) else: raise ValueError(f' unsupported time_embedding_norm: {self.time_embedding_norm}') self.dropout = torch.nn.Dropout(dropout) conv_2d_out_channels = conv_2d_out_channels or out_channels self.conv2 = nn.Conv2d(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) self.nonlinearity = get_activation(non_linearity) self.upsample = self.downsample = None if self.up: self.upsample = Upsample2D(in_channels, use_conv=False) elif self.down: self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name='op') self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut self.conv_shortcut = None if self.use_in_shortcut: 
self.conv_shortcut = nn.Conv2d(in_channels, conv_2d_out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) hidden_states = input_tensor hidden_states = self.norm1(hidden_states, temb) hidden_states = self.nonlinearity(hidden_states) if self.upsample is not None: if hidden_states.shape[0] >= 64: input_tensor = input_tensor.contiguous() hidden_states = hidden_states.contiguous() input_tensor = self.upsample(input_tensor) hidden_states = self.upsample(hidden_states) elif self.downsample is not None: input_tensor = self.downsample(input_tensor) hidden_states = self.downsample(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states, temb) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor) output_tensor = (input_tensor + hidden_states) / self.output_scale_factor return output_tensor class ResnetBlock2D(nn.Module): def __init__(self, *, in_channels: int, out_channels: Optional[int]=None, conv_shortcut: bool=False, dropout: float=0.0, temb_channels: int=512, groups: int=32, groups_out: Optional[int]=None, pre_norm: bool=True, eps: float=1e-06, non_linearity: str='swish', skip_time_act: bool=False, time_embedding_norm: str='default', kernel: Optional[torch.Tensor]=None, output_scale_factor: float=1.0, use_in_shortcut: Optional[bool]=None, up: bool=False, down: bool=False, conv_shortcut_bias: bool=True, conv_2d_out_channels: Optional[int]=None): super().__init__() if time_embedding_norm == 'ada_group': raise ValueError('This class cannot be used with `time_embedding_norm==ada_group`, please use `ResnetBlockCondNorm2D` instead') if time_embedding_norm == 'spatial': raise ValueError('This class cannot be used with `time_embedding_norm==spatial`, please use `ResnetBlockCondNorm2D` instead') self.pre_norm = True self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.up = up self.down = down self.output_scale_factor = output_scale_factor self.time_embedding_norm = time_embedding_norm self.skip_time_act = skip_time_act if groups_out is None: groups_out = groups self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if temb_channels is not None: if self.time_embedding_norm == 'default': self.time_emb_proj = nn.Linear(temb_channels, out_channels) elif self.time_embedding_norm == 'scale_shift': self.time_emb_proj = nn.Linear(temb_channels, 2 * out_channels) else: raise ValueError(f'unknown time_embedding_norm : {self.time_embedding_norm} ') else: self.time_emb_proj = None self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) self.dropout = torch.nn.Dropout(dropout) conv_2d_out_channels = conv_2d_out_channels or out_channels 
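# --- Illustrative usage sketch (assumptions: `diffusers` is installed and `ResnetBlock2D`
# is importable from `diffusers.models.resnet`; the shapes below are arbitrary dummy
# values chosen for the example, not values used anywhere in the library):
import torch
from diffusers.models.resnet import ResnetBlock2D

block = ResnetBlock2D(in_channels=64, out_channels=128, temb_channels=512)
sample = torch.randn(1, 64, 32, 32)   # (batch, channels, height, width)
temb = torch.randn(1, 512)            # time embedding consumed by `time_emb_proj`
out = block(sample, temb)
# The 1x1 `conv_shortcut` is created automatically because in/out channels differ,
# so the residual addition still lines up and the output has 128 channels.
assert out.shape == (1, 128, 32, 32)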
self.conv2 = nn.Conv2d(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1) self.nonlinearity = get_activation(non_linearity) self.upsample = self.downsample = None if self.up: if kernel == 'fir': fir_kernel = (1, 3, 3, 1) self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel) elif kernel == 'sde_vp': self.upsample = partial(F.interpolate, scale_factor=2.0, mode='nearest') else: self.upsample = Upsample2D(in_channels, use_conv=False) elif self.down: if kernel == 'fir': fir_kernel = (1, 3, 3, 1) self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel) elif kernel == 'sde_vp': self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2) else: self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name='op') self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = nn.Conv2d(in_channels, conv_2d_out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) hidden_states = input_tensor hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) if self.upsample is not None: if hidden_states.shape[0] >= 64: input_tensor = input_tensor.contiguous() hidden_states = hidden_states.contiguous() input_tensor = self.upsample(input_tensor) hidden_states = self.upsample(hidden_states) elif self.downsample is not None: input_tensor = self.downsample(input_tensor) hidden_states = self.downsample(hidden_states) hidden_states = self.conv1(hidden_states) if self.time_emb_proj is not None: if not self.skip_time_act: temb = self.nonlinearity(temb) temb = self.time_emb_proj(temb)[:, :, None, None] if self.time_embedding_norm == 'default': if temb is not None: hidden_states = hidden_states + temb hidden_states = self.norm2(hidden_states) elif self.time_embedding_norm == 'scale_shift': if temb is None: raise ValueError(f' `temb` should not be None when `time_embedding_norm` is {self.time_embedding_norm}') (time_scale, time_shift) = torch.chunk(temb, 2, dim=1) hidden_states = self.norm2(hidden_states) hidden_states = hidden_states * (1 + time_scale) + time_shift else: hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor) output_tensor = (input_tensor + hidden_states) / self.output_scale_factor return output_tensor def rearrange_dims(tensor: torch.Tensor) -> torch.Tensor: if len(tensor.shape) == 2: return tensor[:, :, None] if len(tensor.shape) == 3: return tensor[:, :, None, :] elif len(tensor.shape) == 4: return tensor[:, :, 0, :] else: raise ValueError(f'`len(tensor)`: {len(tensor)} has to be 2, 3 or 4.') class Conv1dBlock(nn.Module): def __init__(self, inp_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], n_groups: int=8, activation: str='mish'): super().__init__() self.conv1d = nn.Conv1d(inp_channels, 
out_channels, kernel_size, padding=kernel_size // 2) self.group_norm = nn.GroupNorm(n_groups, out_channels) self.mish = get_activation(activation) def forward(self, inputs: torch.Tensor) -> torch.Tensor: intermediate_repr = self.conv1d(inputs) intermediate_repr = rearrange_dims(intermediate_repr) intermediate_repr = self.group_norm(intermediate_repr) intermediate_repr = rearrange_dims(intermediate_repr) output = self.mish(intermediate_repr) return output class ResidualTemporalBlock1D(nn.Module): def __init__(self, inp_channels: int, out_channels: int, embed_dim: int, kernel_size: Union[int, Tuple[int, int]]=5, activation: str='mish'): super().__init__() self.conv_in = Conv1dBlock(inp_channels, out_channels, kernel_size) self.conv_out = Conv1dBlock(out_channels, out_channels, kernel_size) self.time_emb_act = get_activation(activation) self.time_emb = nn.Linear(embed_dim, out_channels) self.residual_conv = nn.Conv1d(inp_channels, out_channels, 1) if inp_channels != out_channels else nn.Identity() def forward(self, inputs: torch.Tensor, t: torch.Tensor) -> torch.Tensor: t = self.time_emb_act(t) t = self.time_emb(t) out = self.conv_in(inputs) + rearrange_dims(t) out = self.conv_out(out) return out + self.residual_conv(inputs) class TemporalConvLayer(nn.Module): def __init__(self, in_dim: int, out_dim: Optional[int]=None, dropout: float=0.0, norm_num_groups: int=32): super().__init__() out_dim = out_dim or in_dim self.in_dim = in_dim self.out_dim = out_dim self.conv1 = nn.Sequential(nn.GroupNorm(norm_num_groups, in_dim), nn.SiLU(), nn.Conv3d(in_dim, out_dim, (3, 1, 1), padding=(1, 0, 0))) self.conv2 = nn.Sequential(nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0))) self.conv3 = nn.Sequential(nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0))) self.conv4 = nn.Sequential(nn.GroupNorm(norm_num_groups, out_dim), nn.SiLU(), nn.Dropout(dropout), nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0))) nn.init.zeros_(self.conv4[-1].weight) nn.init.zeros_(self.conv4[-1].bias) def forward(self, hidden_states: torch.Tensor, num_frames: int=1) -> torch.Tensor: hidden_states = hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4) identity = hidden_states hidden_states = self.conv1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.conv3(hidden_states) hidden_states = self.conv4(hidden_states) hidden_states = identity + hidden_states hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape((hidden_states.shape[0] * hidden_states.shape[2], -1) + hidden_states.shape[3:]) return hidden_states class TemporalResnetBlock(nn.Module): def __init__(self, in_channels: int, out_channels: Optional[int]=None, temb_channels: int=512, eps: float=1e-06): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels kernel_size = (3, 1, 1) padding = [k // 2 for k in kernel_size] self.norm1 = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=eps, affine=True) self.conv1 = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding) if temb_channels is not None: self.time_emb_proj = nn.Linear(temb_channels, out_channels) else: self.time_emb_proj = None self.norm2 = torch.nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=eps, affine=True) self.dropout = 
torch.nn.Dropout(0.0) self.conv2 = nn.Conv3d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding) self.nonlinearity = get_activation('silu') self.use_in_shortcut = self.in_channels != out_channels self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = nn.Conv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, input_tensor: torch.Tensor, temb: torch.Tensor) -> torch.Tensor: hidden_states = input_tensor hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if self.time_emb_proj is not None: temb = self.nonlinearity(temb) temb = self.time_emb_proj(temb)[:, :, :, None, None] temb = temb.permute(0, 2, 1, 3, 4) hidden_states = hidden_states + temb hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor) output_tensor = input_tensor + hidden_states return output_tensor class SpatioTemporalResBlock(nn.Module): def __init__(self, in_channels: int, out_channels: Optional[int]=None, temb_channels: int=512, eps: float=1e-06, temporal_eps: Optional[float]=None, merge_factor: float=0.5, merge_strategy='learned_with_images', switch_spatial_to_temporal_mix: bool=False): super().__init__() self.spatial_res_block = ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=eps) self.temporal_res_block = TemporalResnetBlock(in_channels=out_channels if out_channels is not None else in_channels, out_channels=out_channels if out_channels is not None else in_channels, temb_channels=temb_channels, eps=temporal_eps if temporal_eps is not None else eps) self.time_mixer = AlphaBlender(alpha=merge_factor, merge_strategy=merge_strategy, switch_spatial_to_temporal_mix=switch_spatial_to_temporal_mix) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, image_only_indicator: Optional[torch.Tensor]=None): num_frames = image_only_indicator.shape[-1] hidden_states = self.spatial_res_block(hidden_states, temb) (batch_frames, channels, height, width) = hidden_states.shape batch_size = batch_frames // num_frames hidden_states_mix = hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) if temb is not None: temb = temb.reshape(batch_size, num_frames, -1) hidden_states = self.temporal_res_block(hidden_states, temb) hidden_states = self.time_mixer(x_spatial=hidden_states_mix, x_temporal=hidden_states, image_only_indicator=image_only_indicator) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) return hidden_states class AlphaBlender(nn.Module): strategies = ['learned', 'fixed', 'learned_with_images'] def __init__(self, alpha: float, merge_strategy: str='learned_with_images', switch_spatial_to_temporal_mix: bool=False): super().__init__() self.merge_strategy = merge_strategy self.switch_spatial_to_temporal_mix = switch_spatial_to_temporal_mix if merge_strategy not in self.strategies: raise ValueError(f'merge_strategy needs to be in {self.strategies}') if self.merge_strategy == 'fixed': self.register_buffer('mix_factor', torch.Tensor([alpha])) elif self.merge_strategy == 'learned' or 
self.merge_strategy == 'learned_with_images': self.register_parameter('mix_factor', torch.nn.Parameter(torch.Tensor([alpha]))) else: raise ValueError(f'Unknown merge strategy {self.merge_strategy}') def get_alpha(self, image_only_indicator: torch.Tensor, ndims: int) -> torch.Tensor: if self.merge_strategy == 'fixed': alpha = self.mix_factor elif self.merge_strategy == 'learned': alpha = torch.sigmoid(self.mix_factor) elif self.merge_strategy == 'learned_with_images': if image_only_indicator is None: raise ValueError('Please provide image_only_indicator to use learned_with_images merge strategy') alpha = torch.where(image_only_indicator.bool(), torch.ones(1, 1, device=image_only_indicator.device), torch.sigmoid(self.mix_factor)[..., None]) if ndims == 5: alpha = alpha[:, None, :, None, None] elif ndims == 3: alpha = alpha.reshape(-1)[:, None, None] else: raise ValueError(f'Unexpected ndims {ndims}. Dimensions should be 3 or 5') else: raise NotImplementedError return alpha def forward(self, x_spatial: torch.Tensor, x_temporal: torch.Tensor, image_only_indicator: Optional[torch.Tensor]=None) -> torch.Tensor: alpha = self.get_alpha(image_only_indicator, x_spatial.ndim) alpha = alpha.to(x_spatial.dtype) if self.switch_spatial_to_temporal_mix: alpha = 1.0 - alpha x = alpha * x_spatial + (1.0 - alpha) * x_temporal return x # File: diffusers-main/src/diffusers/models/resnet_flax.py import flax.linen as nn import jax import jax.numpy as jnp class FlaxUpsample2D(nn.Module): out_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) def __call__(self, hidden_states): (batch, height, width, channels) = hidden_states.shape hidden_states = jax.image.resize(hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest') hidden_states = self.conv(hidden_states) return hidden_states class FlaxDownsample2D(nn.Module): out_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) return hidden_states class FlaxResnetBlock2D(nn.Module): in_channels: int out_channels: int = None dropout_prob: float = 0.0 use_nin_shortcut: bool = None dtype: jnp.dtype = jnp.float32 def setup(self): out_channels = self.in_channels if self.out_channels is None else self.out_channels self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-05) self.conv1 = nn.Conv(out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype) self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-05) self.dropout = nn.Dropout(self.dropout_prob) self.conv2 = nn.Conv(out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut self.conv_shortcut = None if use_nin_shortcut: self.conv_shortcut = nn.Conv(out_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype) def __call__(self, hidden_states, temb, deterministic=True): residual = hidden_states hidden_states = self.norm1(hidden_states) hidden_states = nn.swish(hidden_states) hidden_states = self.conv1(hidden_states) temb = self.time_emb_proj(nn.swish(temb)) temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1) 
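# --- Illustrative sketch (not library code): `AlphaBlender` above reduces to a learned
# convex combination of the spatial and temporal branches. A minimal standalone version
# of the 'learned' strategy, using dummy 5D video tensors of shape (batch, channels,
# frames, height, width):
import torch

mix_factor = torch.nn.Parameter(torch.tensor([0.5]))   # learnable logit
x_spatial = torch.randn(1, 8, 4, 16, 16)
x_temporal = torch.randn(1, 8, 4, 16, 16)

alpha = torch.sigmoid(mix_factor)                       # squash the logit into (0, 1)
blended = alpha * x_spatial + (1.0 - alpha) * x_temporal
assert blended.shape == x_spatial.shape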
hidden_states = hidden_states + temb hidden_states = self.norm2(hidden_states) hidden_states = nn.swish(hidden_states) hidden_states = self.dropout(hidden_states, deterministic) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: residual = self.conv_shortcut(residual) return hidden_states + residual # File: diffusers-main/src/diffusers/models/transformers/__init__.py from ...utils import is_torch_available if is_torch_available(): from .auraflow_transformer_2d import AuraFlowTransformer2DModel from .cogvideox_transformer_3d import CogVideoXTransformer3DModel from .dit_transformer_2d import DiTTransformer2DModel from .dual_transformer_2d import DualTransformer2DModel from .hunyuan_transformer_2d import HunyuanDiT2DModel from .latte_transformer_3d import LatteTransformer3DModel from .lumina_nextdit2d import LuminaNextDiT2DModel from .pixart_transformer_2d import PixArtTransformer2DModel from .prior_transformer import PriorTransformer from .stable_audio_transformer import StableAudioDiTModel from .t5_film_transformer import T5FilmDecoder from .transformer_2d import Transformer2DModel from .transformer_flux import FluxTransformer2DModel from .transformer_sd3 import SD3Transformer2DModel from .transformer_temporal import TransformerTemporalModel # File: diffusers-main/src/diffusers/models/transformers/auraflow_transformer_2d.py from typing import Any, Dict, Union import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...utils import is_torch_version, logging from ...utils.torch_utils import maybe_allow_in_graph from ..attention_processor import Attention, AttentionProcessor, AuraFlowAttnProcessor2_0, FusedAuraFlowAttnProcessor2_0 from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormZero, FP32LayerNorm logger = logging.get_logger(__name__) def find_multiple(n: int, k: int) -> int: if n % k == 0: return n return n + k - n % k class AuraFlowPatchEmbed(nn.Module): def __init__(self, height=224, width=224, patch_size=16, in_channels=3, embed_dim=768, pos_embed_max_size=None): super().__init__() self.num_patches = height // patch_size * (width // patch_size) self.pos_embed_max_size = pos_embed_max_size self.proj = nn.Linear(patch_size * patch_size * in_channels, embed_dim) self.pos_embed = nn.Parameter(torch.randn(1, pos_embed_max_size, embed_dim) * 0.1) self.patch_size = patch_size (self.height, self.width) = (height // patch_size, width // patch_size) self.base_size = height // patch_size def pe_selection_index_based_on_dim(self, h, w): (h_p, w_p) = (h // self.patch_size, w // self.patch_size) original_pe_indexes = torch.arange(self.pos_embed.shape[1]) (h_max, w_max) = (int(self.pos_embed_max_size ** 0.5), int(self.pos_embed_max_size ** 0.5)) original_pe_indexes = original_pe_indexes.view(h_max, w_max) starth = h_max // 2 - h_p // 2 endh = starth + h_p startw = w_max // 2 - w_p // 2 endw = startw + w_p original_pe_indexes = original_pe_indexes[starth:endh, startw:endw] return original_pe_indexes.flatten() def forward(self, latent): (batch_size, num_channels, height, width) = latent.size() latent = latent.view(batch_size, num_channels, height // self.patch_size, self.patch_size, width // self.patch_size, self.patch_size) latent = latent.permute(0, 2, 4, 1, 3, 5).flatten(-3).flatten(1, 2) latent = self.proj(latent) pe_index = 
self.pe_selection_index_based_on_dim(height, width) return latent + self.pos_embed[:, pe_index] class AuraFlowFeedForward(nn.Module): def __init__(self, dim, hidden_dim=None) -> None: super().__init__() if hidden_dim is None: hidden_dim = 4 * dim final_hidden_dim = int(2 * hidden_dim / 3) final_hidden_dim = find_multiple(final_hidden_dim, 256) self.linear_1 = nn.Linear(dim, final_hidden_dim, bias=False) self.linear_2 = nn.Linear(dim, final_hidden_dim, bias=False) self.out_projection = nn.Linear(final_hidden_dim, dim, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = F.silu(self.linear_1(x)) * self.linear_2(x) x = self.out_projection(x) return x class AuraFlowPreFinalBlock(nn.Module): def __init__(self, embedding_dim: int, conditioning_embedding_dim: int): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=False) def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) (scale, shift) = torch.chunk(emb, 2, dim=1) x = x * (1 + scale)[:, None, :] + shift[:, None, :] return x @maybe_allow_in_graph class AuraFlowSingleTransformerBlock(nn.Module): def __init__(self, dim, num_attention_heads, attention_head_dim): super().__init__() self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type='fp32_layer_norm') processor = AuraFlowAttnProcessor2_0() self.attn = Attention(query_dim=dim, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm='fp32_layer_norm', out_dim=dim, bias=False, out_bias=False, processor=processor) self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff = AuraFlowFeedForward(dim, dim * 4) def forward(self, hidden_states: torch.FloatTensor, temb: torch.FloatTensor): residual = hidden_states (norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp) = self.norm1(hidden_states, emb=temb) attn_output = self.attn(hidden_states=norm_hidden_states) hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output) hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(hidden_states) hidden_states = gate_mlp.unsqueeze(1) * ff_output hidden_states = residual + hidden_states return hidden_states @maybe_allow_in_graph class AuraFlowJointTransformerBlock(nn.Module): def __init__(self, dim, num_attention_heads, attention_head_dim): super().__init__() self.norm1 = AdaLayerNormZero(dim, bias=False, norm_type='fp32_layer_norm') self.norm1_context = AdaLayerNormZero(dim, bias=False, norm_type='fp32_layer_norm') processor = AuraFlowAttnProcessor2_0() self.attn = Attention(query_dim=dim, cross_attention_dim=None, added_kv_proj_dim=dim, added_proj_bias=False, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm='fp32_layer_norm', out_dim=dim, bias=False, out_bias=False, processor=processor, context_pre_only=False) self.norm2 = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff = AuraFlowFeedForward(dim, dim * 4) self.norm2_context = FP32LayerNorm(dim, elementwise_affine=False, bias=False) self.ff_context = AuraFlowFeedForward(dim, dim * 4) def forward(self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor): residual = hidden_states residual_context = encoder_hidden_states (norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp) = self.norm1(hidden_states, emb=temb) (norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, 
c_gate_mlp) = self.norm1_context(encoder_hidden_states, emb=temb) (attn_output, context_attn_output) = self.attn(hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states) hidden_states = self.norm2(residual + gate_msa.unsqueeze(1) * attn_output) hidden_states = hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] hidden_states = gate_mlp.unsqueeze(1) * self.ff(hidden_states) hidden_states = residual + hidden_states encoder_hidden_states = self.norm2_context(residual_context + c_gate_msa.unsqueeze(1) * context_attn_output) encoder_hidden_states = encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] encoder_hidden_states = c_gate_mlp.unsqueeze(1) * self.ff_context(encoder_hidden_states) encoder_hidden_states = residual_context + encoder_hidden_states return (encoder_hidden_states, hidden_states) class AuraFlowTransformer2DModel(ModelMixin, ConfigMixin): _no_split_modules = ['AuraFlowJointTransformerBlock', 'AuraFlowSingleTransformerBlock', 'AuraFlowPatchEmbed'] _supports_gradient_checkpointing = True @register_to_config def __init__(self, sample_size: int=64, patch_size: int=2, in_channels: int=4, num_mmdit_layers: int=4, num_single_dit_layers: int=32, attention_head_dim: int=256, num_attention_heads: int=12, joint_attention_dim: int=2048, caption_projection_dim: int=3072, out_channels: int=4, pos_embed_max_size: int=1024): super().__init__() default_out_channels = in_channels self.out_channels = out_channels if out_channels is not None else default_out_channels self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.pos_embed = AuraFlowPatchEmbed(height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim, pos_embed_max_size=pos_embed_max_size) self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.config.caption_projection_dim, bias=False) self.time_step_embed = Timesteps(num_channels=256, downscale_freq_shift=0, scale=1000, flip_sin_to_cos=True) self.time_step_proj = TimestepEmbedding(in_channels=256, time_embed_dim=self.inner_dim) self.joint_transformer_blocks = nn.ModuleList([AuraFlowJointTransformerBlock(dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim) for i in range(self.config.num_mmdit_layers)]) self.single_transformer_blocks = nn.ModuleList([AuraFlowSingleTransformerBlock(dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim) for _ in range(self.config.num_single_dit_layers)]) self.norm_out = AuraFlowPreFinalBlock(self.inner_dim, self.inner_dim) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=False) self.register_tokens = nn.Parameter(torch.randn(1, 8, self.inner_dim) * 0.02) self.gradient_checkpointing = False @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: 
Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAuraFlowAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def forward(self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, timestep: torch.LongTensor=None, return_dict: bool=True) -> Union[torch.FloatTensor, Transformer2DModelOutput]: (height, width) = hidden_states.shape[-2:] hidden_states = self.pos_embed(hidden_states) temb = self.time_step_embed(timestep).to(dtype=next(self.parameters()).dtype) temb = self.time_step_proj(temb) encoder_hidden_states = self.context_embedder(encoder_hidden_states) encoder_hidden_states = torch.cat([self.register_tokens.repeat(encoder_hidden_states.size(0), 1, 1), encoder_hidden_states], dim=1) for (index_block, block) in enumerate(self.joint_transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} (encoder_hidden_states, hidden_states) = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, encoder_hidden_states, temb, **ckpt_kwargs) else: (encoder_hidden_states, hidden_states) = block(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb) if len(self.single_transformer_blocks) > 0: encoder_seq_len = encoder_hidden_states.size(1) combined_hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) for (index_block, block) in enumerate(self.single_transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} 
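# Gradient checkpointing trades compute for memory: each block's activations are dropped in the
# forward pass and recomputed during backward. Non-reentrant checkpointing (use_reentrant=False)
# is only requested on torch >= 1.11, where it is supported.
# Illustrative note (assumption, not part of this file): callers would typically turn this path on
# via model.enable_gradient_checkpointing() before training rather than flipping the flag directly.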
combined_hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(block), combined_hidden_states, temb, **ckpt_kwargs) else: combined_hidden_states = block(hidden_states=combined_hidden_states, temb=temb) hidden_states = combined_hidden_states[:, encoder_seq_len:] hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) patch_size = self.config.patch_size out_channels = self.config.out_channels height = height // patch_size width = width // patch_size hidden_states = hidden_states.reshape(shape=(hidden_states.shape[0], height, width, patch_size, patch_size, out_channels)) hidden_states = torch.einsum('nhwpqc->nchpwq', hidden_states) output = hidden_states.reshape(shape=(hidden_states.shape[0], out_channels, height * patch_size, width * patch_size)) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/transformers/cogvideox_transformer_3d.py from typing import Any, Dict, Optional, Tuple, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import is_torch_version, logging from ...utils.torch_utils import maybe_allow_in_graph from ..attention import Attention, FeedForward from ..attention_processor import AttentionProcessor, CogVideoXAttnProcessor2_0, FusedCogVideoXAttnProcessor2_0 from ..embeddings import CogVideoXPatchEmbed, TimestepEmbedding, Timesteps from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNorm, CogVideoXLayerNormZero logger = logging.get_logger(__name__) @maybe_allow_in_graph class CogVideoXBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, time_embed_dim: int, dropout: float=0.0, activation_fn: str='gelu-approximate', attention_bias: bool=False, qk_norm: bool=True, norm_elementwise_affine: bool=True, norm_eps: float=1e-05, final_dropout: bool=True, ff_inner_dim: Optional[int]=None, ff_bias: bool=True, attention_out_bias: bool=True): super().__init__() self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) self.attn1 = Attention(query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm='layer_norm' if qk_norm else None, eps=1e-06, bias=attention_bias, out_bias=attention_out_bias, processor=CogVideoXAttnProcessor2_0()) self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias) def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]]=None) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) (norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa) = self.norm1(hidden_states, encoder_hidden_states, temb) (attn_hidden_states, attn_encoder_hidden_states) = self.attn1(hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb) hidden_states = hidden_states + gate_msa * attn_hidden_states encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states (norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff) = self.norm2(hidden_states, encoder_hidden_states, temb) norm_hidden_states = 
torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1) ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:] encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length] return (hidden_states, encoder_hidden_states) class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, num_attention_heads: int=30, attention_head_dim: int=64, in_channels: int=16, out_channels: Optional[int]=16, flip_sin_to_cos: bool=True, freq_shift: int=0, time_embed_dim: int=512, text_embed_dim: int=4096, num_layers: int=30, dropout: float=0.0, attention_bias: bool=True, sample_width: int=90, sample_height: int=60, sample_frames: int=49, patch_size: int=2, temporal_compression_ratio: int=4, max_text_seq_length: int=226, activation_fn: str='gelu-approximate', timestep_activation_fn: str='silu', norm_elementwise_affine: bool=True, norm_eps: float=1e-05, spatial_interpolation_scale: float=1.875, temporal_interpolation_scale: float=1.0, use_rotary_positional_embeddings: bool=False): super().__init__() inner_dim = num_attention_heads * attention_head_dim self.patch_embed = CogVideoXPatchEmbed(patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, text_embed_dim=text_embed_dim, bias=True, sample_width=sample_width, sample_height=sample_height, sample_frames=sample_frames, temporal_compression_ratio=temporal_compression_ratio, max_text_seq_length=max_text_seq_length, spatial_interpolation_scale=spatial_interpolation_scale, temporal_interpolation_scale=temporal_interpolation_scale, use_positional_embeddings=not use_rotary_positional_embeddings) self.embedding_dropout = nn.Dropout(dropout) self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift) self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn) self.transformer_blocks = nn.ModuleList([CogVideoXBlock(dim=inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, time_embed_dim=time_embed_dim, dropout=dropout, activation_fn=activation_fn, attention_bias=attention_bias, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps) for _ in range(num_layers)]) self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine) self.norm_out = AdaLayerNorm(embedding_dim=time_embed_dim, output_dim=2 * inner_dim, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, chunk_dim=1) self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels) self.gradient_checkpointing = False def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the 
number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedCogVideoXAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, timestep: Union[int, float, torch.LongTensor], timestep_cond: Optional[torch.Tensor]=None, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, return_dict: bool=True): (batch_size, num_frames, channels, height, width) = hidden_states.shape timesteps = timestep t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_embedding(t_emb, timestep_cond) hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) hidden_states = self.embedding_dropout(hidden_states) text_seq_length = encoder_hidden_states.shape[1] encoder_hidden_states = hidden_states[:, :text_seq_length] hidden_states = hidden_states[:, text_seq_length:] for (i, block) in enumerate(self.transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} (hidden_states, encoder_hidden_states) = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, encoder_hidden_states, emb, image_rotary_emb, **ckpt_kwargs) else: (hidden_states, encoder_hidden_states) = block(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=emb, image_rotary_emb=image_rotary_emb) if not self.config.use_rotary_positional_embeddings: hidden_states = self.norm_final(hidden_states) else: hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) hidden_states = self.norm_final(hidden_states) hidden_states = hidden_states[:, text_seq_length:] hidden_states = self.norm_out(hidden_states, temb=emb) hidden_states = self.proj_out(hidden_states) p = self.config.patch_size output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, channels, p, p) output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/transformers/dit_transformer_2d.py from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import 
ConfigMixin, register_to_config from ...utils import is_torch_version, logging from ..attention import BasicTransformerBlock from ..embeddings import PatchEmbed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin logger = logging.get_logger(__name__) class DiTTransformer2DModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, num_attention_heads: int=16, attention_head_dim: int=72, in_channels: int=4, out_channels: Optional[int]=None, num_layers: int=28, dropout: float=0.0, norm_num_groups: int=32, attention_bias: bool=True, sample_size: int=32, patch_size: int=2, activation_fn: str='gelu-approximate', num_embeds_ada_norm: Optional[int]=1000, upcast_attention: bool=False, norm_type: str='ada_norm_zero', norm_elementwise_affine: bool=False, norm_eps: float=1e-05): super().__init__() if norm_type != 'ada_norm_zero': raise NotImplementedError(f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'.") elif norm_type == 'ada_norm_zero' and num_embeds_ada_norm is None: raise ValueError(f'When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None.') self.attention_head_dim = attention_head_dim self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.out_channels = in_channels if out_channels is None else out_channels self.gradient_checkpointing = False self.height = self.config.sample_size self.width = self.config.sample_size self.patch_size = self.config.patch_size self.pos_embed = PatchEmbed(height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps) for _ in range(self.config.num_layers)]) self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-06) self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim) self.proj_out_2 = nn.Linear(self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def forward(self, hidden_states: torch.Tensor, timestep: Optional[torch.LongTensor]=None, class_labels: Optional[torch.LongTensor]=None, cross_attention_kwargs: Dict[str, Any]=None, return_dict: bool=True): (height, width) = (hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size) hidden_states = self.pos_embed(hidden_states) for block in self.transformer_blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, None, 
None, None, timestep, cross_attention_kwargs, class_labels, **ckpt_kwargs) else: hidden_states = block(hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels) conditioning = self.transformer_blocks[0].norm1.emb(timestep, class_labels, hidden_dtype=hidden_states.dtype) (shift, scale) = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] hidden_states = self.proj_out_2(hidden_states) height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape(shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)) hidden_states = torch.einsum('nhwpqc->nchpwq', hidden_states) output = hidden_states.reshape(shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/transformers/dual_transformer_2d.py from typing import Optional from torch import nn from ..modeling_outputs import Transformer2DModelOutput from .transformer_2d import Transformer2DModel class DualTransformer2DModel(nn.Module): def __init__(self, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, num_layers: int=1, dropout: float=0.0, norm_num_groups: int=32, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, sample_size: Optional[int]=None, num_vector_embeds: Optional[int]=None, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None): super().__init__() self.transformers = nn.ModuleList([Transformer2DModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm) for _ in range(2)]) self.mix_ratio = 0.5 self.condition_lengths = [77, 257] self.transformer_index_for_condition = [1, 0] def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool=True): input_states = hidden_states encoded_states = [] tokens_start = 0 for i in range(2): condition_state = encoder_hidden_states[:, tokens_start:tokens_start + self.condition_lengths[i]] transformer_index = self.transformer_index_for_condition[i] encoded_state = self.transformers[transformer_index](input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] encoded_states.append(encoded_state - input_states) tokens_start += self.condition_lengths[i] output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) output_states = output_states + input_states if not return_dict: return (output_states,) return Transformer2DModelOutput(sample=output_states) # File: diffusers-main/src/diffusers/models/transformers/hunyuan_transformer_2d.py from typing import Dict, Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ...utils.torch_utils import maybe_allow_in_graph from ..attention import FeedForward from ..attention_processor 
import Attention, AttentionProcessor, FusedHunyuanAttnProcessor2_0, HunyuanAttnProcessor2_0 from ..embeddings import HunyuanCombinedTimestepTextSizeStyleEmbedding, PatchEmbed, PixArtAlphaTextProjection from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormContinuous, FP32LayerNorm logger = logging.get_logger(__name__) class AdaLayerNormShift(nn.Module): def __init__(self, embedding_dim: int, elementwise_affine=True, eps=1e-06): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, embedding_dim) self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, eps=eps) def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: shift = self.linear(self.silu(emb.to(torch.float32)).to(emb.dtype)) x = self.norm(x) + shift.unsqueeze(dim=1) return x @maybe_allow_in_graph class HunyuanDiTBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, cross_attention_dim: int=1024, dropout=0.0, activation_fn: str='geglu', norm_elementwise_affine: bool=True, norm_eps: float=1e-06, final_dropout: bool=False, ff_inner_dim: Optional[int]=None, ff_bias: bool=True, skip: bool=False, qk_norm: bool=True): super().__init__() self.norm1 = AdaLayerNormShift(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention(query_dim=dim, cross_attention_dim=None, dim_head=dim // num_attention_heads, heads=num_attention_heads, qk_norm='layer_norm' if qk_norm else None, eps=1e-06, bias=True, processor=HunyuanAttnProcessor2_0()) self.norm2 = FP32LayerNorm(dim, norm_eps, norm_elementwise_affine) self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim, dim_head=dim // num_attention_heads, heads=num_attention_heads, qk_norm='layer_norm' if qk_norm else None, eps=1e-06, bias=True, processor=HunyuanAttnProcessor2_0()) self.norm3 = FP32LayerNorm(dim, norm_eps, norm_elementwise_affine) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias) if skip: self.skip_norm = FP32LayerNorm(2 * dim, norm_eps, elementwise_affine=True) self.skip_linear = nn.Linear(2 * dim, dim) else: self.skip_linear = None self._chunk_size = None self._chunk_dim = 0 def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int=0): self._chunk_size = chunk_size self._chunk_dim = dim def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, temb: Optional[torch.Tensor]=None, image_rotary_emb=None, skip=None) -> torch.Tensor: if self.skip_linear is not None: cat = torch.cat([hidden_states, skip], dim=-1) cat = self.skip_norm(cat) hidden_states = self.skip_linear(cat) norm_hidden_states = self.norm1(hidden_states, temb) attn_output = self.attn1(norm_hidden_states, image_rotary_emb=image_rotary_emb) hidden_states = hidden_states + attn_output hidden_states = hidden_states + self.attn2(self.norm2(hidden_states), encoder_hidden_states=encoder_hidden_states, image_rotary_emb=image_rotary_emb) mlp_inputs = self.norm3(hidden_states) hidden_states = hidden_states + self.ff(mlp_inputs) return hidden_states class HunyuanDiT2DModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, patch_size: Optional[int]=None, activation_fn: str='gelu-approximate', sample_size=32, hidden_size=1152, num_layers: int=28, mlp_ratio: float=4.0, learn_sigma: 
bool=True, cross_attention_dim: int=1024, norm_type: str='layer_norm', cross_attention_dim_t5: int=2048, pooled_projection_dim: int=1024, text_len: int=77, text_len_t5: int=256, use_style_cond_and_image_meta_size: bool=True): super().__init__() self.out_channels = in_channels * 2 if learn_sigma else in_channels self.num_heads = num_attention_heads self.inner_dim = num_attention_heads * attention_head_dim self.text_embedder = PixArtAlphaTextProjection(in_features=cross_attention_dim_t5, hidden_size=cross_attention_dim_t5 * 4, out_features=cross_attention_dim, act_fn='silu_fp32') self.text_embedding_padding = nn.Parameter(torch.randn(text_len + text_len_t5, cross_attention_dim, dtype=torch.float32)) self.pos_embed = PatchEmbed(height=sample_size, width=sample_size, in_channels=in_channels, embed_dim=hidden_size, patch_size=patch_size, pos_embed_type=None) self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding(hidden_size, pooled_projection_dim=pooled_projection_dim, seq_len=text_len_t5, cross_attention_dim=cross_attention_dim_t5, use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size) self.blocks = nn.ModuleList([HunyuanDiTBlock(dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, activation_fn=activation_fn, ff_inner_dim=int(self.inner_dim * mlp_ratio), cross_attention_dim=cross_attention_dim, qk_norm=True, skip=layer > num_layers // 2) for layer in range(num_layers)]) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-06) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedHunyuanAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): self.set_attn_processor(HunyuanAttnProcessor2_0()) def forward(self, hidden_states, timestep, encoder_hidden_states=None, text_embedding_mask=None, encoder_hidden_states_t5=None, text_embedding_mask_t5=None, image_meta_size=None, style=None, image_rotary_emb=None, controlnet_block_samples=None, return_dict=True): (height, width) = hidden_states.shape[-2:] hidden_states = self.pos_embed(hidden_states) temb = self.time_extra_emb(timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype) (batch_size, sequence_length, _) = encoder_hidden_states_t5.shape encoder_hidden_states_t5 = self.text_embedder(encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1])) encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1) encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1) text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1) text_embedding_mask = text_embedding_mask.unsqueeze(2).bool() encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding) skips = [] for (layer, block) in enumerate(self.blocks): if layer > self.config.num_layers // 2: if controlnet_block_samples is not None: skip = skips.pop() + controlnet_block_samples.pop() else: skip = skips.pop() hidden_states = block(hidden_states, temb=temb, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=image_rotary_emb, skip=skip) else: hidden_states = block(hidden_states, temb=temb, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=image_rotary_emb) if layer < self.config.num_layers // 2 - 1: skips.append(hidden_states) if controlnet_block_samples is not None and len(controlnet_block_samples) != 0: raise ValueError('The number of controls is not equal to the number of skip connections.') hidden_states = self.norm_out(hidden_states, temb.to(torch.float32)) hidden_states = self.proj_out(hidden_states) patch_size = self.pos_embed.patch_size height = height // patch_size width = width // patch_size hidden_states = hidden_states.reshape(shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels)) hidden_states = torch.einsum('nhwpqc->nchpwq', hidden_states) output = hidden_states.reshape(shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) def enable_forward_chunking(self, chunk_size: Optional[int]=None, dim: int=0) -> None: if dim not in [0, 1]: raise ValueError(f'Make sure to set `dim` to either 0 or 1, not {dim}') chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): 
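# Recursively visit every child module and hand the requested chunk size to any submodule that
# implements set_chunk_feed_forward, so the feed-forward layers in each HunyuanDiTBlock process
# the sequence in chunks along `dim` instead of all at once.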
fn_recursive_feed_forward(module, chunk_size, dim) def disable_forward_chunking(self): def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0) # File: diffusers-main/src/diffusers/models/transformers/latte_transformer_3d.py from typing import Optional import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.embeddings import PixArtAlphaTextProjection, get_1d_sincos_pos_embed_from_grid from ..attention import BasicTransformerBlock from ..embeddings import PatchEmbed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormSingle class LatteTransformer3DModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True '' @register_to_config def __init__(self, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, out_channels: Optional[int]=None, num_layers: int=1, dropout: float=0.0, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, sample_size: int=64, patch_size: Optional[int]=None, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None, norm_type: str='layer_norm', norm_elementwise_affine: bool=True, norm_eps: float=1e-05, caption_channels: int=None, video_length: int=16): super().__init__() inner_dim = num_attention_heads * attention_head_dim self.height = sample_size self.width = sample_size interpolation_scale = self.config.sample_size // 64 interpolation_scale = max(interpolation_scale, 1) self.pos_embed = PatchEmbed(height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, interpolation_scale=interpolation_scale) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, norm_type=norm_type, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps) for d in range(num_layers)]) self.temporal_transformer_blocks = nn.ModuleList([BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=None, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, norm_type=norm_type, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps) for d in range(num_layers)]) self.out_channels = in_channels if out_channels is None else out_channels self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-06) self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim ** 0.5) self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels) self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=False) self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim) temp_pos_embed = get_1d_sincos_pos_embed_from_grid(inner_dim, torch.arange(0, video_length).unsqueeze(1)) self.register_buffer('temp_pos_embed', torch.from_numpy(temp_pos_embed).float().unsqueeze(0), persistent=False) self.gradient_checkpointing = False def 
_set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value def forward(self, hidden_states: torch.Tensor, timestep: Optional[torch.LongTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, enable_temporal_attentions: bool=True, return_dict: bool=True): (batch_size, channels, num_frame, height, width) = hidden_states.shape hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(-1, channels, height, width) (height, width) = (hidden_states.shape[-2] // self.config.patch_size, hidden_states.shape[-1] // self.config.patch_size) num_patches = height * width hidden_states = self.pos_embed(hidden_states) added_cond_kwargs = {'resolution': None, 'aspect_ratio': None} (timestep, embedded_timestep) = self.adaln_single(timestep, added_cond_kwargs=added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype) encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states_spatial = encoder_hidden_states.repeat_interleave(num_frame, dim=0).view(-1, encoder_hidden_states.shape[-2], encoder_hidden_states.shape[-1]) timestep_spatial = timestep.repeat_interleave(num_frame, dim=0).view(-1, timestep.shape[-1]) timestep_temp = timestep.repeat_interleave(num_patches, dim=0).view(-1, timestep.shape[-1]) for (i, (spatial_block, temp_block)) in enumerate(zip(self.transformer_blocks, self.temporal_transformer_blocks)): if self.training and self.gradient_checkpointing: hidden_states = torch.utils.checkpoint.checkpoint(spatial_block, hidden_states, None, encoder_hidden_states_spatial, encoder_attention_mask, timestep_spatial, None, None, use_reentrant=False) else: hidden_states = spatial_block(hidden_states, None, encoder_hidden_states_spatial, encoder_attention_mask, timestep_spatial, None, None) if enable_temporal_attentions: hidden_states = hidden_states.reshape(batch_size, -1, hidden_states.shape[-2], hidden_states.shape[-1]).permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(-1, hidden_states.shape[-2], hidden_states.shape[-1]) if i == 0 and num_frame > 1: hidden_states = hidden_states + self.temp_pos_embed if self.training and self.gradient_checkpointing: hidden_states = torch.utils.checkpoint.checkpoint(temp_block, hidden_states, None, None, None, timestep_temp, None, None, use_reentrant=False) else: hidden_states = temp_block(hidden_states, None, None, None, timestep_temp, None, None) hidden_states = hidden_states.reshape(batch_size, -1, hidden_states.shape[-2], hidden_states.shape[-1]).permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(-1, hidden_states.shape[-2], hidden_states.shape[-1]) embedded_timestep = embedded_timestep.repeat_interleave(num_frame, dim=0).view(-1, embedded_timestep.shape[-1]) (shift, scale) = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) hidden_states = hidden_states * (1 + scale) + shift hidden_states = self.proj_out(hidden_states) if self.adaln_single is None: height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape(shape=(-1, height, width, self.config.patch_size, self.config.patch_size, self.out_channels)) hidden_states = torch.einsum('nhwpqc->nchpwq', hidden_states) output = hidden_states.reshape(shape=(-1, self.out_channels, height * self.config.patch_size, width * self.config.patch_size)) output = output.reshape(batch_size, -1, output.shape[-3], output.shape[-2], output.shape[-1]).permute(0, 2, 1, 3, 4) if not 
return_dict: return (output,) return Transformer2DModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/transformers/lumina_nextdit2d.py from typing import Any, Dict, Optional import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ..attention import LuminaFeedForward from ..attention_processor import Attention, LuminaAttnProcessor2_0 from ..embeddings import LuminaCombinedTimestepCaptionEmbedding, LuminaPatchEmbed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import LuminaLayerNormContinuous, LuminaRMSNormZero, RMSNorm logger = logging.get_logger(__name__) class LuminaNextDiTBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, num_kv_heads: int, multiple_of: int, ffn_dim_multiplier: float, norm_eps: float, qk_norm: bool, cross_attention_dim: int, norm_elementwise_affine: bool=True) -> None: super().__init__() self.head_dim = dim // num_attention_heads self.gate = nn.Parameter(torch.zeros([num_attention_heads])) self.attn1 = Attention(query_dim=dim, cross_attention_dim=None, dim_head=dim // num_attention_heads, qk_norm='layer_norm_across_heads' if qk_norm else None, heads=num_attention_heads, kv_heads=num_kv_heads, eps=1e-05, bias=False, out_bias=False, processor=LuminaAttnProcessor2_0()) self.attn1.to_out = nn.Identity() self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim, dim_head=dim // num_attention_heads, qk_norm='layer_norm_across_heads' if qk_norm else None, heads=num_attention_heads, kv_heads=num_kv_heads, eps=1e-05, bias=False, out_bias=False, processor=LuminaAttnProcessor2_0()) self.feed_forward = LuminaFeedForward(dim=dim, inner_dim=4 * dim, multiple_of=multiple_of, ffn_dim_multiplier=ffn_dim_multiplier) self.norm1 = LuminaRMSNormZero(embedding_dim=dim, norm_eps=norm_eps, norm_elementwise_affine=norm_elementwise_affine) self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.norm1_context = RMSNorm(cross_attention_dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, image_rotary_emb: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_mask: torch.Tensor, temb: torch.Tensor, cross_attention_kwargs: Optional[Dict[str, Any]]=None): residual = hidden_states (norm_hidden_states, gate_msa, scale_mlp, gate_mlp) = self.norm1(hidden_states, temb) self_attn_output = self.attn1(hidden_states=norm_hidden_states, encoder_hidden_states=norm_hidden_states, attention_mask=attention_mask, query_rotary_emb=image_rotary_emb, key_rotary_emb=image_rotary_emb, **cross_attention_kwargs) norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states) cross_attn_output = self.attn2(hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=encoder_mask, query_rotary_emb=image_rotary_emb, key_rotary_emb=None, **cross_attention_kwargs) cross_attn_output = cross_attn_output * self.gate.tanh().view(1, 1, -1, 1) mixed_attn_output = self_attn_output + cross_attn_output mixed_attn_output = mixed_attn_output.flatten(-2) hidden_states = self.attn2.to_out[0](mixed_attn_output) hidden_states = residual + gate_msa.unsqueeze(1).tanh() * self.norm2(hidden_states) 
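# The attention branch is folded back through a tanh-gated residual (gate_msa comes from
# LuminaRMSNormZero). The feed-forward branch below mirrors this pattern: scale_mlp rescales the
# RMS-normalized input to the FFN, and gate_mlp gates the RMS-normalized FFN output before the
# final residual add.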
mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1))) hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output) return hidden_states class LuminaNextDiT2DModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, sample_size: int=128, patch_size: Optional[int]=2, in_channels: Optional[int]=4, hidden_size: Optional[int]=2304, num_layers: Optional[int]=32, num_attention_heads: Optional[int]=32, num_kv_heads: Optional[int]=None, multiple_of: Optional[int]=256, ffn_dim_multiplier: Optional[float]=None, norm_eps: Optional[float]=1e-05, learn_sigma: Optional[bool]=True, qk_norm: Optional[bool]=True, cross_attention_dim: Optional[int]=2048, scaling_factor: Optional[float]=1.0) -> None: super().__init__() self.sample_size = sample_size self.patch_size = patch_size self.in_channels = in_channels self.out_channels = in_channels * 2 if learn_sigma else in_channels self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.head_dim = hidden_size // num_attention_heads self.scaling_factor = scaling_factor self.patch_embedder = LuminaPatchEmbed(patch_size=patch_size, in_channels=in_channels, embed_dim=hidden_size, bias=True) self.pad_token = nn.Parameter(torch.empty(hidden_size)) self.time_caption_embed = LuminaCombinedTimestepCaptionEmbedding(hidden_size=min(hidden_size, 1024), cross_attention_dim=cross_attention_dim) self.layers = nn.ModuleList([LuminaNextDiTBlock(hidden_size, num_attention_heads, num_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, qk_norm, cross_attention_dim) for _ in range(num_layers)]) self.norm_out = LuminaLayerNormContinuous(embedding_dim=hidden_size, conditioning_embedding_dim=min(hidden_size, 1024), elementwise_affine=False, eps=1e-06, bias=True, out_dim=patch_size * patch_size * self.out_channels) assert hidden_size // num_attention_heads % 4 == 0, '2d rope needs head dim to be divisible by 4' def forward(self, hidden_states: torch.Tensor, timestep: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_mask: torch.Tensor, image_rotary_emb: torch.Tensor, cross_attention_kwargs: Dict[str, Any]=None, return_dict=True) -> torch.Tensor: (hidden_states, mask, img_size, image_rotary_emb) = self.patch_embedder(hidden_states, image_rotary_emb) image_rotary_emb = image_rotary_emb.to(hidden_states.device) temb = self.time_caption_embed(timestep, encoder_hidden_states, encoder_mask) encoder_mask = encoder_mask.bool() for layer in self.layers: hidden_states = layer(hidden_states, mask, image_rotary_emb, encoder_hidden_states, encoder_mask, temb=temb, cross_attention_kwargs=cross_attention_kwargs) hidden_states = self.norm_out(hidden_states, temb) height_tokens = width_tokens = self.patch_size (height, width) = img_size[0] batch_size = hidden_states.size(0) sequence_length = height // height_tokens * (width // width_tokens) hidden_states = hidden_states[:, :sequence_length].view(batch_size, height // height_tokens, width // width_tokens, height_tokens, width_tokens, self.out_channels) output = hidden_states.permute(0, 5, 1, 3, 2, 4).flatten(4, 5).flatten(2, 3) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/transformers/pixart_transformer_2d.py from typing import Any, Dict, Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import is_torch_version, logging from ..attention import BasicTransformerBlock from 
..attention_processor import Attention, AttentionProcessor, AttnProcessor, FusedAttnProcessor2_0 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormSingle logger = logging.get_logger(__name__) class PixArtTransformer2DModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True _no_split_modules = ['BasicTransformerBlock', 'PatchEmbed'] @register_to_config def __init__(self, num_attention_heads: int=16, attention_head_dim: int=72, in_channels: int=4, out_channels: Optional[int]=8, num_layers: int=28, dropout: float=0.0, norm_num_groups: int=32, cross_attention_dim: Optional[int]=1152, attention_bias: bool=True, sample_size: int=128, patch_size: int=2, activation_fn: str='gelu-approximate', num_embeds_ada_norm: Optional[int]=1000, upcast_attention: bool=False, norm_type: str='ada_norm_single', norm_elementwise_affine: bool=False, norm_eps: float=1e-06, interpolation_scale: Optional[int]=None, use_additional_conditions: Optional[bool]=None, caption_channels: Optional[int]=None, attention_type: Optional[str]='default'): super().__init__() if norm_type != 'ada_norm_single': raise NotImplementedError(f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'.") elif norm_type == 'ada_norm_single' and num_embeds_ada_norm is None: raise ValueError(f'When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None.') self.attention_head_dim = attention_head_dim self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.out_channels = in_channels if out_channels is None else out_channels if use_additional_conditions is None: if sample_size == 128: use_additional_conditions = True else: use_additional_conditions = False self.use_additional_conditions = use_additional_conditions self.gradient_checkpointing = False self.height = self.config.sample_size self.width = self.config.sample_size interpolation_scale = self.config.interpolation_scale if self.config.interpolation_scale is not None else max(self.config.sample_size // 64, 1) self.pos_embed = PatchEmbed(height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim, interpolation_scale=interpolation_scale) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type) for _ in range(self.config.num_layers)]) self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-06) self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim ** 0.5) self.proj_out = nn.Linear(self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels) self.adaln_single = AdaLayerNormSingle(self.inner_dim, use_additional_conditions=self.use_additional_conditions) self.caption_projection = None if self.config.caption_channels is not None: self.caption_projection = 
PixArtAlphaTextProjection(in_features=self.config.caption_channels, hidden_size=self.inner_dim) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): self.set_attn_processor(AttnProcessor()) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, timestep: Optional[torch.LongTensor]=None, added_cond_kwargs: Dict[str, torch.Tensor]=None, cross_attention_kwargs: Dict[str, Any]=None, attention_mask: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, return_dict: bool=True): if self.use_additional_conditions and added_cond_kwargs is None: raise ValueError('`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`.') if attention_mask is not None and attention_mask.ndim == 2: attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) batch_size = hidden_states.shape[0] (height, width) = (hidden_states.shape[-2] // self.config.patch_size, hidden_states.shape[-1] // self.config.patch_size) hidden_states = self.pos_embed(hidden_states) (timestep, embedded_timestep) = self.adaln_single(timestep, 
added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype) if self.caption_projection is not None: encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) for block in self.transformer_blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, cross_attention_kwargs, None, **ckpt_kwargs) else: hidden_states = block(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=None) (shift, scale) = (self.scale_shift_table[None] + embedded_timestep[:, None].to(self.scale_shift_table.device)).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) hidden_states = hidden_states * (1 + scale.to(hidden_states.device)) + shift.to(hidden_states.device) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.squeeze(1) hidden_states = hidden_states.reshape(shape=(-1, height, width, self.config.patch_size, self.config.patch_size, self.out_channels)) hidden_states = torch.einsum('nhwpqc->nchpwq', hidden_states) output = hidden_states.reshape(shape=(-1, self.out_channels, height * self.config.patch_size, width * self.config.patch_size)) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/transformers/prior_transformer.py from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin from ...utils import BaseOutput from ..attention import BasicTransformerBlock from ..attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin @dataclass class PriorTransformerOutput(BaseOutput): predicted_image_embedding: torch.Tensor class PriorTransformer(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): @register_to_config def __init__(self, num_attention_heads: int=32, attention_head_dim: int=64, num_layers: int=20, embedding_dim: int=768, num_embeddings=77, additional_embeddings=4, dropout: float=0.0, time_embed_act_fn: str='silu', norm_in_type: Optional[str]=None, embedding_proj_norm_type: Optional[str]=None, encoder_hid_proj_type: Optional[str]='linear', added_emb_type: Optional[str]='prd', time_embed_dim: Optional[int]=None, embedding_proj_dim: Optional[int]=None, clip_embed_dim: Optional[int]=None): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.additional_embeddings = additional_embeddings time_embed_dim = time_embed_dim or inner_dim 
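# When not configured explicitly, the projection widths fall back to defaults: embedding_proj_dim
# and clip_embed_dim default to embedding_dim, just as time_embed_dim above defaults to the
# transformer's inner_dim.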
embedding_proj_dim = embedding_proj_dim or embedding_dim clip_embed_dim = clip_embed_dim or embedding_dim self.time_proj = Timesteps(inner_dim, True, 0) self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn) self.proj_in = nn.Linear(embedding_dim, inner_dim) if embedding_proj_norm_type is None: self.embedding_proj_norm = None elif embedding_proj_norm_type == 'layer': self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim) else: raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}') self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim) if encoder_hid_proj_type is None: self.encoder_hidden_states_proj = None elif encoder_hid_proj_type == 'linear': self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim) else: raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}') self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim)) if added_emb_type == 'prd': self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim)) elif added_emb_type is None: self.prd_embedding = None else: raise ValueError(f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.") self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn='gelu', attention_bias=True) for d in range(num_layers)]) if norm_in_type == 'layer': self.norm_in = nn.LayerNorm(inner_dim) elif norm_in_type is None: self.norm_in = None else: raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.') self.norm_out = nn.LayerNorm(inner_dim) self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim) causal_attention_mask = torch.full([num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0) causal_attention_mask.triu_(1) causal_attention_mask = causal_attention_mask[None, ...] self.register_buffer('causal_attention_mask', causal_attention_mask, persistent=False) self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim)) self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim)) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.BoolTensor]=None, return_dict: bool=True): batch_size = hidden_states.shape[0] timesteps = timestep if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(hidden_states.device) timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device) timesteps_projected = self.time_proj(timesteps) timesteps_projected = timesteps_projected.to(dtype=self.dtype) time_embeddings = self.time_embedding(timesteps_projected) if self.embedding_proj_norm is not None: proj_embedding = self.embedding_proj_norm(proj_embedding) proj_embeddings = self.embedding_proj(proj_embedding) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set') hidden_states = self.proj_in(hidden_states) positional_embeddings = self.positional_embedding.to(hidden_states.dtype) additional_embeds = [] additional_embeddings_len = 0 if encoder_hidden_states is not None: additional_embeds.append(encoder_hidden_states) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape) == 2: proj_embeddings = proj_embeddings[:, None, :] if len(hidden_states.shape) == 2: hidden_states = hidden_states[:, None, :] additional_embeds = additional_embeds + [proj_embeddings, time_embeddings[:, None, :], hidden_states] if self.prd_embedding is not None: prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1) additional_embeds.append(prd_embedding) hidden_states = torch.cat(additional_embeds, dim=1) additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: positional_embeddings = F.pad(positional_embeddings, (0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0), value=0.0) hidden_states = hidden_states + positional_embeddings if attention_mask is not None: attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 
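# --- Illustration (standalone; not part of prior_transformer.py). The statement above and
# --- the padding that follows turn a 0/1 attention mask into an additive bias: kept
# --- positions become 0.0, masked positions become -10000.0, and the right side is padded
# --- with zeros for the additional embeddings appended to the sequence. Shapes are made up.
import torch
import torch.nn.functional as F

mask = torch.tensor([[1.0, 1.0, 0.0]])      # 1 = attend, 0 = ignore
bias = (1 - mask) * -10000.0                # tensor([[0., 0., -10000.]])
bias = F.pad(bias, (0, 4), value=0.0)       # pad for 4 extra tokens -> shape (1, 7)
# The model then adds its causal mask and repeats the result once per attention head
# before handing it to the transformer blocks.
# --- end illustration ---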
attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0) attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype) attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0) if self.norm_in is not None: hidden_states = self.norm_in(hidden_states) for block in self.transformer_blocks: hidden_states = block(hidden_states, attention_mask=attention_mask) hidden_states = self.norm_out(hidden_states) if self.prd_embedding is not None: hidden_states = hidden_states[:, -1] else: hidden_states = hidden_states[:, additional_embeddings_len:] predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding) def post_process_latents(self, prior_latents): prior_latents = prior_latents * self.clip_std + self.clip_mean return prior_latents # File: diffusers-main/src/diffusers/models/transformers/stable_audio_transformer.py from typing import Any, Dict, Optional, Union import numpy as np import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...models.attention import FeedForward from ...models.attention_processor import Attention, AttentionProcessor, StableAudioAttnProcessor2_0 from ...models.modeling_utils import ModelMixin from ...models.transformers.transformer_2d import Transformer2DModelOutput from ...utils import is_torch_version, logging from ...utils.torch_utils import maybe_allow_in_graph logger = logging.get_logger(__name__) class StableAudioGaussianFourierProjection(nn.Module): def __init__(self, embedding_size: int=256, scale: float=1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False): super().__init__() self.weight = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) self.log = log self.flip_sin_to_cos = flip_sin_to_cos if set_W_to_weight: del self.weight self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) self.weight = self.W del self.W def forward(self, x): if self.log: x = torch.log(x) x_proj = 2 * np.pi * x[:, None] @ self.weight[None, :] if self.flip_sin_to_cos: out = torch.cat([torch.cos(x_proj), torch.sin(x_proj)], dim=-1) else: out = torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) return out @maybe_allow_in_graph class StableAudioDiTBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, num_key_value_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int]=None, upcast_attention: bool=False, norm_eps: float=1e-05, ff_inner_dim: Optional[int]=None): super().__init__() self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=norm_eps) self.attn1 = Attention(query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=False, upcast_attention=upcast_attention, out_bias=False, processor=StableAudioAttnProcessor2_0()) self.norm2 = nn.LayerNorm(dim, norm_eps, True) self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, kv_heads=num_key_value_attention_heads, dropout=dropout, bias=False, upcast_attention=upcast_attention, out_bias=False, processor=StableAudioAttnProcessor2_0()) self.norm3 = nn.LayerNorm(dim, norm_eps, True) self.ff = FeedForward(dim, dropout=dropout, activation_fn='swiglu', final_dropout=False, inner_dim=ff_inner_dim, 
bias=True) self._chunk_size = None self._chunk_dim = 0 def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int=0): self._chunk_size = chunk_size self._chunk_dim = dim def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, rotary_embedding: Optional[torch.FloatTensor]=None) -> torch.Tensor: norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, attention_mask=attention_mask, rotary_emb=rotary_embedding) hidden_states = attn_output + hidden_states norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask) hidden_states = attn_output + hidden_states norm_hidden_states = self.norm3(hidden_states) ff_output = self.ff(norm_hidden_states) hidden_states = ff_output + hidden_states return hidden_states class StableAudioDiTModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, sample_size: int=1024, in_channels: int=64, num_layers: int=24, attention_head_dim: int=64, num_attention_heads: int=24, num_key_value_attention_heads: int=12, out_channels: int=64, cross_attention_dim: int=768, time_proj_dim: int=256, global_states_input_dim: int=1536, cross_attention_input_dim: int=768): super().__init__() self.sample_size = sample_size self.out_channels = out_channels self.inner_dim = num_attention_heads * attention_head_dim self.time_proj = StableAudioGaussianFourierProjection(embedding_size=time_proj_dim // 2, flip_sin_to_cos=True, log=False, set_W_to_weight=False) self.timestep_proj = nn.Sequential(nn.Linear(time_proj_dim, self.inner_dim, bias=True), nn.SiLU(), nn.Linear(self.inner_dim, self.inner_dim, bias=True)) self.global_proj = nn.Sequential(nn.Linear(global_states_input_dim, self.inner_dim, bias=False), nn.SiLU(), nn.Linear(self.inner_dim, self.inner_dim, bias=False)) self.cross_attention_proj = nn.Sequential(nn.Linear(cross_attention_input_dim, cross_attention_dim, bias=False), nn.SiLU(), nn.Linear(cross_attention_dim, cross_attention_dim, bias=False)) self.preprocess_conv = nn.Conv1d(in_channels, in_channels, 1, bias=False) self.proj_in = nn.Linear(in_channels, self.inner_dim, bias=False) self.transformer_blocks = nn.ModuleList([StableAudioDiTBlock(dim=self.inner_dim, num_attention_heads=num_attention_heads, num_key_value_attention_heads=num_key_value_attention_heads, attention_head_dim=attention_head_dim, cross_attention_dim=cross_attention_dim) for i in range(num_layers)]) self.proj_out = nn.Linear(self.inner_dim, self.out_channels, bias=False) self.postprocess_conv = nn.Conv1d(self.out_channels, self.out_channels, 1, bias=False) self.gradient_checkpointing = False @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = 
len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): self.set_attn_processor(StableAudioAttnProcessor2_0()) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def forward(self, hidden_states: torch.FloatTensor, timestep: torch.LongTensor=None, encoder_hidden_states: torch.FloatTensor=None, global_hidden_states: torch.FloatTensor=None, rotary_embedding: torch.FloatTensor=None, return_dict: bool=True, attention_mask: Optional[torch.LongTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None) -> Union[torch.FloatTensor, Transformer2DModelOutput]: cross_attention_hidden_states = self.cross_attention_proj(encoder_hidden_states) global_hidden_states = self.global_proj(global_hidden_states) time_hidden_states = self.timestep_proj(self.time_proj(timestep.to(self.dtype))) global_hidden_states = global_hidden_states + time_hidden_states.unsqueeze(1) hidden_states = self.preprocess_conv(hidden_states) + hidden_states hidden_states = hidden_states.transpose(1, 2) hidden_states = self.proj_in(hidden_states) hidden_states = torch.cat([global_hidden_states, hidden_states], dim=-2) if attention_mask is not None: prepend_mask = torch.ones((hidden_states.shape[0], 1), device=hidden_states.device, dtype=torch.bool) attention_mask = torch.cat([prepend_mask, attention_mask], dim=-1) for block in self.transformer_blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, attention_mask, cross_attention_hidden_states, encoder_attention_mask, rotary_embedding, **ckpt_kwargs) else: hidden_states = block(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=cross_attention_hidden_states, encoder_attention_mask=encoder_attention_mask, rotary_embedding=rotary_embedding) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.transpose(1, 2)[:, :, 1:] hidden_states = self.postprocess_conv(hidden_states) + hidden_states if not return_dict: return (hidden_states,) return Transformer2DModelOutput(sample=hidden_states) # File: diffusers-main/src/diffusers/models/transformers/t5_film_transformer.py import math from typing import Optional, Tuple import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ..attention_processor import Attention from ..embeddings import get_timestep_embedding from 
..modeling_utils import ModelMixin class T5FilmDecoder(ModelMixin, ConfigMixin): @register_to_config def __init__(self, input_dims: int=128, targets_length: int=256, max_decoder_noise_time: float=2000.0, d_model: int=768, num_layers: int=12, num_heads: int=12, d_kv: int=64, d_ff: int=2048, dropout_rate: float=0.1): super().__init__() self.conditioning_emb = nn.Sequential(nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU()) self.position_encoding = nn.Embedding(targets_length, d_model) self.position_encoding.weight.requires_grad = False self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False) self.dropout = nn.Dropout(p=dropout_rate) self.decoders = nn.ModuleList() for lyr_num in range(num_layers): lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate) self.decoders.append(lyr) self.decoder_norm = T5LayerNorm(d_model) self.post_dropout = nn.Dropout(p=dropout_rate) self.spec_out = nn.Linear(d_model, input_dims, bias=False) def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor: mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2)) return mask.unsqueeze(-3) def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time): (batch, _, _) = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) time_steps = get_timestep_embedding(decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time).to(dtype=self.dtype) conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) seq_length = decoder_input_tokens.shape[1] decoder_positions = torch.broadcast_to(torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length)) position_encodings = self.position_encoding(decoder_positions) inputs = self.continuous_inputs_projection(decoder_input_tokens) inputs += position_encodings y = self.dropout(inputs) decoder_mask = torch.ones(decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype) encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for (x, y) in encodings_and_masks] encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1) encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1) for lyr in self.decoders: y = lyr(y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask)[0] y = self.decoder_norm(y) y = self.post_dropout(y) spec_out = self.spec_out(y) return spec_out class DecoderLayer(nn.Module): def __init__(self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float=1e-06): super().__init__() self.layer = nn.ModuleList() self.layer.append(T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)) self.layer.append(T5LayerCrossAttention(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)) self.layer.append(T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)) def forward(self, hidden_states: torch.Tensor, conditioning_emb: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, 
encoder_attention_mask: Optional[torch.Tensor]=None, encoder_decoder_position_bias=None) -> Tuple[torch.Tensor]: hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask) if encoder_hidden_states is not None: encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -10000000000.0).to(encoder_hidden_states.dtype) hidden_states = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask) hidden_states = self.layer[-1](hidden_states, conditioning_emb) return (hidden_states,) class T5LayerSelfAttentionCond(nn.Module): def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float): super().__init__() self.layer_norm = T5LayerNorm(d_model) self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model) self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False) self.dropout = nn.Dropout(dropout_rate) def forward(self, hidden_states: torch.Tensor, conditioning_emb: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: normed_hidden_states = self.layer_norm(hidden_states) if conditioning_emb is not None: normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb) attention_output = self.attention(normed_hidden_states) hidden_states = hidden_states + self.dropout(attention_output) return hidden_states class T5LayerCrossAttention(nn.Module): def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float): super().__init__() self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False) self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon) self.dropout = nn.Dropout(dropout_rate) def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.attention(normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1)) layer_output = hidden_states + self.dropout(attention_output) return layer_output class T5LayerFFCond(nn.Module): def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float): super().__init__() self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate) self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model) self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon) self.dropout = nn.Dropout(dropout_rate) def forward(self, hidden_states: torch.Tensor, conditioning_emb: Optional[torch.Tensor]=None) -> torch.Tensor: forwarded_states = self.layer_norm(hidden_states) if conditioning_emb is not None: forwarded_states = self.film(forwarded_states, conditioning_emb) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5DenseGatedActDense(nn.Module): def __init__(self, d_model: int, d_ff: int, dropout_rate: float): super().__init__() self.wi_0 = nn.Linear(d_model, d_ff, bias=False) self.wi_1 = nn.Linear(d_model, d_ff, bias=False) self.wo = nn.Linear(d_ff, d_model, bias=False) self.dropout = nn.Dropout(dropout_rate) self.act = NewGELUActivation() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_gelu = 
self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states class T5LayerNorm(nn.Module): def __init__(self, hidden_size: int, eps: float=1e-06): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states class NewGELUActivation(nn.Module): def forward(self, input: torch.Tensor) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0)))) class T5FiLMLayer(nn.Module): def __init__(self, in_features: int, out_features: int): super().__init__() self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False) def forward(self, x: torch.Tensor, conditioning_emb: torch.Tensor) -> torch.Tensor: emb = self.scale_bias(conditioning_emb) (scale, shift) = torch.chunk(emb, 2, -1) x = x * (1 + scale) + shift return x # File: diffusers-main/src/diffusers/models/transformers/transformer_2d.py from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import LegacyConfigMixin, register_to_config from ...utils import deprecate, is_torch_version, logging from ..attention import BasicTransformerBlock from ..embeddings import ImagePositionalEmbeddings, PatchEmbed, PixArtAlphaTextProjection from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import LegacyModelMixin from ..normalization import AdaLayerNormSingle logger = logging.get_logger(__name__) class Transformer2DModelOutput(Transformer2DModelOutput): def __init__(self, *args, **kwargs): deprecation_message = 'Importing `Transformer2DModelOutput` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.modeling_outputs import Transformer2DModelOutput`, instead.' 
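# --- Note (not part of transformer_2d.py): this `Transformer2DModelOutput` subclass is kept
# --- only for backward compatibility and emits the deprecation message defined above when
# --- instantiated. New code should import the dataclass from its canonical location,
#
#     from diffusers.models.modeling_outputs import Transformer2DModelOutput
#
# --- which is the same class this module already imports near the top of the file.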
deprecate('Transformer2DModelOutput', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) class Transformer2DModel(LegacyModelMixin, LegacyConfigMixin): _supports_gradient_checkpointing = True _no_split_modules = ['BasicTransformerBlock'] @register_to_config def __init__(self, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, out_channels: Optional[int]=None, num_layers: int=1, dropout: float=0.0, norm_num_groups: int=32, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, sample_size: Optional[int]=None, num_vector_embeds: Optional[int]=None, patch_size: Optional[int]=None, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None, use_linear_projection: bool=False, only_cross_attention: bool=False, double_self_attention: bool=False, upcast_attention: bool=False, norm_type: str='layer_norm', norm_elementwise_affine: bool=True, norm_eps: float=1e-05, attention_type: str='default', caption_channels: int=None, interpolation_scale: float=None, use_additional_conditions: Optional[bool]=None): super().__init__() if patch_size is not None: if norm_type not in ['ada_norm', 'ada_norm_zero', 'ada_norm_single']: raise NotImplementedError(f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'.") elif norm_type in ['ada_norm', 'ada_norm_zero'] and num_embeds_ada_norm is None: raise ValueError(f'When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None.') self.is_input_continuous = in_channels is not None and patch_size is None self.is_input_vectorized = num_vector_embeds is not None self.is_input_patches = in_channels is not None and patch_size is not None if self.is_input_continuous and self.is_input_vectorized: raise ValueError(f'Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make sure that either `in_channels` or `num_vector_embeds` is None.') elif self.is_input_vectorized and self.is_input_patches: raise ValueError(f'Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make sure that either `num_vector_embeds` or `num_patches` is None.') elif not self.is_input_continuous and (not self.is_input_vectorized) and (not self.is_input_patches): raise ValueError(f'Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size: {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None.') if norm_type == 'layer_norm' and num_embeds_ada_norm is not None: deprecation_message = f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config. Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `transformer/config.json` file" deprecate('norm_type!=num_embeds_ada_norm', '1.0.0', deprecation_message, standard_warn=False) norm_type = 'ada_norm' self.use_linear_projection = use_linear_projection self.interpolation_scale = interpolation_scale self.caption_channels = caption_channels self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.in_channels = in_channels self.out_channels = in_channels if out_channels is None else out_channels self.gradient_checkpointing = False if use_additional_conditions is None: if norm_type == 'ada_norm_single' and sample_size == 128: use_additional_conditions = True else: use_additional_conditions = False self.use_additional_conditions = use_additional_conditions if self.is_input_continuous: self._init_continuous_input(norm_type=norm_type) elif self.is_input_vectorized: self._init_vectorized_inputs(norm_type=norm_type) elif self.is_input_patches: self._init_patched_inputs(norm_type=norm_type) def _init_continuous_input(self, norm_type): self.norm = torch.nn.GroupNorm(num_groups=self.config.norm_num_groups, num_channels=self.in_channels, eps=1e-06, affine=True) if self.use_linear_projection: self.proj_in = torch.nn.Linear(self.in_channels, self.inner_dim) else: self.proj_in = torch.nn.Conv2d(self.in_channels, self.inner_dim, kernel_size=1, stride=1, padding=0) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type) for _ in range(self.config.num_layers)]) if self.use_linear_projection: self.proj_out = torch.nn.Linear(self.inner_dim, self.out_channels) else: self.proj_out = torch.nn.Conv2d(self.inner_dim, self.out_channels, kernel_size=1, stride=1, padding=0) def _init_vectorized_inputs(self, norm_type): assert self.config.sample_size is not None, 'Transformer2DModel over discrete input must provide sample_size' assert self.config.num_vector_embeds is not None, 'Transformer2DModel over discrete input must provide num_embed' self.height = self.config.sample_size self.width = self.config.sample_size self.num_latent_pixels = self.height * self.width self.latent_image_embedding = ImagePositionalEmbeddings(num_embed=self.config.num_vector_embeds, embed_dim=self.inner_dim, height=self.height, width=self.width) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, 
upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type) for _ in range(self.config.num_layers)]) self.norm_out = nn.LayerNorm(self.inner_dim) self.out = nn.Linear(self.inner_dim, self.config.num_vector_embeds - 1) def _init_patched_inputs(self, norm_type): assert self.config.sample_size is not None, 'Transformer2DModel over patched input must provide sample_size' self.height = self.config.sample_size self.width = self.config.sample_size self.patch_size = self.config.patch_size interpolation_scale = self.config.interpolation_scale if self.config.interpolation_scale is not None else max(self.config.sample_size // 64, 1) self.pos_embed = PatchEmbed(height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.in_channels, embed_dim=self.inner_dim, interpolation_scale=interpolation_scale) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, only_cross_attention=self.config.only_cross_attention, double_self_attention=self.config.double_self_attention, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type) for _ in range(self.config.num_layers)]) if self.config.norm_type != 'ada_norm_single': self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-06) self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim) self.proj_out_2 = nn.Linear(self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels) elif self.config.norm_type == 'ada_norm_single': self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-06) self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim ** 0.5) self.proj_out = nn.Linear(self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels) self.adaln_single = None if self.config.norm_type == 'ada_norm_single': self.adaln_single = AdaLayerNormSingle(self.inner_dim, use_additional_conditions=self.use_additional_conditions) self.caption_projection = None if self.caption_channels is not None: self.caption_projection = PixArtAlphaTextProjection(in_features=self.caption_channels, hidden_size=self.inner_dim) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, timestep: Optional[torch.LongTensor]=None, added_cond_kwargs: Dict[str, torch.Tensor]=None, class_labels: Optional[torch.LongTensor]=None, cross_attention_kwargs: Dict[str, Any]=None, attention_mask: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, return_dict: bool=True): if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') if attention_mask is not None and attention_mask.ndim == 2: attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) if self.is_input_continuous: (batch_size, _, height, width) = hidden_states.shape residual = hidden_states (hidden_states, inner_dim) = self._operate_on_continuous_inputs(hidden_states) elif self.is_input_vectorized: hidden_states = self.latent_image_embedding(hidden_states) elif self.is_input_patches: (height, width) = (hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size) (hidden_states, encoder_hidden_states, timestep, embedded_timestep) = self._operate_on_patched_inputs(hidden_states, encoder_hidden_states, timestep, added_cond_kwargs) for block in self.transformer_blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, cross_attention_kwargs, class_labels, **ckpt_kwargs) else: hidden_states = block(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels) if self.is_input_continuous: output = self._get_output_for_continuous_inputs(hidden_states=hidden_states, residual=residual, batch_size=batch_size, height=height, width=width, inner_dim=inner_dim) elif self.is_input_vectorized: output = self._get_output_for_vectorized_inputs(hidden_states) elif self.is_input_patches: output = self._get_output_for_patched_inputs(hidden_states=hidden_states, timestep=timestep, class_labels=class_labels, embedded_timestep=embedded_timestep, height=height, width=width) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) def _operate_on_continuous_inputs(self, hidden_states): (batch, _, height, width) = hidden_states.shape hidden_states = self.norm(hidden_states) if not self.use_linear_projection: hidden_states = self.proj_in(hidden_states) inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) else: inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) hidden_states = self.proj_in(hidden_states) return (hidden_states, inner_dim) def _operate_on_patched_inputs(self, hidden_states, encoder_hidden_states, timestep, added_cond_kwargs): batch_size = hidden_states.shape[0] hidden_states = self.pos_embed(hidden_states) embedded_timestep = None if self.adaln_single is not None: if self.use_additional_conditions and added_cond_kwargs is None: raise ValueError('`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`.') (timestep, embedded_timestep) = self.adaln_single(timestep, added_cond_kwargs, 
batch_size=batch_size, hidden_dtype=hidden_states.dtype) if self.caption_projection is not None: encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) return (hidden_states, encoder_hidden_states, timestep, embedded_timestep) def _get_output_for_continuous_inputs(self, hidden_states, residual, batch_size, height, width, inner_dim): if not self.use_linear_projection: hidden_states = hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() hidden_states = self.proj_out(hidden_states) else: hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() output = hidden_states + residual return output def _get_output_for_vectorized_inputs(self, hidden_states): hidden_states = self.norm_out(hidden_states) logits = self.out(hidden_states) logits = logits.permute(0, 2, 1) output = F.log_softmax(logits.double(), dim=1).float() return output def _get_output_for_patched_inputs(self, hidden_states, timestep, class_labels, embedded_timestep, height=None, width=None): if self.config.norm_type != 'ada_norm_single': conditioning = self.transformer_blocks[0].norm1.emb(timestep, class_labels, hidden_dtype=hidden_states.dtype) (shift, scale) = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] hidden_states = self.proj_out_2(hidden_states) elif self.config.norm_type == 'ada_norm_single': (shift, scale) = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) hidden_states = hidden_states * (1 + scale) + shift hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.squeeze(1) if self.adaln_single is None: height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape(shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)) hidden_states = torch.einsum('nhwpqc->nchpwq', hidden_states) output = hidden_states.reshape(shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)) return output # File: diffusers-main/src/diffusers/models/transformers/transformer_flux.py from typing import Any, Dict, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...models.attention import FeedForward from ...models.attention_processor import Attention, AttentionProcessor, FluxAttnProcessor2_0, FusedFluxAttnProcessor2_0 from ...models.modeling_utils import ModelMixin from ...models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle from ...utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import maybe_allow_in_graph from ..embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed from ..modeling_outputs import Transformer2DModelOutput logger = logging.get_logger(__name__) @maybe_allow_in_graph class FluxSingleTransformerBlock(nn.Module): def __init__(self, dim, num_attention_heads, attention_head_dim, mlp_ratio=4.0): super().__init__() self.mlp_hidden_dim = int(dim * mlp_ratio) self.norm = 
AdaLayerNormZeroSingle(dim) self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim) self.act_mlp = nn.GELU(approximate='tanh') self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) processor = FluxAttnProcessor2_0() self.attn = Attention(query_dim=dim, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, processor=processor, qk_norm='rms_norm', eps=1e-06, pre_only=True) def forward(self, hidden_states: torch.FloatTensor, temb: torch.FloatTensor, image_rotary_emb=None): residual = hidden_states (norm_hidden_states, gate) = self.norm(hidden_states, emb=temb) mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) attn_output = self.attn(hidden_states=norm_hidden_states, image_rotary_emb=image_rotary_emb) hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) gate = gate.unsqueeze(1) hidden_states = gate * self.proj_out(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) return hidden_states @maybe_allow_in_graph class FluxTransformerBlock(nn.Module): def __init__(self, dim, num_attention_heads, attention_head_dim, qk_norm='rms_norm', eps=1e-06): super().__init__() self.norm1 = AdaLayerNormZero(dim) self.norm1_context = AdaLayerNormZero(dim) if hasattr(F, 'scaled_dot_product_attention'): processor = FluxAttnProcessor2_0() else: raise ValueError('The current PyTorch version does not support the `scaled_dot_product_attention` function.') self.attn = Attention(query_dim=dim, cross_attention_dim=None, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=False, bias=True, processor=processor, qk_norm=qk_norm, eps=eps) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-06) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn='gelu-approximate') self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-06) self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn='gelu-approximate') self._chunk_size = None self._chunk_dim = 0 def forward(self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor, image_rotary_emb=None): (norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp) = self.norm1(hidden_states, emb=temb) (norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp) = self.norm1_context(encoder_hidden_states, emb=temb) (attn_output, context_attn_output) = self.attn(hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb) attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = hidden_states + attn_output norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm_hidden_states) ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = hidden_states + ff_output context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output if encoder_hidden_states.dtype == torch.float16: 
encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) return (encoder_hidden_states, hidden_states) class FluxTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True _no_split_modules = ['FluxTransformerBlock', 'FluxSingleTransformerBlock'] @register_to_config def __init__(self, patch_size: int=1, in_channels: int=64, num_layers: int=19, num_single_layers: int=38, attention_head_dim: int=128, num_attention_heads: int=24, joint_attention_dim: int=4096, pooled_projection_dim: int=768, guidance_embeds: bool=False, axes_dims_rope: Tuple[int]=(16, 56, 56)): super().__init__() self.out_channels = in_channels self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) text_time_guidance_cls = CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings self.time_text_embed = text_time_guidance_cls(embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim) self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim) self.x_embedder = torch.nn.Linear(self.config.in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList([FluxTransformerBlock(dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim) for i in range(self.config.num_layers)]) self.single_transformer_blocks = nn.ModuleList([FluxSingleTransformerBlock(dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim) for i in range(self.config.num_single_layers)]) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-06) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedFluxAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor=None, pooled_projections: torch.Tensor=None, timestep: torch.LongTensor=None, img_ids: torch.Tensor=None, txt_ids: torch.Tensor=None, guidance: torch.Tensor=None, joint_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_block_samples=None, controlnet_single_block_samples=None, return_dict: bool=True) -> Union[torch.FloatTensor, Transformer2DModelOutput]: if joint_attention_kwargs is not None: joint_attention_kwargs = joint_attention_kwargs.copy() lora_scale = joint_attention_kwargs.pop('scale', 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: scale_lora_layers(self, lora_scale) elif joint_attention_kwargs is not None and joint_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective.') hidden_states = self.x_embedder(hidden_states) timestep = timestep.to(hidden_states.dtype) * 1000 if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 else: guidance = None temb = self.time_text_embed(timestep, pooled_projections) if guidance is None else self.time_text_embed(timestep, guidance, pooled_projections) encoder_hidden_states = self.context_embedder(encoder_hidden_states) if txt_ids.ndim == 3: logger.warning('Passing `txt_ids` 3d torch.Tensor is deprecated.Please remove the batch dimension and pass it as a 2d torch Tensor') txt_ids = txt_ids[0] if img_ids.ndim == 3: logger.warning('Passing `img_ids` 3d torch.Tensor is deprecated.Please remove the batch dimension and pass it as a 2d torch Tensor') img_ids = img_ids[0] ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) for (index_block, block) in enumerate(self.transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} (encoder_hidden_states, hidden_states) = torch.utils.checkpoint.checkpoint(create_custom_forward(block), 
hidden_states, encoder_hidden_states, temb, image_rotary_emb, **ckpt_kwargs) else: (encoder_hidden_states, hidden_states) = block(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb) if controlnet_block_samples is not None: interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) interval_control = int(np.ceil(interval_control)) hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) for (index_block, block) in enumerate(self.single_transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, temb, image_rotary_emb, **ckpt_kwargs) else: hidden_states = block(hidden_states=hidden_states, temb=temb, image_rotary_emb=image_rotary_emb) if controlnet_single_block_samples is not None: interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples) interval_control = int(np.ceil(interval_control)) hidden_states[:, encoder_hidden_states.shape[1]:, ...] = hidden_states[:, encoder_hidden_states.shape[1]:, ...] + controlnet_single_block_samples[index_block // interval_control] hidden_states = hidden_states[:, encoder_hidden_states.shape[1]:, ...] hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) if USE_PEFT_BACKEND: unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/transformers/transformer_sd3.py from typing import Any, Dict, List, Optional, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...models.attention import JointTransformerBlock from ...models.attention_processor import Attention, AttentionProcessor, FusedJointAttnProcessor2_0 from ...models.modeling_utils import ModelMixin from ...models.normalization import AdaLayerNormContinuous from ...utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers from ..embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed from ..modeling_outputs import Transformer2DModelOutput logger = logging.get_logger(__name__) class SD3Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, sample_size: int=128, patch_size: int=2, in_channels: int=16, num_layers: int=18, attention_head_dim: int=64, num_attention_heads: int=18, joint_attention_dim: int=4096, caption_projection_dim: int=1152, pooled_projection_dim: int=2048, out_channels: int=16, pos_embed_max_size: int=96): super().__init__() default_out_channels = in_channels self.out_channels = out_channels if out_channels is not None else default_out_channels self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.pos_embed = PatchEmbed(height=self.config.sample_size, width=self.config.sample_size, 
patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim, pos_embed_max_size=pos_embed_max_size) self.time_text_embed = CombinedTimestepTextProjEmbeddings(embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim) self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.config.caption_projection_dim) self.transformer_blocks = nn.ModuleList([JointTransformerBlock(dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim, context_pre_only=i == num_layers - 1) for i in range(self.config.num_layers)]) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-06) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False def enable_forward_chunking(self, chunk_size: Optional[int]=None, dim: int=0) -> None: if dim not in [0, 1]: raise ValueError(f'Make sure to set `dim` to either 0 or 1, not {dim}') chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def disable_forward_chunking(self): def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedJointAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def forward(self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor=None, pooled_projections: torch.FloatTensor=None, timestep: torch.LongTensor=None, block_controlnet_hidden_states: List=None, joint_attention_kwargs: Optional[Dict[str, Any]]=None, return_dict: bool=True) -> Union[torch.FloatTensor, Transformer2DModelOutput]: if joint_attention_kwargs is not None: joint_attention_kwargs = joint_attention_kwargs.copy() lora_scale = joint_attention_kwargs.pop('scale', 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: scale_lora_layers(self, lora_scale) elif joint_attention_kwargs is not None and joint_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective.') (height, width) = hidden_states.shape[-2:] hidden_states = self.pos_embed(hidden_states) temb = self.time_text_embed(timestep, pooled_projections) encoder_hidden_states = self.context_embedder(encoder_hidden_states) for (index_block, block) in enumerate(self.transformer_blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} (encoder_hidden_states, hidden_states) = torch.utils.checkpoint.checkpoint(create_custom_forward(block), hidden_states, encoder_hidden_states, temb, **ckpt_kwargs) else: (encoder_hidden_states, hidden_states) = block(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb) if block_controlnet_hidden_states is not None and block.context_pre_only is False: interval_control = len(self.transformer_blocks) // len(block_controlnet_hidden_states) hidden_states = hidden_states + block_controlnet_hidden_states[index_block // interval_control] hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) patch_size = self.config.patch_size height = height // patch_size width = width // patch_size hidden_states = hidden_states.reshape(shape=(hidden_states.shape[0], height, 
width, patch_size, patch_size, self.out_channels)) hidden_states = torch.einsum('nhwpqc->nchpwq', hidden_states) output = hidden_states.reshape(shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)) if USE_PEFT_BACKEND: unscale_lora_layers(self, lora_scale) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/transformers/transformer_temporal.py from dataclasses import dataclass from typing import Any, Dict, Optional import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ..attention import BasicTransformerBlock, TemporalBasicTransformerBlock from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..resnet import AlphaBlender @dataclass class TransformerTemporalModelOutput(BaseOutput): sample: torch.Tensor class TransformerTemporalModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, out_channels: Optional[int]=None, num_layers: int=1, dropout: float=0.0, norm_num_groups: int=32, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, sample_size: Optional[int]=None, activation_fn: str='geglu', norm_elementwise_affine: bool=True, double_self_attention: bool=True, positional_embeddings: Optional[str]=None, num_positional_embeddings: Optional[int]=None): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-06, affine=True) self.proj_in = nn.Linear(in_channels, inner_dim) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, positional_embeddings=positional_embeddings, num_positional_embeddings=num_positional_embeddings) for d in range(num_layers)]) self.proj_out = nn.Linear(inner_dim, in_channels) def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.LongTensor]=None, timestep: Optional[torch.LongTensor]=None, class_labels: torch.LongTensor=None, num_frames: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, return_dict: bool=True) -> TransformerTemporalModelOutput: (batch_frames, channel, height, width) = hidden_states.shape batch_size = batch_frames // num_frames residual = hidden_states hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width) hidden_states = hidden_states.permute(0, 2, 1, 3, 4) hidden_states = self.norm(hidden_states) hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel) hidden_states = self.proj_in(hidden_states) for block in self.transformer_blocks: hidden_states = block(hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states[None, None, :].reshape(batch_size, height, width, num_frames, channel).permute(0, 3, 4, 1, 
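# Example sketch (editor-added, not part of this file): the reshaping done in
# TransformerTemporalModel.forward above, with the GroupNorm step omitted. Frames stacked in the
# batch dimension are regrouped so attention runs over the frame axis at every spatial location.
import torch

batch_size, num_frames, channel, height, width = 2, 4, 8, 3, 3
x = torch.randn(batch_size * num_frames, channel, height, width)            # (batch*frames, C, H, W)
x = x[None, :].reshape(batch_size, num_frames, channel, height, width)
x = x.permute(0, 2, 1, 3, 4)                                                # (B, C, F, H, W)
x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
print(x.shape)  # torch.Size([18, 4, 8]): one frame sequence per (batch, h, w) location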
2).contiguous() hidden_states = hidden_states.reshape(batch_frames, channel, height, width) output = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=output) class TransformerSpatioTemporalModel(nn.Module): def __init__(self, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: int=320, out_channels: Optional[int]=None, num_layers: int=1, cross_attention_dim: Optional[int]=None): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.inner_dim = inner_dim self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06) self.proj_in = nn.Linear(in_channels, inner_dim) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, cross_attention_dim=cross_attention_dim) for d in range(num_layers)]) time_mix_inner_dim = inner_dim self.temporal_transformer_blocks = nn.ModuleList([TemporalBasicTransformerBlock(inner_dim, time_mix_inner_dim, num_attention_heads, attention_head_dim, cross_attention_dim=cross_attention_dim) for _ in range(num_layers)]) time_embed_dim = in_channels * 4 self.time_pos_embed = TimestepEmbedding(in_channels, time_embed_dim, out_dim=in_channels) self.time_proj = Timesteps(in_channels, True, 0) self.time_mixer = AlphaBlender(alpha=0.5, merge_strategy='learned_with_images') self.out_channels = in_channels if out_channels is None else out_channels self.proj_out = nn.Linear(inner_dim, in_channels) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, image_only_indicator: Optional[torch.Tensor]=None, return_dict: bool=True): (batch_frames, _, height, width) = hidden_states.shape num_frames = image_only_indicator.shape[-1] batch_size = batch_frames // num_frames time_context = encoder_hidden_states time_context_first_timestep = time_context[None, :].reshape(batch_size, num_frames, -1, time_context.shape[-1])[:, 0] time_context = time_context_first_timestep[:, None].broadcast_to(batch_size, height * width, time_context.shape[-2], time_context.shape[-1]) time_context = time_context.reshape(batch_size * height * width, -1, time_context.shape[-1]) residual = hidden_states hidden_states = self.norm(hidden_states) inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_frames, height * width, inner_dim) hidden_states = self.proj_in(hidden_states) num_frames_emb = torch.arange(num_frames, device=hidden_states.device) num_frames_emb = num_frames_emb.repeat(batch_size, 1) num_frames_emb = num_frames_emb.reshape(-1) t_emb = self.time_proj(num_frames_emb) t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_pos_embed(t_emb) emb = emb[:, None, :] for (block, temporal_block) in zip(self.transformer_blocks, self.temporal_transformer_blocks): if self.training and self.gradient_checkpointing: hidden_states = torch.utils.checkpoint.checkpoint(block, hidden_states, None, encoder_hidden_states, None, use_reentrant=False) else: hidden_states = block(hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states_mix = hidden_states hidden_states_mix = hidden_states_mix + emb hidden_states_mix = temporal_block(hidden_states_mix, num_frames=num_frames, encoder_hidden_states=time_context) hidden_states = self.time_mixer(x_spatial=hidden_states, x_temporal=hidden_states_mix, 
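# Example sketch (editor-added, not part of this file): a simplified version of the spatial/temporal
# mixing that AlphaBlender performs in the loop above. Here `alpha` is a single learned scalar
# squashed through a sigmoid; the real class additionally supports switching to the purely spatial
# branch per sample via `image_only_indicator`.
import torch
import torch.nn as nn

alpha = nn.Parameter(torch.tensor(0.5))
x_spatial = torch.randn(6, 16, 32)     # stand-in for the spatial transformer output
x_temporal = torch.randn(6, 16, 32)    # stand-in for the temporal transformer output
mix_factor = torch.sigmoid(alpha)
x = mix_factor * x_spatial + (1.0 - mix_factor) * x_temporal
print(x.shape)  # torch.Size([6, 16, 32])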
image_only_indicator=image_only_indicator) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch_frames, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() output = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=output) # File: diffusers-main/src/diffusers/models/unets/__init__.py from ...utils import is_flax_available, is_torch_available if is_torch_available(): from .unet_1d import UNet1DModel from .unet_2d import UNet2DModel from .unet_2d_condition import UNet2DConditionModel from .unet_3d_condition import UNet3DConditionModel from .unet_i2vgen_xl import I2VGenXLUNet from .unet_kandinsky3 import Kandinsky3UNet from .unet_motion_model import MotionAdapter, UNetMotionModel from .unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel from .unet_stable_cascade import StableCascadeUNet from .uvit_2d import UVit2DModel if is_flax_available(): from .unet_2d_condition_flax import FlaxUNet2DConditionModel # File: diffusers-main/src/diffusers/models/unets/unet_1d.py from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ..embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class UNet1DOutput(BaseOutput): sample: torch.Tensor class UNet1DModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, sample_size: int=65536, sample_rate: Optional[int]=None, in_channels: int=2, out_channels: int=2, extra_in_channels: int=0, time_embedding_type: str='fourier', flip_sin_to_cos: bool=True, use_timestep_embedding: bool=False, freq_shift: float=0.0, down_block_types: Tuple[str]=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'), up_block_types: Tuple[str]=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'), mid_block_type: Tuple[str]='UNetMidBlock1D', out_block_type: str=None, block_out_channels: Tuple[int]=(32, 32, 64), act_fn: str=None, norm_num_groups: int=8, layers_per_block: int=1, downsample_each_block: bool=False): super().__init__() self.sample_size = sample_size if time_embedding_type == 'fourier': self.time_proj = GaussianFourierProjection(embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos) timestep_input_dim = 2 * block_out_channels[0] elif time_embedding_type == 'positional': self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift) timestep_input_dim = block_out_channels[0] if use_timestep_embedding: time_embed_dim = block_out_channels[0] * 4 self.time_mlp = TimestepEmbedding(in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0]) self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = nn.ModuleList([]) self.out_block = None output_channel = in_channels for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] if i == 0: input_channel += extra_in_channels is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or 
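# Example sketch (editor-added, not part of this file): the shape contract of the two timestep
# projections selected in UNet1DModel.__init__ above. The import path follows this repository's
# layout; the concrete sizes are the editor's toy choices.
import torch
from diffusers.models.embeddings import GaussianFourierProjection, Timesteps

t = torch.tensor([1.0, 10.0, 100.0])
fourier = GaussianFourierProjection(embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=True)
positional = Timesteps(num_channels=32, flip_sin_to_cos=True, downscale_freq_shift=0.0)
print(fourier(t).shape)     # torch.Size([3, 16]): 2 * embedding_size channels (sin and cos)
print(positional(t).shape)  # torch.Size([3, 32]): num_channels channels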
downsample_each_block) self.down_blocks.append(down_block) self.mid_block = get_mid_block(mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] if out_block_type is None: final_upsample_channels = out_channels else: final_upsample_channels = block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block(up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block) self.up_blocks.append(up_block) prev_output_channel = output_channel num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) self.out_block = get_out_block(out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4) def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], return_dict: bool=True) -> Union[UNet1DOutput, Tuple]: timesteps = timestep if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timestep_embed = self.time_proj(timesteps) if self.config.use_timestep_embedding: timestep_embed = self.time_mlp(timestep_embed) else: timestep_embed = timestep_embed[..., None] timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) timestep_embed = timestep_embed.broadcast_to(sample.shape[:1] + timestep_embed.shape[1:]) down_block_res_samples = () for downsample_block in self.down_blocks: (sample, res_samples) = downsample_block(hidden_states=sample, temb=timestep_embed) down_block_res_samples += res_samples if self.mid_block: sample = self.mid_block(sample, timestep_embed) for (i, upsample_block) in enumerate(self.up_blocks): res_samples = down_block_res_samples[-1:] down_block_res_samples = down_block_res_samples[:-1] sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed) if self.out_block: sample = self.out_block(sample, timestep_embed) if not return_dict: return (sample,) return UNet1DOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/unet_1d_blocks.py import math from typing import Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ..activations import get_activation from ..resnet import Downsample1D, ResidualTemporalBlock1D, Upsample1D, rearrange_dims class DownResnetBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: Optional[int]=None, num_layers: int=1, conv_shortcut: bool=False, temb_channels: int=32, groups: int=32, groups_out: Optional[int]=None, non_linearity: Optional[str]=None, time_embedding_norm: str='default', output_scale_factor: float=1.0, add_downsample: bool=True): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels 
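# Example sketch (editor-added, not part of this file): the skip-connection bookkeeping in
# UNet1DModel.forward above. Each down block appends its intermediate outputs to a flat tuple and
# each up block pops from the end, so the deepest features are consumed first.
import torch

down_block_res_samples = ()
for depth in range(3):                               # stand-in for the down blocks
    res_samples = (torch.full((1,), float(depth)),)
    down_block_res_samples += res_samples

for step in range(3):                                # stand-in for the up blocks
    res_samples = down_block_res_samples[-1:]
    down_block_res_samples = down_block_res_samples[:-1]
    print(step, res_samples[0].item())               # consumes 2.0, then 1.0, then 0.0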
= out_channels self.use_conv_shortcut = conv_shortcut self.time_embedding_norm = time_embedding_norm self.add_downsample = add_downsample self.output_scale_factor = output_scale_factor if groups_out is None: groups_out = groups resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=temb_channels)] for _ in range(num_layers): resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)) self.resnets = nn.ModuleList(resnets) if non_linearity is None: self.nonlinearity = None else: self.nonlinearity = get_activation(non_linearity) self.downsample = None if add_downsample: self.downsample = Downsample1D(out_channels, use_conv=True, padding=1) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: output_states = () hidden_states = self.resnets[0](hidden_states, temb) for resnet in self.resnets[1:]: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.nonlinearity is not None: hidden_states = self.nonlinearity(hidden_states) if self.downsample is not None: hidden_states = self.downsample(hidden_states) return (hidden_states, output_states) class UpResnetBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: Optional[int]=None, num_layers: int=1, temb_channels: int=32, groups: int=32, groups_out: Optional[int]=None, non_linearity: Optional[str]=None, time_embedding_norm: str='default', output_scale_factor: float=1.0, add_upsample: bool=True): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.time_embedding_norm = time_embedding_norm self.add_upsample = add_upsample self.output_scale_factor = output_scale_factor if groups_out is None: groups_out = groups resnets = [ResidualTemporalBlock1D(2 * in_channels, out_channels, embed_dim=temb_channels)] for _ in range(num_layers): resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=temb_channels)) self.resnets = nn.ModuleList(resnets) if non_linearity is None: self.nonlinearity = None else: self.nonlinearity = get_activation(non_linearity) self.upsample = None if add_upsample: self.upsample = Upsample1D(out_channels, use_conv_transpose=True) def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Optional[Tuple[torch.Tensor, ...]]=None, temb: Optional[torch.Tensor]=None) -> torch.Tensor: if res_hidden_states_tuple is not None: res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat((hidden_states, res_hidden_states), dim=1) hidden_states = self.resnets[0](hidden_states, temb) for resnet in self.resnets[1:]: hidden_states = resnet(hidden_states, temb) if self.nonlinearity is not None: hidden_states = self.nonlinearity(hidden_states) if self.upsample is not None: hidden_states = self.upsample(hidden_states) return hidden_states class ValueFunctionMidBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: int, embed_dim: int): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.embed_dim = embed_dim self.res1 = ResidualTemporalBlock1D(in_channels, in_channels // 2, embed_dim=embed_dim) self.down1 = Downsample1D(out_channels // 2, use_conv=True) self.res2 = ResidualTemporalBlock1D(in_channels // 2, in_channels // 4, embed_dim=embed_dim) self.down2 = Downsample1D(out_channels // 4, use_conv=True) def forward(self, x: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: x = self.res1(x, temb) x = 
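# Example sketch (editor-added, not part of this file): why the first resnet of UpResnetBlock1D
# above is built with 2 * in_channels. The skip tensor saved by the matching down block is
# concatenated along the channel axis before the first residual layer runs.
import torch

hidden_states = torch.randn(2, 32, 64)        # (batch, channels, length) coming up the decoder
res_hidden_states = torch.randn(2, 32, 64)    # skip tensor from the matching down block
merged = torch.cat((hidden_states, res_hidden_states), dim=1)
print(merged.shape)  # torch.Size([2, 64, 64]): channel count doubles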
self.down1(x) x = self.res2(x, temb) x = self.down2(x) return x class MidResTemporalBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: int, embed_dim: int, num_layers: int=1, add_downsample: bool=False, add_upsample: bool=False, non_linearity: Optional[str]=None): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.add_downsample = add_downsample resnets = [ResidualTemporalBlock1D(in_channels, out_channels, embed_dim=embed_dim)] for _ in range(num_layers): resnets.append(ResidualTemporalBlock1D(out_channels, out_channels, embed_dim=embed_dim)) self.resnets = nn.ModuleList(resnets) if non_linearity is None: self.nonlinearity = None else: self.nonlinearity = get_activation(non_linearity) self.upsample = None if add_upsample: self.upsample = Upsample1D(out_channels, use_conv=True) self.downsample = None if add_downsample: self.downsample = Downsample1D(out_channels, use_conv=True) if self.upsample and self.downsample: raise ValueError('Block cannot downsample and upsample') def forward(self, hidden_states: torch.Tensor, temb: torch.Tensor) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb) for resnet in self.resnets[1:]: hidden_states = resnet(hidden_states, temb) if self.upsample: hidden_states = self.upsample(hidden_states) if self.downsample: hidden_states = self.downsample(hidden_states) return hidden_states class OutConv1DBlock(nn.Module): def __init__(self, num_groups_out: int, out_channels: int, embed_dim: int, act_fn: str): super().__init__() self.final_conv1d_1 = nn.Conv1d(embed_dim, embed_dim, 5, padding=2) self.final_conv1d_gn = nn.GroupNorm(num_groups_out, embed_dim) self.final_conv1d_act = get_activation(act_fn) self.final_conv1d_2 = nn.Conv1d(embed_dim, out_channels, 1) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.final_conv1d_1(hidden_states) hidden_states = rearrange_dims(hidden_states) hidden_states = self.final_conv1d_gn(hidden_states) hidden_states = rearrange_dims(hidden_states) hidden_states = self.final_conv1d_act(hidden_states) hidden_states = self.final_conv1d_2(hidden_states) return hidden_states class OutValueFunctionBlock(nn.Module): def __init__(self, fc_dim: int, embed_dim: int, act_fn: str='mish'): super().__init__() self.final_block = nn.ModuleList([nn.Linear(fc_dim + embed_dim, fc_dim // 2), get_activation(act_fn), nn.Linear(fc_dim // 2, 1)]) def forward(self, hidden_states: torch.Tensor, temb: torch.Tensor) -> torch.Tensor: hidden_states = hidden_states.view(hidden_states.shape[0], -1) hidden_states = torch.cat((hidden_states, temb), dim=-1) for layer in self.final_block: hidden_states = layer(hidden_states) return hidden_states _kernels = {'linear': [1 / 8, 3 / 8, 3 / 8, 1 / 8], 'cubic': [-0.01171875, -0.03515625, 0.11328125, 0.43359375, 0.43359375, 0.11328125, -0.03515625, -0.01171875], 'lanczos3': [0.003689131001010537, 0.015056144446134567, -0.03399861603975296, -0.066637322306633, 0.13550527393817902, 0.44638532400131226, 0.44638532400131226, 0.13550527393817902, -0.066637322306633, -0.03399861603975296, 0.015056144446134567, 0.003689131001010537]} class Downsample1d(nn.Module): def __init__(self, kernel: str='linear', pad_mode: str='reflect'): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor(_kernels[kernel]) self.pad = kernel_1d.shape[0] // 2 - 1 self.register_buffer('kernel', kernel_1d) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = 
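# Example sketch (editor-added, not part of this file): the channel-wise FIR downsampling that
# Downsample1d implements above. The fixed 1-D kernel is placed on the diagonal of a (C, C, K)
# weight so that conv1d with stride 2 filters each channel independently.
import torch
import torch.nn.functional as F

kernel_1d = torch.tensor([1 / 8, 3 / 8, 3 / 8, 1 / 8])   # the 'linear' entry of _kernels
x = torch.randn(1, 3, 16)
pad = kernel_1d.shape[0] // 2 - 1
x = F.pad(x, (pad,) * 2, mode='reflect')
weight = x.new_zeros(3, 3, kernel_1d.shape[0])
indices = torch.arange(3)
weight[indices, indices] = kernel_1d                      # diagonal: channel i only sees channel i
y = F.conv1d(x, weight, stride=2)
print(y.shape)  # torch.Size([1, 3, 8]): length halved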
F.pad(hidden_states, (self.pad,) * 2, self.pad_mode) weight = hidden_states.new_zeros([hidden_states.shape[1], hidden_states.shape[1], self.kernel.shape[0]]) indices = torch.arange(hidden_states.shape[1], device=hidden_states.device) kernel = self.kernel.to(weight)[None, :].expand(hidden_states.shape[1], -1) weight[indices, indices] = kernel return F.conv1d(hidden_states, weight, stride=2) class Upsample1d(nn.Module): def __init__(self, kernel: str='linear', pad_mode: str='reflect'): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor(_kernels[kernel]) * 2 self.pad = kernel_1d.shape[0] // 2 - 1 self.register_buffer('kernel', kernel_1d) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = F.pad(hidden_states, ((self.pad + 1) // 2,) * 2, self.pad_mode) weight = hidden_states.new_zeros([hidden_states.shape[1], hidden_states.shape[1], self.kernel.shape[0]]) indices = torch.arange(hidden_states.shape[1], device=hidden_states.device) kernel = self.kernel.to(weight)[None, :].expand(hidden_states.shape[1], -1) weight[indices, indices] = kernel return F.conv_transpose1d(hidden_states, weight, stride=2, padding=self.pad * 2 + 1) class SelfAttention1d(nn.Module): def __init__(self, in_channels: int, n_head: int=1, dropout_rate: float=0.0): super().__init__() self.channels = in_channels self.group_norm = nn.GroupNorm(1, num_channels=in_channels) self.num_heads = n_head self.query = nn.Linear(self.channels, self.channels) self.key = nn.Linear(self.channels, self.channels) self.value = nn.Linear(self.channels, self.channels) self.proj_attn = nn.Linear(self.channels, self.channels, bias=True) self.dropout = nn.Dropout(dropout_rate, inplace=True) def transpose_for_scores(self, projection: torch.Tensor) -> torch.Tensor: new_projection_shape = projection.size()[:-1] + (self.num_heads, -1) new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3) return new_projection def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: residual = hidden_states (batch, channel_dim, seq) = hidden_states.shape hidden_states = self.group_norm(hidden_states) hidden_states = hidden_states.transpose(1, 2) query_proj = self.query(hidden_states) key_proj = self.key(hidden_states) value_proj = self.value(hidden_states) query_states = self.transpose_for_scores(query_proj) key_states = self.transpose_for_scores(key_proj) value_states = self.transpose_for_scores(value_proj) scale = 1 / math.sqrt(math.sqrt(key_states.shape[-1])) attention_scores = torch.matmul(query_states * scale, key_states.transpose(-1, -2) * scale) attention_probs = torch.softmax(attention_scores, dim=-1) hidden_states = torch.matmul(attention_probs, value_states) hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous() new_hidden_states_shape = hidden_states.size()[:-2] + (self.channels,) hidden_states = hidden_states.view(new_hidden_states_shape) hidden_states = self.proj_attn(hidden_states) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.dropout(hidden_states) output = hidden_states + residual return output class ResConvBlock(nn.Module): def __init__(self, in_channels: int, mid_channels: int, out_channels: int, is_last: bool=False): super().__init__() self.is_last = is_last self.has_conv_skip = in_channels != out_channels if self.has_conv_skip: self.conv_skip = nn.Conv1d(in_channels, out_channels, 1, bias=False) self.conv_1 = nn.Conv1d(in_channels, mid_channels, 5, padding=2) self.group_norm_1 = nn.GroupNorm(1, mid_channels) 
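# Example sketch (editor-added, not part of this file): the scaling used in SelfAttention1d.forward
# above. Multiplying both query and key by 1/sqrt(sqrt(d)) before the matmul gives the same logits
# as the usual 1/sqrt(d) scaling, while keeping the intermediate products smaller, which can help
# in reduced precision.
import math
import torch

d = 8
q = torch.randn(1, 2, 5, d)
k = torch.randn(1, 2, 5, d)
scale = 1 / math.sqrt(math.sqrt(d))
split = torch.softmax(torch.matmul(q * scale, k.transpose(-1, -2) * scale), dim=-1)
fused = torch.softmax(torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(d), dim=-1)
assert torch.allclose(split, fused, atol=1e-06)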
self.gelu_1 = nn.GELU() self.conv_2 = nn.Conv1d(mid_channels, out_channels, 5, padding=2) if not self.is_last: self.group_norm_2 = nn.GroupNorm(1, out_channels) self.gelu_2 = nn.GELU() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: residual = self.conv_skip(hidden_states) if self.has_conv_skip else hidden_states hidden_states = self.conv_1(hidden_states) hidden_states = self.group_norm_1(hidden_states) hidden_states = self.gelu_1(hidden_states) hidden_states = self.conv_2(hidden_states) if not self.is_last: hidden_states = self.group_norm_2(hidden_states) hidden_states = self.gelu_2(hidden_states) output = hidden_states + residual return output class UNetMidBlock1D(nn.Module): def __init__(self, mid_channels: int, in_channels: int, out_channels: Optional[int]=None): super().__init__() out_channels = in_channels if out_channels is None else out_channels self.down = Downsample1d('cubic') resnets = [ResConvBlock(in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels)] attentions = [SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(out_channels, out_channels // 32)] self.up = Upsample1d(kernel='cubic') self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.down(hidden_states) for (attn, resnet) in zip(self.attentions, self.resnets): hidden_states = resnet(hidden_states) hidden_states = attn(hidden_states) hidden_states = self.up(hidden_states) return hidden_states class AttnDownBlock1D(nn.Module): def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int]=None): super().__init__() mid_channels = out_channels if mid_channels is None else mid_channels self.down = Downsample1d('cubic') resnets = [ResConvBlock(in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels)] attentions = [SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(out_channels, out_channels // 32)] self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.down(hidden_states) for (resnet, attn) in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states) hidden_states = attn(hidden_states) return (hidden_states, (hidden_states,)) class DownBlock1D(nn.Module): def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int]=None): super().__init__() mid_channels = out_channels if mid_channels is None else mid_channels self.down = Downsample1d('cubic') resnets = [ResConvBlock(in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels)] self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: 
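# Example sketch (editor-added, not part of this file): the skip path of ResConvBlock above. When a
# block changes the channel count, a bias-free 1x1 convolution projects the input so the residual
# addition still lines up; otherwise the input is added unchanged.
import torch
import torch.nn as nn

in_channels, out_channels = 16, 32
conv_skip = nn.Conv1d(in_channels, out_channels, 1, bias=False)
body = nn.Conv1d(in_channels, out_channels, 5, padding=2)   # stand-in for the conv/norm/GELU stack
x = torch.randn(2, in_channels, 64)
residual = conv_skip(x) if in_channels != out_channels else x
out = body(x) + residual
print(out.shape)  # torch.Size([2, 32, 64])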
Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.down(hidden_states) for resnet in self.resnets: hidden_states = resnet(hidden_states) return (hidden_states, (hidden_states,)) class DownBlock1DNoSkip(nn.Module): def __init__(self, out_channels: int, in_channels: int, mid_channels: Optional[int]=None): super().__init__() mid_channels = out_channels if mid_channels is None else mid_channels resnets = [ResConvBlock(in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels)] self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = torch.cat([hidden_states, temb], dim=1) for resnet in self.resnets: hidden_states = resnet(hidden_states) return (hidden_states, (hidden_states,)) class AttnUpBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int]=None): super().__init__() mid_channels = out_channels if mid_channels is None else mid_channels resnets = [ResConvBlock(2 * in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels)] attentions = [SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(mid_channels, mid_channels // 32), SelfAttention1d(out_channels, out_channels // 32)] self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.up = Upsample1d(kernel='cubic') def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None) -> torch.Tensor: res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) for (resnet, attn) in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states) hidden_states = attn(hidden_states) hidden_states = self.up(hidden_states) return hidden_states class UpBlock1D(nn.Module): def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int]=None): super().__init__() mid_channels = in_channels if mid_channels is None else mid_channels resnets = [ResConvBlock(2 * in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels)] self.resnets = nn.ModuleList(resnets) self.up = Upsample1d(kernel='cubic') def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None) -> torch.Tensor: res_hidden_states = res_hidden_states_tuple[-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) for resnet in self.resnets: hidden_states = resnet(hidden_states) hidden_states = self.up(hidden_states) return hidden_states class UpBlock1DNoSkip(nn.Module): def __init__(self, in_channels: int, out_channels: int, mid_channels: Optional[int]=None): super().__init__() mid_channels = in_channels if mid_channels is None else mid_channels resnets = [ResConvBlock(2 * in_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, mid_channels), ResConvBlock(mid_channels, mid_channels, out_channels, is_last=True)] self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None) -> torch.Tensor: res_hidden_states = res_hidden_states_tuple[-1] hidden_states = 
torch.cat([hidden_states, res_hidden_states], dim=1) for resnet in self.resnets: hidden_states = resnet(hidden_states) return hidden_states DownBlockType = Union[DownResnetBlock1D, DownBlock1D, AttnDownBlock1D, DownBlock1DNoSkip] MidBlockType = Union[MidResTemporalBlock1D, ValueFunctionMidBlock1D, UNetMidBlock1D] OutBlockType = Union[OutConv1DBlock, OutValueFunctionBlock] UpBlockType = Union[UpResnetBlock1D, UpBlock1D, AttnUpBlock1D, UpBlock1DNoSkip] def get_down_block(down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool) -> DownBlockType: if down_block_type == 'DownResnetBlock1D': return DownResnetBlock1D(in_channels=in_channels, num_layers=num_layers, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample) elif down_block_type == 'DownBlock1D': return DownBlock1D(out_channels=out_channels, in_channels=in_channels) elif down_block_type == 'AttnDownBlock1D': return AttnDownBlock1D(out_channels=out_channels, in_channels=in_channels) elif down_block_type == 'DownBlock1DNoSkip': return DownBlock1DNoSkip(out_channels=out_channels, in_channels=in_channels) raise ValueError(f'{down_block_type} does not exist.') def get_up_block(up_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_upsample: bool) -> UpBlockType: if up_block_type == 'UpResnetBlock1D': return UpResnetBlock1D(in_channels=in_channels, num_layers=num_layers, out_channels=out_channels, temb_channels=temb_channels, add_upsample=add_upsample) elif up_block_type == 'UpBlock1D': return UpBlock1D(in_channels=in_channels, out_channels=out_channels) elif up_block_type == 'AttnUpBlock1D': return AttnUpBlock1D(in_channels=in_channels, out_channels=out_channels) elif up_block_type == 'UpBlock1DNoSkip': return UpBlock1DNoSkip(in_channels=in_channels, out_channels=out_channels) raise ValueError(f'{up_block_type} does not exist.') def get_mid_block(mid_block_type: str, num_layers: int, in_channels: int, mid_channels: int, out_channels: int, embed_dim: int, add_downsample: bool) -> MidBlockType: if mid_block_type == 'MidResTemporalBlock1D': return MidResTemporalBlock1D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, embed_dim=embed_dim, add_downsample=add_downsample) elif mid_block_type == 'ValueFunctionMidBlock1D': return ValueFunctionMidBlock1D(in_channels=in_channels, out_channels=out_channels, embed_dim=embed_dim) elif mid_block_type == 'UNetMidBlock1D': return UNetMidBlock1D(in_channels=in_channels, mid_channels=mid_channels, out_channels=out_channels) raise ValueError(f'{mid_block_type} does not exist.') def get_out_block(*, out_block_type: str, num_groups_out: int, embed_dim: int, out_channels: int, act_fn: str, fc_dim: int) -> Optional[OutBlockType]: if out_block_type == 'OutConv1DBlock': return OutConv1DBlock(num_groups_out, out_channels, embed_dim, act_fn) elif out_block_type == 'ValueFunction': return OutValueFunctionBlock(fc_dim, embed_dim, act_fn) return None # File: diffusers-main/src/diffusers/models/unets/unet_2d.py from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ..embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block @dataclass class UNet2DOutput(BaseOutput): sample: 
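# Example sketch (editor-added, not part of this file): using the 1-D block factories defined above.
# The block names and keyword signatures come from this file; the concrete sizes and the
# down/up pairing are the editor's toy choices, not a configuration from a real checkpoint.
import torch
from diffusers.models.unets.unet_1d_blocks import get_down_block, get_up_block

down = get_down_block('DownResnetBlock1D', num_layers=2, in_channels=32, out_channels=64,
                      temb_channels=32, add_downsample=False)
up = get_up_block('UpResnetBlock1D', num_layers=2, in_channels=64, out_channels=32,
                  temb_channels=32, add_upsample=False)
x = torch.randn(1, 32, 128)
temb = torch.randn(1, 32)
x, res_samples = down(x, temb=temb)                          # (1, 64, 128) plus the skip tensors
x = up(x, res_hidden_states_tuple=res_samples, temb=temb)
print(x.shape)  # torch.Size([1, 32, 128])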
torch.Tensor class UNet2DModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, sample_size: Optional[Union[int, Tuple[int, int]]]=None, in_channels: int=3, out_channels: int=3, center_input_sample: bool=False, time_embedding_type: str='positional', freq_shift: int=0, flip_sin_to_cos: bool=True, down_block_types: Tuple[str, ...]=('DownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D'), up_block_types: Tuple[str, ...]=('AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'UpBlock2D'), block_out_channels: Tuple[int, ...]=(224, 448, 672, 896), layers_per_block: int=2, mid_block_scale_factor: float=1, downsample_padding: int=1, downsample_type: str='conv', upsample_type: str='conv', dropout: float=0.0, act_fn: str='silu', attention_head_dim: Optional[int]=8, norm_num_groups: int=32, attn_norm_num_groups: Optional[int]=None, norm_eps: float=1e-05, resnet_time_scale_shift: str='default', add_attention: bool=True, class_embed_type: Optional[str]=None, num_class_embeds: Optional[int]=None, num_train_timesteps: Optional[int]=None): super().__init__() self.sample_size = sample_size time_embed_dim = block_out_channels[0] * 4 if len(down_block_types) != len(up_block_types): raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)) if time_embedding_type == 'fourier': self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16) timestep_input_dim = 2 * block_out_channels[0] elif time_embedding_type == 'positional': self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] elif time_embedding_type == 'learned': self.time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0]) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == 'timestep': self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == 'identity': self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) else: self.class_embedding = None self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = nn.ModuleList([]) output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, downsample_type=downsample_type, dropout=dropout) self.down_blocks.append(down_block) self.mid_block = 
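# Example sketch (editor-added, not part of this file): instantiating the UNet2DModel defined above
# with a small editor-chosen configuration and running the unconditional forward pass on random data.
import torch
from diffusers import UNet2DModel

model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3,
                    block_out_channels=(32, 64), layers_per_block=1,
                    down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
                    up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
                    norm_num_groups=32, attention_head_dim=8)
sample = torch.randn(1, 3, 32, 32)
out = model(sample, timestep=10).sample
print(out.shape)  # torch.Size([1, 3, 32, 32])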
UNetMidBlock2D(in_channels=block_out_channels[-1], temb_channels=time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, attention_head_dim=attention_head_dim if attention_head_dim is not None else block_out_channels[-1], resnet_groups=norm_num_groups, attn_groups=attn_norm_num_groups, add_attention=add_attention) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block(up_block_type, num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel, resnet_time_scale_shift=resnet_time_scale_shift, upsample_type=upsample_type, dropout=dropout) self.up_blocks.append(up_block) prev_output_channel = output_channel num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps) self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], class_labels: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[UNet2DOutput, Tuple]: if self.config.center_input_sample: sample = 2 * sample - 1.0 timesteps = timestep if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=self.dtype) emb = self.time_embedding(t_emb) if self.class_embedding is not None: if class_labels is None: raise ValueError('class_labels should be provided when doing class conditioning') if self.config.class_embed_type == 'timestep': class_labels = self.time_proj(class_labels) class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) emb = emb + class_emb elif self.class_embedding is None and class_labels is not None: raise ValueError('class_embedding needs to be initialized in order to use class conditioning') skip_sample = sample sample = self.conv_in(sample) down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'skip_conv'): (sample, res_samples, skip_sample) = downsample_block(hidden_states=sample, temb=emb, skip_sample=skip_sample) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples sample = self.mid_block(sample, emb) skip_sample = None for upsample_block in self.up_blocks: res_samples = down_block_res_samples[-len(upsample_block.resnets):] down_block_res_samples = down_block_res_samples[:-len(upsample_block.resnets)] if hasattr(upsample_block, 
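# Example sketch (editor-added, not part of this file): the class-conditioning path handled in
# UNet2DModel.forward above. With `num_class_embeds` set, an nn.Embedding lookup is added to the
# timestep embedding, so `class_labels` must be supplied at call time; the sizes are toy choices.
import torch
from diffusers import UNet2DModel

model = UNet2DModel(sample_size=16, in_channels=3, out_channels=3,
                    block_out_channels=(32, 64), layers_per_block=1,
                    down_block_types=('DownBlock2D', 'DownBlock2D'),
                    up_block_types=('UpBlock2D', 'UpBlock2D'),
                    num_class_embeds=10)
sample = torch.randn(2, 3, 16, 16)
class_labels = torch.tensor([3, 7])
out = model(sample, timestep=50, class_labels=class_labels).sample   # omitting class_labels raises
print(out.shape)  # torch.Size([2, 3, 16, 16])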
'skip_conv'): (sample, skip_sample) = upsample_block(sample, res_samples, emb, skip_sample) else: sample = upsample_block(sample, res_samples, emb) sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if skip_sample is not None: sample += skip_sample if self.config.time_embedding_type == 'fourier': timesteps = timesteps.reshape((sample.shape[0], *[1] * len(sample.shape[1:]))) sample = sample / timesteps if not return_dict: return (sample,) return UNet2DOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/unet_2d_blocks.py from typing import Any, Dict, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from torch import nn from ...utils import deprecate, is_torch_version, logging from ...utils.torch_utils import apply_freeu from ..activations import get_activation from ..attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0 from ..normalization import AdaGroupNorm from ..resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, ResnetBlockCondNorm2D, Upsample2D from ..transformers.dual_transformer_2d import DualTransformer2DModel from ..transformers.transformer_2d import Transformer2DModel logger = logging.get_logger(__name__) def get_down_block(down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, transformer_layers_per_block: int=1, num_attention_heads: Optional[int]=None, resnet_groups: Optional[int]=None, cross_attention_dim: Optional[int]=None, downsample_padding: Optional[int]=None, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, resnet_time_scale_shift: str='default', attention_type: str='default', resnet_skip_time_act: bool=False, resnet_out_scale_factor: float=1.0, cross_attention_norm: Optional[str]=None, attention_head_dim: Optional[int]=None, downsample_type: Optional[str]=None, dropout: float=0.0): if attention_head_dim is None: logger.warning(f'It is recommended to provide `attention_head_dim` when calling `get_down_block`. 
Defaulting `attention_head_dim` to {num_attention_heads}.') attention_head_dim = num_attention_heads down_block_type = down_block_type[7:] if down_block_type.startswith('UNetRes') else down_block_type if down_block_type == 'DownBlock2D': return DownBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift) elif down_block_type == 'ResnetDownsampleBlock2D': return ResnetDownsampleBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor) elif down_block_type == 'AttnDownBlock2D': if add_downsample is False: downsample_type = None else: downsample_type = downsample_type or 'conv' return AttnDownBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, downsample_type=downsample_type) elif down_block_type == 'CrossAttnDownBlock2D': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlock2D') return CrossAttnDownBlock2D(num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type) elif down_block_type == 'SimpleCrossAttnDownBlock2D': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D') return SimpleCrossAttnDownBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm) elif down_block_type == 'SkipDownBlock2D': return SkipDownBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift) elif down_block_type == 'AttnSkipDownBlock2D': return 
AttnSkipDownBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift) elif down_block_type == 'DownEncoderBlock2D': return DownEncoderBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift) elif down_block_type == 'AttnDownEncoderBlock2D': return AttnDownEncoderBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift) elif down_block_type == 'KDownBlock2D': return KDownBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn) elif down_block_type == 'KCrossAttnDownBlock2D': return KCrossAttnDownBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, add_self_attention=True if not add_downsample else False) raise ValueError(f'{down_block_type} does not exist.') def get_mid_block(mid_block_type: str, temb_channels: int, in_channels: int, resnet_eps: float, resnet_act_fn: str, resnet_groups: int, output_scale_factor: float=1.0, transformer_layers_per_block: int=1, num_attention_heads: Optional[int]=None, cross_attention_dim: Optional[int]=None, dual_cross_attention: bool=False, use_linear_projection: bool=False, mid_block_only_cross_attention: bool=False, upcast_attention: bool=False, resnet_time_scale_shift: str='default', attention_type: str='default', resnet_skip_time_act: bool=False, cross_attention_norm: Optional[str]=None, attention_head_dim: Optional[int]=1, dropout: float=0.0): if mid_block_type == 'UNetMidBlock2DCrossAttn': return UNetMidBlock2DCrossAttn(transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, resnet_groups=resnet_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type) elif mid_block_type == 'UNetMidBlock2DSimpleCrossAttn': return UNetMidBlock2DSimpleCrossAttn(in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, 
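# Example sketch (editor-added, not part of this file): building a cross-attention down block through
# the `get_down_block` factory above. The block name and keyword names come from this file; the
# concrete sizes, the 77-token context length, and the 768 cross-attention width are toy choices.
import torch
from diffusers.models.unets.unet_2d_blocks import get_down_block

block = get_down_block('CrossAttnDownBlock2D', num_layers=1, in_channels=32, out_channels=64,
                       temb_channels=128, add_downsample=True, resnet_eps=1e-05, resnet_act_fn='silu',
                       resnet_groups=32, cross_attention_dim=768, num_attention_heads=8,
                       attention_head_dim=8)
hidden_states = torch.randn(1, 32, 16, 16)
temb = torch.randn(1, 128)
encoder_hidden_states = torch.randn(1, 77, 768)
hidden_states, res_samples = block(hidden_states, temb=temb, encoder_hidden_states=encoder_hidden_states)
print(hidden_states.shape)  # torch.Size([1, 64, 8, 8]) after the stride-2 downsampler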
only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm) elif mid_block_type == 'UNetMidBlock2D': return UNetMidBlock2D(in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, num_layers=0, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False) elif mid_block_type is None: return None else: raise ValueError(f'unknown mid_block_type : {mid_block_type}') def get_up_block(up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, resolution_idx: Optional[int]=None, transformer_layers_per_block: int=1, num_attention_heads: Optional[int]=None, resnet_groups: Optional[int]=None, cross_attention_dim: Optional[int]=None, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, resnet_time_scale_shift: str='default', attention_type: str='default', resnet_skip_time_act: bool=False, resnet_out_scale_factor: float=1.0, cross_attention_norm: Optional[str]=None, attention_head_dim: Optional[int]=None, upsample_type: Optional[str]=None, dropout: float=0.0) -> nn.Module: if attention_head_dim is None: logger.warning(f'It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.') attention_head_dim = num_attention_heads up_block_type = up_block_type[7:] if up_block_type.startswith('UNetRes') else up_block_type if up_block_type == 'UpBlock2D': return UpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift) elif up_block_type == 'ResnetUpsampleBlock2D': return ResnetUpsampleBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor) elif up_block_type == 'CrossAttnUpBlock2D': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnUpBlock2D') return CrossAttnUpBlock2D(num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type) elif up_block_type == 'SimpleCrossAttnUpBlock2D': if cross_attention_dim is None: raise 
ValueError('cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D') return SimpleCrossAttnUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm) elif up_block_type == 'AttnUpBlock2D': if add_upsample is False: upsample_type = None else: upsample_type = upsample_type or 'conv' return AttnUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, upsample_type=upsample_type) elif up_block_type == 'SkipUpBlock2D': return SkipUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_time_scale_shift=resnet_time_scale_shift) elif up_block_type == 'AttnSkipUpBlock2D': return AttnSkipUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift) elif up_block_type == 'UpDecoderBlock2D': return UpDecoderBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, temb_channels=temb_channels) elif up_block_type == 'AttnUpDecoderBlock2D': return AttnUpDecoderBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, temb_channels=temb_channels) elif up_block_type == 'KUpBlock2D': return KUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn) elif up_block_type == 'KCrossAttnUpBlock2D': return KCrossAttnUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim) raise 
ValueError(f'{up_block_type} does not exist.') class AutoencoderTinyBlock(nn.Module): def __init__(self, in_channels: int, out_channels: int, act_fn: str): super().__init__() act_fn = get_activation(act_fn) self.conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), act_fn, nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), act_fn, nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)) self.skip = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) if in_channels != out_channels else nn.Identity() self.fuse = nn.ReLU() def forward(self, x: torch.Tensor) -> torch.Tensor: return self.fuse(self.conv(x) + self.skip(x)) class UNetMidBlock2D(nn.Module): def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, attn_groups: Optional[int]=None, resnet_pre_norm: bool=True, add_attention: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0): super().__init__() resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.add_attention = add_attention if attn_groups is None: attn_groups = resnet_groups if resnet_time_scale_shift == 'default' else None if resnet_time_scale_shift == 'spatial': resnets = [ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm='spatial', non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor)] else: resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)] attentions = [] if attention_head_dim is None: logger.warning(f'It is not recommend to pass `attention_head_dim=None`. 
Defaulting `attention_head_dim` to `in_channels`: {in_channels}.') attention_head_dim = in_channels for _ in range(num_layers): if self.add_attention: attentions.append(Attention(in_channels, heads=in_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=attn_groups, spatial_norm_dim=temb_channels if resnet_time_scale_shift == 'spatial' else None, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True)) else: attentions.append(None) if resnet_time_scale_shift == 'spatial': resnets.append(ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm='spatial', non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor)) else: resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): if attn is not None: hidden_states = attn(hidden_states, temb=temb) hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlock2DCrossAttn(nn.Module): def __init__(self, in_channels: int, temb_channels: int, out_channels: Optional[int]=None, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_groups_out: Optional[int]=None, resnet_pre_norm: bool=True, num_attention_heads: int=1, output_scale_factor: float=1.0, cross_attention_dim: int=1280, dual_cross_attention: bool=False, use_linear_projection: bool=False, upcast_attention: bool=False, attention_type: str='default'): super().__init__() out_channels = out_channels or in_channels self.in_channels = in_channels self.out_channels = out_channels self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers resnet_groups_out = resnet_groups_out or resnet_groups resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, groups_out=resnet_groups_out, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)] attentions = [] for i in range(num_layers): if not dual_cross_attention: attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups_out, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, 
out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) resnets.append(ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups_out, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') hidden_states = self.resnets[0](hidden_states, temb) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) else: hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlock2DSimpleCrossAttn(nn.Module): def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0, cross_attention_dim: int=1280, skip_time_act: bool=False, only_cross_attention: bool=False, cross_attention_norm: Optional[str]=None): super().__init__() self.has_cross_attention = True self.attention_head_dim = attention_head_dim resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.num_heads = in_channels // self.attention_head_dim resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act)] attentions = [] for _ in range(num_layers): processor = AttnAddedKVProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') else AttnAddedKVProcessor() attentions.append(Attention(query_dim=in_channels, cross_attention_dim=in_channels, heads=self.num_heads, dim_head=self.attention_head_dim, 
added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor)) resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') if attention_mask is None: mask = None if encoder_hidden_states is None else encoder_attention_mask else: mask = attention_mask hidden_states = self.resnets[0](hidden_states, temb) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs) hidden_states = resnet(hidden_states, temb) return hidden_states class AttnDownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0, downsample_padding: int=1, downsample_type: str='conv'): super().__init__() resnets = [] attentions = [] self.downsample_type = downsample_type if attention_head_dim is None: logger.warning(f'It is not recommend to pass `attention_head_dim=None`. 
Defaulting `attention_head_dim` to `in_channels`: {out_channels}.') attention_head_dim = out_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) attentions.append(Attention(out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if downsample_type == 'conv': self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) elif downsample_type == 'resnet': self.downsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, down=True)]) else: self.downsamplers = None def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') output_states = () for (resnet, attn) in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, **cross_attention_kwargs) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: if self.downsample_type == 'resnet': hidden_states = downsampler(hidden_states, temb=temb) else: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return (hidden_states, output_states) class CrossAttnDownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, downsample_padding: int=1, add_downsample: bool=True, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, attention_type: str='default'): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) if not dual_cross_attention: attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None, additional_residuals: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') output_states = () blocks = list(zip(self.resnets, self.attentions)) for (i, (resnet, attn)) in enumerate(blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] if i == len(blocks) - 1 and additional_residuals is not None: hidden_states = hidden_states + additional_residuals output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class DownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_downsample: bool=True, downsample_padding: int=1): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, *args, **kwargs) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
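A minimal usage sketch of CrossAttnDownBlock2D as defined above, assuming the class is in scope (e.g. imported from this module); every size below (channel counts, time-embedding width, sequence length, cross_attention_dim) is an illustrative assumption, not a value taken from this file.

import torch

block = CrossAttnDownBlock2D(
    in_channels=64,          # must be divisible by the default resnet_groups of 32
    out_channels=64,
    temb_channels=128,       # width of the time embedding passed as `temb`
    num_layers=1,
    num_attention_heads=8,   # per-head dim becomes out_channels // num_attention_heads
    cross_attention_dim=768, # width of encoder_hidden_states (e.g. a text encoder)
)
sample = torch.randn(1, 64, 32, 32)
temb = torch.randn(1, 128)
context = torch.randn(1, 77, 768)
hidden, residuals = block(sample, temb=temb, encoder_hidden_states=context)
# hidden: [1, 64, 16, 16] after the conv downsampler; `residuals` collects one feature map per
# (resnet, attention) pair plus the downsampled output, later consumed by the matching up block
# as res_hidden_states_tuple.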
deprecate('scale', '1.0.0', deprecation_message) output_states = () for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class DownEncoderBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_downsample: bool=True, downsample_padding: int=1): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels if resnet_time_scale_shift == 'spatial': resnets.append(ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm='spatial', non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor)) else: resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) for resnet in self.resnets: hidden_states = resnet(hidden_states, temb=None) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states class AttnDownEncoderBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0, add_downsample: bool=True, downsample_padding: int=1): super().__init__() resnets = [] attentions = [] if attention_head_dim is None: logger.warning(f'It is not recommend to pass `attention_head_dim=None`. 
Defaulting `attention_head_dim` to `in_channels`: {out_channels}.') attention_head_dim = out_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels if resnet_time_scale_shift == 'spatial': resnets.append(ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm='spatial', non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor)) else: resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) attentions.append(Attention(out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) for (resnet, attn) in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb=None) hidden_states = attn(hidden_states) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states class AttnSkipDownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=np.sqrt(2.0), add_downsample: bool=True): super().__init__() self.attentions = nn.ModuleList([]) self.resnets = nn.ModuleList([]) if attention_head_dim is None: logger.warning(f'It is not recommend to pass `attention_head_dim=None`. 
Defaulting `attention_head_dim` to `in_channels`: {out_channels}.') attention_head_dim = out_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(in_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.attentions.append(Attention(out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=32, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True)) if add_downsample: self.resnet_down = ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, down=True, kernel='fir') self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) else: self.resnet_down = None self.downsamplers = None self.skip_conv = None def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, skip_sample: Optional[torch.Tensor]=None, *args, **kwargs) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...], torch.Tensor]: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
deprecate('scale', '1.0.0', deprecation_message) output_states = () for (resnet, attn) in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) output_states += (hidden_states,) if self.downsamplers is not None: hidden_states = self.resnet_down(hidden_states, temb) for downsampler in self.downsamplers: skip_sample = downsampler(skip_sample) hidden_states = self.skip_conv(skip_sample) + hidden_states output_states += (hidden_states,) return (hidden_states, output_states, skip_sample) class SkipDownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_pre_norm: bool=True, output_scale_factor: float=np.sqrt(2.0), add_downsample: bool=True, downsample_padding: int=1): super().__init__() self.resnets = nn.ModuleList([]) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(in_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) if add_downsample: self.resnet_down = ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, down=True, kernel='fir') self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) else: self.resnet_down = None self.downsamplers = None self.skip_conv = None def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, skip_sample: Optional[torch.Tensor]=None, *args, **kwargs) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...], torch.Tensor]: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
deprecate('scale', '1.0.0', deprecation_message) output_states = () for resnet in self.resnets: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: hidden_states = self.resnet_down(hidden_states, temb) for downsampler in self.downsamplers: skip_sample = downsampler(skip_sample) hidden_states = self.skip_conv(skip_sample) + hidden_states output_states += (hidden_states,) return (hidden_states, output_states, skip_sample) class ResnetDownsampleBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_downsample: bool=True, skip_time_act: bool=False): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act)) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, down=True)]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, *args, **kwargs) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
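The create_custom_forward closure that appears in these forward methods is a generic wrapper for running a submodule under torch.utils.checkpoint; a standalone sketch of the same pattern, with a stand-in module instead of a real ResnetBlock2D:

import torch
import torch.nn as nn
import torch.utils.checkpoint

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

resnet = nn.Conv2d(64, 64, kernel_size=3, padding=1)  # stand-in for a ResnetBlock2D
hidden_states = torch.randn(1, 64, 32, 32, requires_grad=True)
# the blocks pass use_reentrant=False on torch >= 1.11.0
hidden_states = torch.utils.checkpoint.checkpoint(
    create_custom_forward(resnet), hidden_states, use_reentrant=False
)
# Activations inside the wrapped module are recomputed during backward instead of stored,
# trading compute for memory; the blocks only take this path when both self.training and
# self.gradient_checkpointing are set.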
deprecate('scale', '1.0.0', deprecation_message) output_states = () for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states, temb) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class SimpleCrossAttnDownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, add_downsample: bool=True, skip_time_act: bool=False, only_cross_attention: bool=False, cross_attention_norm: Optional[str]=None): super().__init__() self.has_cross_attention = True resnets = [] attentions = [] self.attention_head_dim = attention_head_dim self.num_heads = out_channels // self.attention_head_dim for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act)) processor = AttnAddedKVProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') else AttnAddedKVProcessor() attentions.append(Attention(query_dim=out_channels, cross_attention_dim=out_channels, heads=self.num_heads, dim_head=attention_head_dim, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, down=True)]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') output_states = () if attention_mask is None: mask = None if encoder_hidden_states is None else encoder_attention_mask else: mask = attention_mask for (resnet, attn) in zip(self.resnets, self.attentions): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states, temb) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class KDownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=4, resnet_eps: float=1e-05, resnet_act_fn: str='gelu', resnet_group_size: int=32, add_downsample: bool=False): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size resnets.append(ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=groups, groups_out=groups_out, eps=resnet_eps, non_linearity=resnet_act_fn, time_embedding_norm='ada_group', conv_shortcut_bias=False)) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([KDownsample2D()]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, *args, **kwargs) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
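The mask selection at the top of the simple cross-attention forwards above can be read as a small helper; a restatement with an illustrative name:

def _select_mask(attention_mask, encoder_hidden_states, encoder_attention_mask):
    # An explicit attention_mask always wins; otherwise, when cross-attending to encoder
    # states, fall back to encoder_attention_mask, and use no mask for pure self-attention.
    if attention_mask is not None:
        return attention_mask
    return None if encoder_hidden_states is None else encoder_attention_mask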
deprecate('scale', '1.0.0', deprecation_message) output_states = () for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return (hidden_states, output_states) class KCrossAttnDownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, cross_attention_dim: int, dropout: float=0.0, num_layers: int=4, resnet_group_size: int=32, add_downsample: bool=True, attention_head_dim: int=64, add_self_attention: bool=False, resnet_eps: float=1e-05, resnet_act_fn: str='gelu'): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size resnets.append(ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=groups, groups_out=groups_out, eps=resnet_eps, non_linearity=resnet_act_fn, time_embedding_norm='ada_group', conv_shortcut_bias=False)) attentions.append(KAttentionBlock(out_channels, out_channels // attention_head_dim, attention_head_dim, cross_attention_dim=cross_attention_dim, temb_channels=temb_channels, attention_bias=True, add_self_attention=add_self_attention, cross_attention_norm='layer_norm', group_size=resnet_group_size)) self.resnets = nn.ModuleList(resnets) self.attentions = nn.ModuleList(attentions) if add_downsample: self.downsamplers = nn.ModuleList([KDownsample2D()]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') output_states = () for (resnet, attn) in zip(self.resnets, self.attentions): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask) if self.downsamplers is None: output_states += (None,) else: output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return (hidden_states, output_states) class AttnUpBlock2D(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: int=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0, upsample_type: str='conv'): super().__init__() resnets = [] attentions = [] self.upsample_type = upsample_type if attention_head_dim is None: logger.warning(f'It is not recommend to pass `attention_head_dim=None`. 
Defaulting `attention_head_dim` to `in_channels`: {out_channels}.') attention_head_dim = out_channels for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) attentions.append(Attention(out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if upsample_type == 'conv': self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) elif upsample_type == 'resnet': self.upsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, up=True)]) else: self.upsamplers = None self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
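The up blocks size each resnet as resnet_in_channels + res_skip_channels because every iteration pops one skip tensor off res_hidden_states_tuple and concatenates it on the channel axis; a minimal restatement with assumed shapes:

import torch

hidden_states = torch.randn(1, 64, 16, 16)
res_hidden_states_tuple = (torch.randn(1, 64, 16, 16), torch.randn(1, 64, 16, 16))
res_hidden_states = res_hidden_states_tuple[-1]        # most recently saved skip state first
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)  # -> [1, 128, 16, 16]
# hence the resnet input width of resnet_in_channels + res_skip_channels in the constructors above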
deprecate('scale', '1.0.0', deprecation_message) for (resnet, attn) in zip(self.resnets, self.attentions): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) if self.upsamplers is not None: for upsampler in self.upsamplers: if self.upsample_type == 'resnet': hidden_states = upsampler(hidden_states, temb=temb) else: hidden_states = upsampler(hidden_states) return hidden_states class CrossAttnUpBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, add_upsample: bool=True, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, attention_type: str='default'): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) if not dual_cross_attention: attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, upsample_size: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and getattr(self, 'b2', None) for (resnet, attn) in zip(self.resnets, self.attentions): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] if is_freeu_enabled: (hidden_states, res_hidden_states) = apply_freeu(self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UpBlock2D(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_upsample: bool=True): super().__init__() resnets = [] for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
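is_freeu_enabled above only checks whether the s1/s2/b1/b2 attributes have been attached to the block; they are normally set from outside through the UNet's FreeU helpers, so the sketch below is illustrative and the scaling values are the commonly quoted SD 1.x settings, not values taken from this file:

# unet.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)  # attaches s1/s2/b1/b2 to the up blocks
# unet.disable_freeu()                               # removes them, so is_freeu_enabled is falsy
# When enabled, apply_freeu re-weights the backbone features (b1/b2) and the skip features
# (s1/s2) per resolution_idx before the channel-wise concatenation.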
deprecate('scale', '1.0.0', deprecation_message) is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and getattr(self, 'b2', None) for resnet in self.resnets: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] if is_freeu_enabled: (hidden_states, res_hidden_states) = apply_freeu(self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UpDecoderBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_upsample: bool=True, temb_channels: Optional[int]=None): super().__init__() resnets = [] for i in range(num_layers): input_channels = in_channels if i == 0 else out_channels if resnet_time_scale_shift == 'spatial': resnets.append(ResnetBlockCondNorm2D(in_channels=input_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm='spatial', non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor)) else: resnets.append(ResnetBlock2D(in_channels=input_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: for resnet in self.resnets: hidden_states = resnet(hidden_states, temb=temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class AttnUpDecoderBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0, add_upsample: bool=True, temb_channels: Optional[int]=None): super().__init__() resnets = [] attentions = [] if attention_head_dim is None: logger.warning(f'It is not recommend to pass `attention_head_dim=None`. 
Defaulting `attention_head_dim` to `out_channels`: {out_channels}.') attention_head_dim = out_channels for i in range(num_layers): input_channels = in_channels if i == 0 else out_channels if resnet_time_scale_shift == 'spatial': resnets.append(ResnetBlockCondNorm2D(in_channels=input_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm='spatial', non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor)) else: resnets.append(ResnetBlock2D(in_channels=input_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) attentions.append(Attention(out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups if resnet_time_scale_shift != 'spatial' else None, spatial_norm_dim=temb_channels if resnet_time_scale_shift == 'spatial' else None, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: for (resnet, attn) in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb=temb) hidden_states = attn(hidden_states, temb=temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class AttnSkipUpBlock2D(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=np.sqrt(2.0), add_upsample: bool=True): super().__init__() self.attentions = nn.ModuleList([]) self.resnets = nn.ModuleList([]) for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels self.resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(resnet_in_channels + res_skip_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) if attention_head_dim is None: logger.warning(f'It is not recommend to pass `attention_head_dim=None`. 
Defaulting `attention_head_dim` to `out_channels`: {out_channels}.') attention_head_dim = out_channels self.attentions.append(Attention(out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=32, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True)) self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) if add_upsample: self.resnet_up = ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, up=True, kernel='fir') self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) self.skip_norm = torch.nn.GroupNorm(num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True) self.act = nn.SiLU() else: self.resnet_up = None self.skip_conv = None self.skip_norm = None self.act = None self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, skip_sample=None, *args, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
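# --- Illustrative usage sketch (not from the upstream module) -----------------
# The decoder-style up blocks defined above (UpDecoderBlock2D and
# AttnUpDecoderBlock2D) can be exercised on their own. The batch, channel and
# spatial sizes below are arbitrary assumptions chosen only so the GroupNorm
# group counts divide evenly; `temb` is optional because `temb_channels`
# defaults to None.
#
#     import torch
#
#     block = UpDecoderBlock2D(in_channels=64, out_channels=32, num_layers=2)
#     x = torch.randn(1, 64, 16, 16)
#     y = block(x)
#     # add_upsample=True (the default) appends an Upsample2D, so the spatial
#     # size doubles: y.shape == torch.Size([1, 32, 32, 32])
# ------------------------------------------------------------------------------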
deprecate('scale', '1.0.0', deprecation_message) for resnet in self.resnets: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) hidden_states = self.attentions[0](hidden_states) if skip_sample is not None: skip_sample = self.upsampler(skip_sample) else: skip_sample = 0 if self.resnet_up is not None: skip_sample_states = self.skip_norm(hidden_states) skip_sample_states = self.act(skip_sample_states) skip_sample_states = self.skip_conv(skip_sample_states) skip_sample = skip_sample + skip_sample_states hidden_states = self.resnet_up(hidden_states, temb) return (hidden_states, skip_sample) class SkipUpBlock2D(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_pre_norm: bool=True, output_scale_factor: float=np.sqrt(2.0), add_upsample: bool=True, upsample_padding: int=1): super().__init__() self.resnets = nn.ModuleList([]) for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels self.resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min((resnet_in_channels + res_skip_channels) // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) if add_upsample: self.resnet_up = ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, up=True, kernel='fir') self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) self.skip_norm = torch.nn.GroupNorm(num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True) self.act = nn.SiLU() else: self.resnet_up = None self.skip_conv = None self.skip_norm = None self.act = None self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, skip_sample=None, *args, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
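# --- Illustrative usage sketch (not from the upstream module) -----------------
# AttnSkipUpBlock2D (defined above) consumes skip features from
# `res_hidden_states_tuple` (last element first), applies self-attention, and
# returns a `(hidden_states, skip_sample)` pair, where `skip_sample` is a
# 3-channel image-space residual produced by `skip_conv`. All sizes below are
# arbitrary assumptions picked so the group-norm and head counts divide evenly.
#
#     import torch
#
#     block = AttnSkipUpBlock2D(in_channels=32, prev_output_channel=32, out_channels=32,
#                               temb_channels=128, attention_head_dim=8)
#     hidden = torch.randn(1, 32, 16, 16)
#     skips = (torch.randn(1, 32, 16, 16),)   # one skip tensor per resnet layer
#     temb = torch.randn(1, 128)              # required: the resnets project temb
#     hidden, skip_sample = block(hidden, skips, temb)
#     # hidden is upsampled 2x by the FIR resnet_up; skip_sample has 3 channels
#     # at the pre-upsample resolution.
# ------------------------------------------------------------------------------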
deprecate('scale', '1.0.0', deprecation_message) for resnet in self.resnets: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) if skip_sample is not None: skip_sample = self.upsampler(skip_sample) else: skip_sample = 0 if self.resnet_up is not None: skip_sample_states = self.skip_norm(hidden_states) skip_sample_states = self.act(skip_sample_states) skip_sample_states = self.skip_conv(skip_sample_states) skip_sample = skip_sample + skip_sample_states hidden_states = self.resnet_up(hidden_states, temb) return (hidden_states, skip_sample) class ResnetUpsampleBlock2D(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_upsample: bool=True, skip_time_act: bool=False): super().__init__() resnets = [] for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act)) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, up=True)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
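# --- Illustrative sketch of the gradient-checkpointing idiom ------------------
# Several of the up blocks above route their resnets through
# torch.utils.checkpoint.checkpoint when `self.training and
# self.gradient_checkpointing` is set, wrapping the module in a
# `create_custom_forward` closure and passing `use_reentrant=False` on
# torch >= 1.11. A minimal standalone version of that pattern, using a plain
# Conv2d as a stand-in for a ResnetBlock2D:
#
#     import torch
#     from torch.utils.checkpoint import checkpoint
#
#     def create_custom_forward(module):
#         def custom_forward(*inputs):
#             return module(*inputs)
#         return custom_forward
#
#     layer = torch.nn.Conv2d(4, 4, kernel_size=3, padding=1)
#     x = torch.randn(1, 4, 8, 8, requires_grad=True)
#     y = checkpoint(create_custom_forward(layer), x, use_reentrant=False)
#     y.sum().backward()   # activations are recomputed during the backward pass
# ------------------------------------------------------------------------------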
deprecate('scale', '1.0.0', deprecation_message) for resnet in self.resnets: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, temb) return hidden_states class SimpleCrossAttnUpBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, add_upsample: bool=True, skip_time_act: bool=False, only_cross_attention: bool=False, cross_attention_norm: Optional[str]=None): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.attention_head_dim = attention_head_dim self.num_heads = out_channels // self.attention_head_dim for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act)) processor = AttnAddedKVProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') else AttnAddedKVProcessor() attentions.append(Attention(query_dim=out_channels, cross_attention_dim=out_channels, heads=self.num_heads, dim_head=self.attention_head_dim, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, up=True)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: 
Optional[torch.Tensor]=None) -> torch.Tensor: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') if attention_mask is None: mask = None if encoder_hidden_states is None else encoder_attention_mask else: mask = attention_mask for (resnet, attn) in zip(self.resnets, self.attentions): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, temb) return hidden_states class KUpBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, resolution_idx: int, dropout: float=0.0, num_layers: int=5, resnet_eps: float=1e-05, resnet_act_fn: str='gelu', resnet_group_size: Optional[int]=32, add_upsample: bool=True): super().__init__() resnets = [] k_in_channels = 2 * out_channels k_out_channels = in_channels num_layers = num_layers - 1 for i in range(num_layers): in_channels = k_in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size resnets.append(ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=k_out_channels if i == num_layers - 1 else out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=groups, groups_out=groups_out, dropout=dropout, non_linearity=resnet_act_fn, time_embedding_norm='ada_group', conv_shortcut_bias=False)) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([KUpsample2D()]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
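# --- Illustrative usage sketch (not from the upstream module) -----------------
# SimpleCrossAttnUpBlock2D (defined above) adds an added-KV cross-attention
# layer after each resnet; `encoder_hidden_states` must have the configured
# `cross_attention_dim` as its last dimension. All sizes below (including the
# sequence length 77) are arbitrary assumptions for illustration only.
#
#     import torch
#
#     block = SimpleCrossAttnUpBlock2D(in_channels=64, out_channels=64, prev_output_channel=64,
#                                      temb_channels=256, attention_head_dim=8,
#                                      cross_attention_dim=1280)
#     hidden = torch.randn(1, 64, 16, 16)
#     skips = (torch.randn(1, 64, 16, 16),)   # one skip per resnet (num_layers=1)
#     temb = torch.randn(1, 256)
#     text = torch.randn(1, 77, 1280)         # stand-in encoder hidden states
#     out = block(hidden, skips, temb, encoder_hidden_states=text)
#     # the trailing resnet upsampler doubles the spatial size: out is (1, 64, 32, 32)
# ------------------------------------------------------------------------------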
deprecate('scale', '1.0.0', deprecation_message) res_hidden_states_tuple = res_hidden_states_tuple[-1] if res_hidden_states_tuple is not None: hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1) for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class KCrossAttnUpBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, resolution_idx: int, dropout: float=0.0, num_layers: int=4, resnet_eps: float=1e-05, resnet_act_fn: str='gelu', resnet_group_size: int=32, attention_head_dim: int=1, cross_attention_dim: int=768, add_upsample: bool=True, upcast_attention: bool=False): super().__init__() resnets = [] attentions = [] is_first_block = in_channels == out_channels == temb_channels is_middle_block = in_channels != out_channels add_self_attention = True if is_first_block else False self.has_cross_attention = True self.attention_head_dim = attention_head_dim k_in_channels = out_channels if is_first_block else 2 * out_channels k_out_channels = in_channels num_layers = num_layers - 1 for i in range(num_layers): in_channels = k_in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size if is_middle_block and i == num_layers - 1: conv_2d_out_channels = k_out_channels else: conv_2d_out_channels = None resnets.append(ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=out_channels, conv_2d_out_channels=conv_2d_out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=groups, groups_out=groups_out, dropout=dropout, non_linearity=resnet_act_fn, time_embedding_norm='ada_group', conv_shortcut_bias=False)) attentions.append(KAttentionBlock(k_out_channels if i == num_layers - 1 else out_channels, k_out_channels // attention_head_dim if i == num_layers - 1 else out_channels // attention_head_dim, attention_head_dim, cross_attention_dim=cross_attention_dim, temb_channels=temb_channels, attention_bias=True, add_self_attention=add_self_attention, cross_attention_norm='layer_norm', upcast_attention=upcast_attention)) self.resnets = nn.ModuleList(resnets) self.attentions = nn.ModuleList(attentions) if add_upsample: self.upsamplers = nn.ModuleList([KUpsample2D()]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, upsample_size: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: res_hidden_states_tuple = res_hidden_states_tuple[-1] if res_hidden_states_tuple is not None: hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1) for (resnet, attn) in zip(self.resnets, self.attentions): if self.training and 
self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class KAttentionBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout: float=0.0, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, upcast_attention: bool=False, temb_channels: int=768, add_self_attention: bool=False, cross_attention_norm: Optional[str]=None, group_size: int=32): super().__init__() self.add_self_attention = add_self_attention if add_self_attention: self.norm1 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size)) self.attn1 = Attention(query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=None, cross_attention_norm=None) self.norm2 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size)) self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, cross_attention_norm=cross_attention_norm) def _to_3d(self, hidden_states: torch.Tensor, height: int, weight: int) -> torch.Tensor: return hidden_states.permute(0, 2, 3, 1).reshape(hidden_states.shape[0], height * weight, -1) def _to_4d(self, hidden_states: torch.Tensor, height: int, weight: int) -> torch.Tensor: return hidden_states.permute(0, 2, 1).reshape(hidden_states.shape[0], -1, height, weight) def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor]=None, emb: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') if self.add_self_attention: norm_hidden_states = self.norm1(hidden_states, emb) (height, weight) = norm_hidden_states.shape[2:] norm_hidden_states = self._to_3d(norm_hidden_states, height, weight) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None, attention_mask=attention_mask, **cross_attention_kwargs) attn_output = self._to_4d(attn_output, height, weight) hidden_states = attn_output + hidden_states norm_hidden_states = self.norm2(hidden_states, emb) (height, weight) = norm_hidden_states.shape[2:] norm_hidden_states = self._to_3d(norm_hidden_states, height, weight) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask if encoder_hidden_states is None else encoder_attention_mask, **cross_attention_kwargs) attn_output = self._to_4d(attn_output, height, weight) hidden_states = attn_output + hidden_states return hidden_states # File: diffusers-main/src/diffusers/models/unets/unet_2d_blocks_flax.py import flax.linen as nn import jax.numpy as jnp from ..attention_flax import FlaxTransformer2DModel from ..resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D class FlaxCrossAttnDownBlock2D(nn.Module): in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 add_downsample: bool = True use_linear_projection: bool = False only_cross_attention: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): resnets = [] attentions = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype) resnets.append(res_block) attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype) attentions.append(attn_block) self.resnets = resnets self.attentions = attentions if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True): output_states = () for (resnet, attn) in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb, deterministic=deterministic) hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) output_states += (hidden_states,) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) output_states += (hidden_states,) return (hidden_states, output_states) class FlaxDownBlock2D(nn.Module): in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 add_downsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype) resnets.append(res_block) self.resnets = resnets if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, 
dtype=self.dtype) def __call__(self, hidden_states, temb, deterministic=True): output_states = () for resnet in self.resnets: hidden_states = resnet(hidden_states, temb, deterministic=deterministic) output_states += (hidden_states,) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) output_states += (hidden_states,) return (hidden_states, output_states) class FlaxCrossAttnUpBlock2D(nn.Module): in_channels: int out_channels: int prev_output_channel: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 add_upsample: bool = True use_linear_projection: bool = False only_cross_attention: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): resnets = [] attentions = [] for i in range(self.num_layers): res_skip_channels = self.in_channels if i == self.num_layers - 1 else self.out_channels resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype) resnets.append(res_block) attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype) attentions.append(attn_block) self.resnets = resnets self.attentions = attentions if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True): for (resnet, attn) in zip(self.resnets, self.attentions): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) if self.add_upsample: hidden_states = self.upsamplers_0(hidden_states) return hidden_states class FlaxUpBlock2D(nn.Module): in_channels: int out_channels: int prev_output_channel: int dropout: float = 0.0 num_layers: int = 1 add_upsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): res_skip_channels = self.in_channels if i == self.num_layers - 1 else self.out_channels resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype) resnets.append(res_block) self.resnets = resnets if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True): for resnet in self.resnets: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) if self.add_upsample: hidden_states = 
self.upsamplers_0(hidden_states) return hidden_states class FlaxUNetMidBlock2DCrossAttn(nn.Module): in_channels: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 use_linear_projection: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): resnets = [FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)] attentions = [] for _ in range(self.num_layers): attn_block = FlaxTransformer2DModel(in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype) attentions.append(attn_block) res_block = FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype) resnets.append(res_block) self.resnets = resnets self.attentions = attentions def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True): hidden_states = self.resnets[0](hidden_states, temb) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) return hidden_states # File: diffusers-main/src/diffusers/models/unets/unet_2d_condition.py from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin from ...loaders.single_file_model import FromOriginalModelMixin from ...utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers from ..activations import get_activation from ..attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0 from ..embeddings import GaussianFourierProjection, GLIGENTextBoundingboxProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_2d_blocks import get_down_block, get_mid_block, get_up_block logger = logging.get_logger(__name__) @dataclass class UNet2DConditionOutput(BaseOutput): sample: torch.Tensor = None class UNet2DConditionModel(ModelMixin, ConfigMixin, FromOriginalModelMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): _supports_gradient_checkpointing = True _no_split_modules = ['BasicTransformerBlock', 'ResnetBlock2D', 'CrossAttnUpBlock2D'] @register_to_config def __init__(self, sample_size: Optional[int]=None, in_channels: int=4, out_channels: int=4, center_input_sample: bool=False, flip_sin_to_cos: bool=True, freq_shift: int=0, down_block_types: Tuple[str]=('CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D'), mid_block_type: Optional[str]='UNetMidBlock2DCrossAttn', up_block_types: Tuple[str]=('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D'), only_cross_attention: Union[bool, 
Tuple[bool]]=False, block_out_channels: Tuple[int]=(320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]]=2, downsample_padding: int=1, mid_block_scale_factor: float=1, dropout: float=0.0, act_fn: str='silu', norm_num_groups: Optional[int]=32, norm_eps: float=1e-05, cross_attention_dim: Union[int, Tuple[int]]=1280, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]]=1, reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]]=None, encoder_hid_dim: Optional[int]=None, encoder_hid_dim_type: Optional[str]=None, attention_head_dim: Union[int, Tuple[int]]=8, num_attention_heads: Optional[Union[int, Tuple[int]]]=None, dual_cross_attention: bool=False, use_linear_projection: bool=False, class_embed_type: Optional[str]=None, addition_embed_type: Optional[str]=None, addition_time_embed_dim: Optional[int]=None, num_class_embeds: Optional[int]=None, upcast_attention: bool=False, resnet_time_scale_shift: str='default', resnet_skip_time_act: bool=False, resnet_out_scale_factor: float=1.0, time_embedding_type: str='positional', time_embedding_dim: Optional[int]=None, time_embedding_act_fn: Optional[str]=None, timestep_post_act: Optional[str]=None, time_cond_proj_dim: Optional[int]=None, conv_in_kernel: int=3, conv_out_kernel: int=3, projection_class_embeddings_input_dim: Optional[int]=None, attention_type: str='default', class_embeddings_concat: bool=False, mid_block_only_cross_attention: Optional[bool]=None, cross_attention_norm: Optional[str]=None, addition_embed_type_num_heads: int=64): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError('At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. 
Passing `num_attention_heads` will only be supported in diffusers v0.19.') num_attention_heads = num_attention_heads or attention_head_dim self._check_config(down_block_types=down_block_types, up_block_types=up_block_types, only_cross_attention=only_cross_attention, block_out_channels=block_out_channels, layers_per_block=layers_per_block, cross_attention_dim=cross_attention_dim, transformer_layers_per_block=transformer_layers_per_block, reverse_transformer_layers_per_block=reverse_transformer_layers_per_block, attention_head_dim=attention_head_dim, num_attention_heads=num_attention_heads) conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) (time_embed_dim, timestep_input_dim) = self._set_time_proj(time_embedding_type, block_out_channels=block_out_channels, flip_sin_to_cos=flip_sin_to_cos, freq_shift=freq_shift, time_embedding_dim=time_embedding_dim) self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim) self._set_encoder_hid_proj(encoder_hid_dim_type, cross_attention_dim=cross_attention_dim, encoder_hid_dim=encoder_hid_dim) self._set_class_embedding(class_embed_type, act_fn=act_fn, num_class_embeds=num_class_embeds, projection_class_embeddings_input_dim=projection_class_embeddings_input_dim, time_embed_dim=time_embed_dim, timestep_input_dim=timestep_input_dim) self._set_add_embedding(addition_embed_type, addition_embed_type_num_heads=addition_embed_type_num_heads, addition_time_embed_dim=addition_time_embed_dim, cross_attention_dim=cross_attention_dim, encoder_hid_dim=encoder_hid_dim, flip_sin_to_cos=flip_sin_to_cos, freq_shift=freq_shift, projection_class_embeddings_input_dim=projection_class_embeddings_input_dim, time_embed_dim=time_embed_dim) if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, 
resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout) self.down_blocks.append(down_block) self.mid_block = get_mid_block(mid_block_type, temb_channels=blocks_time_embed_dim, in_channels=block_out_channels[-1], resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, output_scale_factor=mid_block_scale_factor, transformer_layers_per_block=transformer_layers_per_block[-1], num_attention_heads=num_attention_heads[-1], cross_attention_dim=cross_attention_dim[-1], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, mid_block_only_cross_attention=mid_block_only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[-1], dropout=dropout) self.num_upsamplers = 0 reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) if reverse_transformer_layers_per_block is None else reverse_transformer_layers_per_block only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block(up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resolution_idx=i, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout) self.up_blocks.append(up_block) if norm_num_groups is not None: self.conv_norm_out = 
nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding) self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim) def _check_config(self, down_block_types: Tuple[str], up_block_types: Tuple[str], only_cross_attention: Union[bool, Tuple[bool]], block_out_channels: Tuple[int], layers_per_block: Union[int, Tuple[int]], cross_attention_dim: Union[int, Tuple[int]], transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]], reverse_transformer_layers_per_block: bool, attention_head_dim: int, num_attention_heads: Optional[Union[int, Tuple[int]]]): if len(down_block_types) != len(up_block_types): raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError(f'Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.') if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError(f'Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.') if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError(f'Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}.') if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError(f'Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. 
`down_block_types`: {down_block_types}.') if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: for layer_number_per_block in transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") def _set_time_proj(self, time_embedding_type: str, block_out_channels: int, flip_sin_to_cos: bool, freq_shift: float, time_embedding_dim: int) -> Tuple[int, int]: if time_embedding_type == 'fourier': time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f'`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.') self.time_proj = GaussianFourierProjection(time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos) timestep_input_dim = time_embed_dim elif time_embedding_type == 'positional': time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError(f'{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`.') return (time_embed_dim, timestep_input_dim) def _set_encoder_hid_proj(self, encoder_hid_dim_type: Optional[str], cross_attention_dim: Union[int, Tuple[int]], encoder_hid_dim: Optional[int]): if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = 'text_proj' self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError(f'`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}.') if encoder_hid_dim_type == 'text_proj': self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == 'text_image_proj': self.encoder_hid_proj = TextImageProjection(text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim) elif encoder_hid_dim_type == 'image_proj': self.encoder_hid_proj = ImageProjection(image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim) elif encoder_hid_dim_type is not None: raise ValueError(f"`encoder_hid_dim_type`: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj', or 'image_proj'.") else: self.encoder_hid_proj = None def _set_class_embedding(self, class_embed_type: Optional[str], act_fn: str, num_class_embeds: Optional[int], projection_class_embeddings_input_dim: Optional[int], time_embed_dim: int, timestep_input_dim: int): if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == 'timestep': self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == 'identity': self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == 'projection': if projection_class_embeddings_input_dim is None: raise ValueError("`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set") self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == 'simple_projection': if projection_class_embeddings_input_dim is None: raise 
ValueError("`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set") self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None def _set_add_embedding(self, addition_embed_type: str, addition_embed_type_num_heads: int, addition_time_embed_dim: Optional[int], flip_sin_to_cos: bool, freq_shift: float, cross_attention_dim: Optional[int], encoder_hid_dim: Optional[int], projection_class_embeddings_input_dim: Optional[int], time_embed_dim: int): if addition_embed_type == 'text': if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding(text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads) elif addition_embed_type == 'text_image': self.add_embedding = TextImageTimeEmbedding(text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == 'text_time': self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == 'image': self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == 'image_hint': self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"`addition_embed_type`: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image', or 'image_hint'.") def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int): if attention_type in ['gated', 'gated-text-image']: positive_len = 768 if isinstance(cross_attention_dim, int): positive_len = cross_attention_dim elif isinstance(cross_attention_dim, (list, tuple)): positive_len = cross_attention_dim[0] feature_type = 'text-only' if attention_type == 'gated' else 'text-image' self.position_net = GLIGENTextBoundingboxProjection(positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def set_attention_slice(self, slice_size: Union[str, int, List[int]]='auto'): sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, 'set_attention_slice'): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == 'auto': slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == 'max': slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError(f'You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.') for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f'size {size} has to be smaller or equal to {dim}.') def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, 'set_attention_slice'): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): for (i, upsample_block) in enumerate(self.up_blocks): setattr(upsample_block, 's1', s1) setattr(upsample_block, 's2', s2) setattr(upsample_block, 'b1', b1) setattr(upsample_block, 'b2', b2) def disable_freeu(self): freeu_keys = {'s1', 's2', 'b1', 'b2'} for (i, upsample_block) in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def get_time_embed(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]) -> Optional[torch.Tensor]: timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=sample.dtype) return t_emb def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]: class_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError('class_labels should be provided when num_class_embeds > 0') if self.config.class_embed_type == 'timestep': class_labels = self.time_proj(class_labels) class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) return class_emb def get_aug_embed(self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]) -> Optional[torch.Tensor]: aug_emb = None if self.config.addition_embed_type == 'text': aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == 'text_image': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument 
`image_embeds` to be passed in `added_cond_kwargs`") image_embs = added_cond_kwargs.get('image_embeds') text_embs = added_cond_kwargs.get('text_embeds', encoder_hidden_states) aug_emb = self.add_embedding(text_embs, image_embs) elif self.config.addition_embed_type == 'text_time': if 'text_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`") text_embeds = added_cond_kwargs.get('text_embeds') if 'time_ids' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`") time_ids = added_cond_kwargs.get('time_ids') time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) elif self.config.addition_embed_type == 'image': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`") image_embs = added_cond_kwargs.get('image_embeds') aug_emb = self.add_embedding(image_embs) elif self.config.addition_embed_type == 'image_hint': if 'image_embeds' not in added_cond_kwargs or 'hint' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`") image_embs = added_cond_kwargs.get('image_embeds') hint = added_cond_kwargs.get('hint') aug_emb = self.add_embedding(image_embs, hint) return aug_emb def process_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]) -> torch.Tensor: if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'text_proj': encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'text_image_proj': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`") image_embeds = added_cond_kwargs.get('image_embeds') encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'image_proj': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`") image_embeds = added_cond_kwargs.get('image_embeds') encoder_hidden_states = self.encoder_hid_proj(image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'ip_image_proj': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`") if hasattr(self, 'text_encoder_hid_proj') and self.text_encoder_hid_proj is not None: 
encoder_hidden_states = self.text_encoder_hid_proj(encoder_hidden_states) image_embeds = added_cond_kwargs.get('image_embeds') image_embeds = self.encoder_hid_proj(image_embeds) encoder_hidden_states = (encoder_hidden_states, image_embeds) return encoder_hidden_states def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]]=None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]]=None, mid_block_additional_residual: Optional[torch.Tensor]=None, down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]]=None, encoder_attention_mask: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[UNet2DConditionOutput, Tuple]: default_overall_up_factor = 2 ** self.num_upsamplers forward_upsample_size = False upsample_size = None for dim in sample.shape[-2:]: if dim % default_overall_up_factor != 0: forward_upsample_size = True break if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) if self.config.center_input_sample: sample = 2 * sample - 1.0 t_emb = self.get_time_embed(sample=sample, timestep=timestep) emb = self.time_embedding(t_emb, timestep_cond) class_emb = self.get_class_embed(sample=sample, class_labels=class_labels) if class_emb is not None: if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb aug_emb = self.get_aug_embed(emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs) if self.config.addition_embed_type == 'image_hint': (aug_emb, hint) = aug_emb sample = torch.cat([sample, hint], dim=1) emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) encoder_hidden_states = self.process_encoder_hidden_states(encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs) sample = self.conv_in(sample) if cross_attention_kwargs is not None and cross_attention_kwargs.get('gligen', None) is not None: cross_attention_kwargs = cross_attention_kwargs.copy() gligen_args = cross_attention_kwargs.pop('gligen') cross_attention_kwargs['gligen'] = {'objs': self.position_net(**gligen_args)} if cross_attention_kwargs is not None: cross_attention_kwargs = cross_attention_kwargs.copy() lora_scale = cross_attention_kwargs.pop('scale', 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: scale_lora_layers(self, lora_scale) is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None is_adapter = down_intrablock_additional_residuals is not None if not is_adapter and mid_block_additional_residual is None and (down_block_additional_residuals is not None): deprecate('T2I should not use down_block_additional_residuals', '1.3.0', 'Passing intrablock residual connections with `down_block_additional_residuals` is deprecated and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. 
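# --- Illustrative sketch (not part of the original file) ---
# forward() above converts a binary key-padding mask into an additive bias before it
# reaches the attention layers: kept positions get 0.0, masked positions get -10000.0
# (effectively -inf after softmax), and a broadcast dimension is inserted with
# unsqueeze(1). Minimal reproduction, assuming a (batch, seq_len) 0/1 mask:
import torch

encoder_attention_mask = torch.tensor([[1, 1, 1, 0, 0]])          # 1 = attend, 0 = pad
bias = (1 - encoder_attention_mask.to(torch.float32)) * -10000.0  # (1, 5): [0, 0, 0, -1e4, -1e4]
bias = bias.unsqueeze(1)                                          # (1, 1, 5), broadcasts over heads/queries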
', standard_warn=False) down_intrablock_additional_residuals = down_block_additional_residuals is_adapter = True down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: additional_residuals = {} if is_adapter and len(down_intrablock_additional_residuals) > 0: additional_residuals['additional_residuals'] = down_intrablock_additional_residuals.pop(0) (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, **additional_residuals) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb) if is_adapter and len(down_intrablock_additional_residuals) > 0: sample += down_intrablock_additional_residuals.pop(0) down_block_res_samples += res_samples if is_controlnet: new_down_block_res_samples = () for (down_block_res_sample, down_block_additional_residual) in zip(down_block_res_samples, down_block_additional_residuals): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples if self.mid_block is not None: if hasattr(self.mid_block, 'has_cross_attention') and self.mid_block.has_cross_attention: sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask) else: sample = self.mid_block(sample, emb) if is_adapter and len(down_intrablock_additional_residuals) > 0 and (sample.shape == down_intrablock_additional_residuals[0].shape): sample += down_intrablock_additional_residuals.pop(0) if is_controlnet: sample = sample + mid_block_additional_residual for (i, upsample_block) in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets):] down_block_res_samples = down_block_res_samples[:-len(upsample_block.resnets)] if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask) else: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size) if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if USE_PEFT_BACKEND: unscale_lora_layers(self, lora_scale) if not return_dict: return (sample,) return UNet2DConditionOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/unet_2d_condition_flax.py from typing import Dict, Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ...configuration_utils import ConfigMixin, flax_register_to_config from ...utils import BaseOutput from ..embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from ..modeling_flax_utils import 
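# --- Illustrative sketch (not part of the original file) ---
# When both residual arguments are passed, the forward() above treats them as
# ControlNet outputs: each stored down-block skip activation is summed element-wise
# with its matching ControlNet residual, and the mid-block output gets its own
# residual. Minimal reproduction of that merge with made-up shapes:
import torch

down_block_res_samples = tuple(torch.randn(1, 32, 8, 8) for _ in range(4))          # UNet skips
down_block_additional_residuals = tuple(torch.randn(1, 32, 8, 8) for _ in range(4)) # ControlNet outputs

down_block_res_samples = tuple(
    skip + residual
    for skip, residual in zip(down_block_res_samples, down_block_additional_residuals)
)

mid_sample = torch.randn(1, 32, 8, 8)
mid_block_additional_residual = torch.randn(1, 32, 8, 8)
mid_sample = mid_sample + mid_block_additional_residual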
FlaxModelMixin from .unet_2d_blocks_flax import FlaxCrossAttnDownBlock2D, FlaxCrossAttnUpBlock2D, FlaxDownBlock2D, FlaxUNetMidBlock2DCrossAttn, FlaxUpBlock2D @flax.struct.dataclass class FlaxUNet2DConditionOutput(BaseOutput): sample: jnp.ndarray @flax_register_to_config class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin): sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str, ...] = ('CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D') up_block_types: Tuple[str, ...] = ('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D') mid_block_type: Optional[str] = 'UNetMidBlock2DCrossAttn' only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 use_memory_efficient_attention: bool = False split_head_dim: bool = False transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1 addition_embed_type: Optional[str] = None addition_time_embed_dim: Optional[int] = None addition_embed_type_num_heads: int = 64 projection_class_embeddings_input_dim: Optional[int] = None def init_weights(self, rng: jax.Array) -> FrozenDict: sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} added_cond_kwargs = None if self.addition_embed_type == 'text_time': is_refiner = 5 * self.config.addition_time_embed_dim + self.config.cross_attention_dim == self.config.projection_class_embeddings_input_dim num_micro_conditions = 5 if is_refiner else 6 text_embeds_dim = self.config.projection_class_embeddings_input_dim - num_micro_conditions * self.config.addition_time_embed_dim time_ids_channels = self.projection_class_embeddings_input_dim - text_embeds_dim time_ids_dims = time_ids_channels // self.addition_time_embed_dim added_cond_kwargs = {'text_embeds': jnp.zeros((1, text_embeds_dim), dtype=jnp.float32), 'time_ids': jnp.zeros((1, time_ids_dims), dtype=jnp.float32)} return self.init(rngs, sample, timesteps, encoder_hidden_states, added_cond_kwargs)['params'] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError('At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. 
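# --- Illustrative sketch (not part of the original file) ---
# init_weights above has to infer dummy `added_cond_kwargs` shapes for the 'text_time'
# embedding from the config alone: SDXL-base uses 6 micro-conditioning ids while the
# refiner uses 5, so it checks whether 5 time embeddings plus the cross-attention dim
# already fill projection_class_embeddings_input_dim. Plain-arithmetic walk-through
# with the SDXL-base numbers (assumed here for illustration):
addition_time_embed_dim = 256
cross_attention_dim = 2048
projection_class_embeddings_input_dim = 2816

is_refiner = 5 * addition_time_embed_dim + cross_attention_dim == projection_class_embeddings_input_dim
num_micro_conditions = 5 if is_refiner else 6  # -> 6 for SDXL-base
text_embeds_dim = projection_class_embeddings_input_dim - num_micro_conditions * addition_time_embed_dim
time_ids_dims = (projection_class_embeddings_input_dim - text_embeds_dim) // addition_time_embed_dim
# text_embeds_dim == 1280, time_ids_dims == 6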
Passing `num_attention_heads` will only be supported in diffusers v0.19.') num_attention_heads = self.num_attention_heads or self.attention_head_dim self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift) self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) only_cross_attention = self.only_cross_attention if isinstance(only_cross_attention, bool): only_cross_attention = (only_cross_attention,) * len(self.down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(self.down_block_types) transformer_layers_per_block = self.transformer_layers_per_block if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(self.down_block_types) if self.addition_embed_type is None: self.add_embedding = None elif self.addition_embed_type == 'text_time': if self.addition_time_embed_dim is None: raise ValueError(f'addition_embed_type {self.addition_embed_type} requires `addition_time_embed_dim` to not be None') self.add_time_proj = FlaxTimesteps(self.addition_time_embed_dim, self.flip_sin_to_cos, self.freq_shift) self.add_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) else: raise ValueError(f'addition_embed_type: {self.addition_embed_type} must be None or `text_time`.') down_blocks = [] output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(self.down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == 'CrossAttnDownBlock2D': down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, transformer_layers_per_block=transformer_layers_per_block[i], num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype) else: down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype) down_blocks.append(down_block) self.down_blocks = down_blocks if self.config.mid_block_type == 'UNetMidBlock2DCrossAttn': self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], transformer_layers_per_block=transformer_layers_per_block[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype) elif self.config.mid_block_type is None: self.mid_block = None else: raise ValueError(f'Unexpected mid_block_type {self.config.mid_block_type}') up_blocks = [] reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) for (i, up_block_type) in 
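# --- Illustrative sketch (not part of the original file) ---
# The down-block loop in setup() above wires channels so each block consumes the
# previous block's output width, and only the last block skips its downsampler.
# Reproducing that schedule for the default block_out_channels:
block_out_channels = (320, 640, 1280, 1280)

schedule = []
output_channel = block_out_channels[0]
for i, width in enumerate(block_out_channels):
    input_channel = output_channel
    output_channel = width
    is_final_block = i == len(block_out_channels) - 1
    schedule.append((input_channel, output_channel, not is_final_block))

# schedule == [(320, 320, True), (320, 640, True), (640, 1280, True), (1280, 1280, False)]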
enumerate(self.up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] is_final_block = i == len(block_out_channels) - 1 if up_block_type == 'CrossAttnUpBlock2D': up_block = FlaxCrossAttnUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype) else: up_block = FlaxUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype) up_blocks.append(up_block) prev_output_channel = output_channel self.up_blocks = up_blocks self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-05) self.conv_out = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) def __call__(self, sample: jnp.ndarray, timesteps: Union[jnp.ndarray, float, int], encoder_hidden_states: jnp.ndarray, added_cond_kwargs: Optional[Union[Dict, FrozenDict]]=None, down_block_additional_residuals: Optional[Tuple[jnp.ndarray, ...]]=None, mid_block_additional_residual: Optional[jnp.ndarray]=None, return_dict: bool=True, train: bool=False) -> Union[FlaxUNet2DConditionOutput, Tuple[jnp.ndarray]]: if not isinstance(timesteps, jnp.ndarray): timesteps = jnp.array([timesteps], dtype=jnp.int32) elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0: timesteps = timesteps.astype(dtype=jnp.float32) timesteps = jnp.expand_dims(timesteps, 0) t_emb = self.time_proj(timesteps) t_emb = self.time_embedding(t_emb) aug_emb = None if self.addition_embed_type == 'text_time': if added_cond_kwargs is None: raise ValueError(f'Need to provide argument `added_cond_kwargs` for {self.__class__} when using `addition_embed_type={self.addition_embed_type}`') text_embeds = added_cond_kwargs.get('text_embeds') if text_embeds is None: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`") time_ids = added_cond_kwargs.get('time_ids') if time_ids is None: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`") time_embeds = self.add_time_proj(jnp.ravel(time_ids)) time_embeds = jnp.reshape(time_embeds, (text_embeds.shape[0], -1)) add_embeds = jnp.concatenate([text_embeds, time_embeds], axis=-1) aug_emb = self.add_embedding(add_embeds) t_emb = t_emb + aug_emb if aug_emb is not None else t_emb sample = jnp.transpose(sample, (0, 2, 3, 1)) sample = self.conv_in(sample) down_block_res_samples = (sample,) for down_block in self.down_blocks: if isinstance(down_block, FlaxCrossAttnDownBlock2D): (sample, res_samples) = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train) else: (sample, res_samples) = down_block(sample, t_emb, deterministic=not train) down_block_res_samples += 
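# --- Illustrative usage sketch (not part of the original file) ---
# FlaxUNet2DConditionModel above is a flax.linen module: parameters are created with
# init_weights and passed explicitly to apply(). Hedged sketch on a tiny, made-up
# config (test-style sizes, not a real checkpoint); note the NCHW input convention,
# which the module transposes to NHWC internally:
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(
    sample_size=32,
    in_channels=4,
    out_channels=4,
    block_out_channels=(32, 64),
    layers_per_block=2,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    cross_attention_dim=32,
)

params = unet.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
timesteps = jnp.array([10], dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 77, 32), dtype=jnp.float32)

out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
assert out.sample.shape == (1, 4, 32, 32)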
res_samples if down_block_additional_residuals is not None: new_down_block_res_samples = () for (down_block_res_sample, down_block_additional_residual) in zip(down_block_res_samples, down_block_additional_residuals): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples if self.mid_block is not None: sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train) if mid_block_additional_residual is not None: sample += mid_block_additional_residual for up_block in self.up_blocks: res_samples = down_block_res_samples[-(self.layers_per_block + 1):] down_block_res_samples = down_block_res_samples[:-(self.layers_per_block + 1)] if isinstance(up_block, FlaxCrossAttnUpBlock2D): sample = up_block(sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train) else: sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train) sample = self.conv_norm_out(sample) sample = nn.silu(sample) sample = self.conv_out(sample) sample = jnp.transpose(sample, (0, 3, 1, 2)) if not return_dict: return (sample,) return FlaxUNet2DConditionOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/unet_3d_blocks.py from typing import Any, Dict, Optional, Tuple, Union import torch from torch import nn from ...utils import deprecate, is_torch_version, logging from ...utils.torch_utils import apply_freeu from ..attention import Attention from ..resnet import Downsample2D, ResnetBlock2D, SpatioTemporalResBlock, TemporalConvLayer, Upsample2D from ..transformers.transformer_2d import Transformer2DModel from ..transformers.transformer_temporal import TransformerSpatioTemporalModel, TransformerTemporalModel from .unet_motion_model import CrossAttnDownBlockMotion, CrossAttnUpBlockMotion, DownBlockMotion, UNetMidBlockCrossAttnMotion, UpBlockMotion logger = logging.get_logger(__name__) class DownBlockMotion(DownBlockMotion): def __init__(self, *args, **kwargs): deprecation_message = 'Importing `DownBlockMotion` from `diffusers.models.unets.unet_3d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_motion_model import DownBlockMotion` instead.' deprecate('DownBlockMotion', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) class CrossAttnDownBlockMotion(CrossAttnDownBlockMotion): def __init__(self, *args, **kwargs): deprecation_message = 'Importing `CrossAttnDownBlockMotion` from `diffusers.models.unets.unet_3d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_motion_model import CrossAttnDownBlockMotion` instead.' deprecate('CrossAttnDownBlockMotion', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) class UpBlockMotion(UpBlockMotion): def __init__(self, *args, **kwargs): deprecation_message = 'Importing `UpBlockMotion` from `diffusers.models.unets.unet_3d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_motion_model import UpBlockMotion` instead.' 
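# --- Illustrative sketch (not part of the original file) ---
# The re-exported Motion blocks above follow a common deprecation-shim pattern: a
# subclass kept at the old import path that emits a deprecation warning and then
# defers entirely to the relocated implementation. Generic, hypothetical example
# (OldName/NewName are placeholders, not diffusers classes):
from diffusers.utils import deprecate

class NewName:
    def __init__(self, value=0):
        self.value = value

class OldName(NewName):  # importable from the legacy module path
    def __init__(self, *args, **kwargs):
        deprecate(
            "OldName",
            "1.0.0",
            "Importing `OldName` from this module is deprecated; import `NewName` from its new location instead.",
        )
        super().__init__(*args, **kwargs)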
deprecate('UpBlockMotion', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) class CrossAttnUpBlockMotion(CrossAttnUpBlockMotion): def __init__(self, *args, **kwargs): deprecation_message = 'Importing `CrossAttnUpBlockMotion` from `diffusers.models.unets.unet_3d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_motion_model import CrossAttnUpBlockMotion` instead.' deprecate('CrossAttnUpBlockMotion', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) class UNetMidBlockCrossAttnMotion(UNetMidBlockCrossAttnMotion): def __init__(self, *args, **kwargs): deprecation_message = 'Importing `UNetMidBlockCrossAttnMotion` from `diffusers.models.unets.unet_3d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_motion_model import UNetMidBlockCrossAttnMotion` instead.' deprecate('UNetMidBlockCrossAttnMotion', '1.0.0', deprecation_message) super().__init__(*args, **kwargs) def get_down_block(down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resnet_groups: Optional[int]=None, cross_attention_dim: Optional[int]=None, downsample_padding: Optional[int]=None, dual_cross_attention: bool=False, use_linear_projection: bool=True, only_cross_attention: bool=False, upcast_attention: bool=False, resnet_time_scale_shift: str='default', temporal_num_attention_heads: int=8, temporal_max_seq_length: int=32, transformer_layers_per_block: Union[int, Tuple[int]]=1, temporal_transformer_layers_per_block: Union[int, Tuple[int]]=1, dropout: float=0.0) -> Union['DownBlock3D', 'CrossAttnDownBlock3D', 'DownBlockSpatioTemporal', 'CrossAttnDownBlockSpatioTemporal']: if down_block_type == 'DownBlock3D': return DownBlock3D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, dropout=dropout) elif down_block_type == 'CrossAttnDownBlock3D': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlock3D') return CrossAttnDownBlock3D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, dropout=dropout) elif down_block_type == 'DownBlockSpatioTemporal': return DownBlockSpatioTemporal(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample) elif down_block_type == 'CrossAttnDownBlockSpatioTemporal': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlockSpatioTemporal') return CrossAttnDownBlockSpatioTemporal(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, num_layers=num_layers, 
transformer_layers_per_block=transformer_layers_per_block, add_downsample=add_downsample, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads) raise ValueError(f'{down_block_type} does not exist.') def get_up_block(up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resolution_idx: Optional[int]=None, resnet_groups: Optional[int]=None, cross_attention_dim: Optional[int]=None, dual_cross_attention: bool=False, use_linear_projection: bool=True, only_cross_attention: bool=False, upcast_attention: bool=False, resnet_time_scale_shift: str='default', temporal_num_attention_heads: int=8, temporal_cross_attention_dim: Optional[int]=None, temporal_max_seq_length: int=32, transformer_layers_per_block: Union[int, Tuple[int]]=1, temporal_transformer_layers_per_block: Union[int, Tuple[int]]=1, dropout: float=0.0) -> Union['UpBlock3D', 'CrossAttnUpBlock3D', 'UpBlockSpatioTemporal', 'CrossAttnUpBlockSpatioTemporal']: if up_block_type == 'UpBlock3D': return UpBlock3D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, dropout=dropout) elif up_block_type == 'CrossAttnUpBlock3D': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnUpBlock3D') return CrossAttnUpBlock3D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, dropout=dropout) elif up_block_type == 'UpBlockSpatioTemporal': return UpBlockSpatioTemporal(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, add_upsample=add_upsample) elif up_block_type == 'CrossAttnUpBlockSpatioTemporal': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnUpBlockSpatioTemporal') return CrossAttnUpBlockSpatioTemporal(in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, add_upsample=add_upsample, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, resolution_idx=resolution_idx) raise ValueError(f'{up_block_type} does not exist.') class UNetMidBlock3DCrossAttn(nn.Module): def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, output_scale_factor: float=1.0, cross_attention_dim: int=1280, 
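# --- Illustrative usage sketch (not part of the original file) ---
# get_down_block / get_up_block above are string-dispatch factories used by the 3D
# UNets. Hedged construction example for the plain (non-attention) block types with
# small, made-up channel sizes:
from diffusers.models.unets.unet_3d_blocks import get_down_block, get_up_block

down = get_down_block(
    "DownBlock3D",
    num_layers=1,
    in_channels=32,
    out_channels=32,
    temb_channels=128,
    add_downsample=True,
    resnet_eps=1e-6,
    resnet_act_fn="silu",
    num_attention_heads=8,  # unused by DownBlock3D but required by the signature
    resnet_groups=32,
    downsample_padding=1,
)

up = get_up_block(
    "UpBlock3D",
    num_layers=2,
    in_channels=32,
    out_channels=32,
    prev_output_channel=32,
    temb_channels=128,
    add_upsample=True,
    resnet_eps=1e-6,
    resnet_act_fn="silu",
    num_attention_heads=8,
    resnet_groups=32,
    resolution_idx=0,
)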
dual_cross_attention: bool=False, use_linear_projection: bool=True, upcast_attention: bool=False): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)] temp_convs = [TemporalConvLayer(in_channels, in_channels, dropout=0.1, norm_num_groups=resnet_groups)] attentions = [] temp_attentions = [] for _ in range(num_layers): attentions.append(Transformer2DModel(in_channels // num_attention_heads, num_attention_heads, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention)) temp_attentions.append(TransformerTemporalModel(in_channels // num_attention_heads, num_attention_heads, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) temp_convs.append(TemporalConvLayer(in_channels, in_channels, dropout=0.1, norm_num_groups=resnet_groups)) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, num_frames: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb) hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames) for (attn, temp_attn, resnet, temp_conv) in zip(self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]): hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] hidden_states = temp_attn(hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) return hidden_states class CrossAttnDownBlock3D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, downsample_padding: int=1, add_downsample: bool=True, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False): super().__init__() resnets = [] attentions = [] temp_attentions = [] temp_convs = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in 
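# --- Illustrative sketch (not part of the original file) ---
# The 3D blocks above keep activations in (batch * num_frames, channels, height, width)
# layout: spatial resnets and cross-attention treat frames as extra batch entries,
# while the temporal conv/attention layers use the `num_frames` argument to regroup
# the frame axis out of the batch before mixing across time. Conceptual shape
# bookkeeping only, with made-up sizes:
import torch

batch, num_frames, channels, height, width = 2, 8, 32, 16, 16
hidden_states = torch.randn(batch * num_frames, channels, height, width)

# spatial layers see (16, 32, 16, 16); temporal layers conceptually regroup to
temporal_view = hidden_states.reshape(batch, num_frames, channels, height, width)
assert temporal_view.shape == (2, 8, 32, 16, 16)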
range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) temp_convs.append(TemporalConvLayer(out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups)) attentions.append(Transformer2DModel(out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention)) temp_attentions.append(TransformerTemporalModel(out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, num_frames: int=1, cross_attention_kwargs: Dict[str, Any]=None) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]: output_states = () for (resnet, temp_conv, attn, temp_attn) in zip(self.resnets, self.temp_convs, self.attentions, self.temp_attentions): hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] hidden_states = temp_attn(hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return (hidden_states, output_states) class DownBlock3D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_downsample: bool=True, downsample_padding: int=1): super().__init__() resnets = [] temp_convs = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) temp_convs.append(TemporalConvLayer(out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups)) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) if add_downsample: self.downsamplers = 
nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, num_frames: int=1) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]: output_states = () for (resnet, temp_conv) in zip(self.resnets, self.temp_convs): hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return (hidden_states, output_states) class CrossAttnUpBlock3D(nn.Module): def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, add_upsample: bool=True, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, resolution_idx: Optional[int]=None): super().__init__() resnets = [] temp_convs = [] attentions = [] temp_attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) temp_convs.append(TemporalConvLayer(out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups)) attentions.append(Transformer2DModel(out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention)) temp_attentions.append(TransformerTemporalModel(out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, num_frames: int=1, cross_attention_kwargs: Dict[str, Any]=None) -> torch.Tensor: is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and 
getattr(self, 'b2', None) for (resnet, temp_conv, attn, temp_attn) in zip(self.resnets, self.temp_convs, self.attentions, self.temp_attentions): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] if is_freeu_enabled: (hidden_states, res_hidden_states) = apply_freeu(self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] hidden_states = temp_attn(hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UpBlock3D(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_upsample: bool=True, resolution_idx: Optional[int]=None): super().__init__() resnets = [] temp_convs = [] for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) temp_convs.append(TemporalConvLayer(out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups)) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, num_frames: int=1) -> torch.Tensor: is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and getattr(self, 'b2', None) for (resnet, temp_conv) in zip(self.resnets, self.temp_convs): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] if is_freeu_enabled: (hidden_states, res_hidden_states) = apply_freeu(self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class MidBlockTemporalDecoder(nn.Module): def __init__(self, in_channels: int, out_channels: int, attention_head_dim: int=512, num_layers: int=1, 
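# --- Illustrative sketch (not part of the original file) ---
# Every up block above consumes its skip connections from the *end* of the stored
# tuple (last-in, first-out) and concatenates each one with the running hidden states
# along the channel dimension before the resnet. Minimal reproduction, with a 1x1
# convolution standing in for the resnet:
import torch

hidden_states = torch.randn(1, 64, 8, 8)
res_hidden_states_tuple = (torch.randn(1, 32, 8, 8), torch.randn(1, 64, 8, 8))

for _ in range(len(res_hidden_states_tuple)):
    res_hidden_states = res_hidden_states_tuple[-1]           # most recent skip first
    res_hidden_states_tuple = res_hidden_states_tuple[:-1]
    hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
    hidden_states = torch.nn.Conv2d(hidden_states.shape[1], 64, 1)(hidden_states)  # stand-in for the resnet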
upcast_attention: bool=False): super().__init__() resnets = [] attentions = [] for i in range(num_layers): input_channels = in_channels if i == 0 else out_channels resnets.append(SpatioTemporalResBlock(in_channels=input_channels, out_channels=out_channels, temb_channels=None, eps=1e-06, temporal_eps=1e-05, merge_factor=0.0, merge_strategy='learned', switch_spatial_to_temporal_mix=True)) attentions.append(Attention(query_dim=in_channels, heads=in_channels // attention_head_dim, dim_head=attention_head_dim, eps=1e-06, upcast_attention=upcast_attention, norm_num_groups=32, bias=True, residual_connection=True)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, image_only_indicator: torch.Tensor): hidden_states = self.resnets[0](hidden_states, image_only_indicator=image_only_indicator) for (resnet, attn) in zip(self.resnets[1:], self.attentions): hidden_states = attn(hidden_states) hidden_states = resnet(hidden_states, image_only_indicator=image_only_indicator) return hidden_states class UpBlockTemporalDecoder(nn.Module): def __init__(self, in_channels: int, out_channels: int, num_layers: int=1, add_upsample: bool=True): super().__init__() resnets = [] for i in range(num_layers): input_channels = in_channels if i == 0 else out_channels resnets.append(SpatioTemporalResBlock(in_channels=input_channels, out_channels=out_channels, temb_channels=None, eps=1e-06, temporal_eps=1e-05, merge_factor=0.0, merge_strategy='learned', switch_spatial_to_temporal_mix=True)) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None def forward(self, hidden_states: torch.Tensor, image_only_indicator: torch.Tensor) -> torch.Tensor: for resnet in self.resnets: hidden_states = resnet(hidden_states, image_only_indicator=image_only_indicator) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class UNetMidBlockSpatioTemporal(nn.Module): def __init__(self, in_channels: int, temb_channels: int, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, num_attention_heads: int=1, cross_attention_dim: int=1280): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers resnets = [SpatioTemporalResBlock(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=1e-05)] attentions = [] for i in range(num_layers): attentions.append(TransformerSpatioTemporalModel(num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim)) resnets.append(SpatioTemporalResBlock(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=1e-05)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, image_only_indicator: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb, image_only_indicator=image_only_indicator) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): if self.training and 
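# --- Illustrative sketch (not part of the original file) ---
# The spatio-temporal blocks above thread an `image_only_indicator` tensor through
# every resnet: one flag per (batch, frame) pair telling the spatial/temporal mixing
# layers whether a frame should be treated as a still image. For plain video inputs
# it is typically all zeros:
import torch

batch_size, num_frames = 1, 14
image_only_indicator = torch.zeros(batch_size, num_frames)  # no image-only frames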
self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, return_dict=False)[0] hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, image_only_indicator, **ckpt_kwargs) else: hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, return_dict=False)[0] hidden_states = resnet(hidden_states, temb, image_only_indicator=image_only_indicator) return hidden_states class DownBlockSpatioTemporal(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, num_layers: int=1, add_downsample: bool=True): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(SpatioTemporalResBlock(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=1e-05)) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, image_only_indicator: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: output_states = () for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, image_only_indicator, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, image_only_indicator) else: hidden_states = resnet(hidden_states, temb, image_only_indicator=image_only_indicator) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class CrossAttnDownBlockSpatioTemporal(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, num_attention_heads: int=1, cross_attention_dim: int=1280, add_downsample: bool=True): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(SpatioTemporalResBlock(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=1e-06)) attentions.append(TransformerSpatioTemporalModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim)) 
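# --- Illustrative sketch (not part of the original file) ---
# The training branches above wrap each sub-module in torch.utils.checkpoint so its
# intermediate activations are recomputed during the backward pass instead of stored.
# Minimal standalone reproduction of the same pattern (non-reentrant mode, as the
# torch >= 1.11 version check above selects):
import torch
import torch.utils.checkpoint

block = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.SiLU(), torch.nn.Linear(16, 16))

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

x = torch.randn(4, 16, requires_grad=True)
out = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
out.sum().backward()  # activations inside `block` are recomputed here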
self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=1, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, image_only_indicator: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: output_states = () blocks = list(zip(self.resnets, self.attentions)) for (resnet, attn) in blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, image_only_indicator, **ckpt_kwargs) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, return_dict=False)[0] else: hidden_states = resnet(hidden_states, temb, image_only_indicator=image_only_indicator) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, return_dict=False)[0] output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class UpBlockSpatioTemporal(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int]=None, num_layers: int=1, resnet_eps: float=1e-06, add_upsample: bool=True): super().__init__() resnets = [] for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(SpatioTemporalResBlock(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps)) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, image_only_indicator: Optional[torch.Tensor]=None) -> torch.Tensor: for resnet in self.resnets: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, image_only_indicator, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, image_only_indicator) else: hidden_states = resnet(hidden_states, 
temb, image_only_indicator=image_only_indicator) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class CrossAttnUpBlockSpatioTemporal(nn.Module): def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int]=None, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, num_attention_heads: int=1, cross_attention_dim: int=1280, add_upsample: bool=True): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(SpatioTemporalResBlock(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps)) attentions.append(TransformerSpatioTemporalModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, image_only_indicator: Optional[torch.Tensor]=None) -> torch.Tensor: for (resnet, attn) in zip(self.resnets, self.attentions): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, image_only_indicator, **ckpt_kwargs) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, return_dict=False)[0] else: hidden_states = resnet(hidden_states, temb, image_only_indicator=image_only_indicator) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator, return_dict=False)[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states # File: diffusers-main/src/diffusers/models/unets/unet_3d_condition.py from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...utils import BaseOutput, logging from 
..activations import get_activation from ..attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0 from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..transformers.transformer_temporal import TransformerTemporalModel from .unet_3d_blocks import CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UpBlock3D, get_down_block, get_up_block logger = logging.get_logger(__name__) @dataclass class UNet3DConditionOutput(BaseOutput): sample: torch.Tensor class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): _supports_gradient_checkpointing = False @register_to_config def __init__(self, sample_size: Optional[int]=None, in_channels: int=4, out_channels: int=4, down_block_types: Tuple[str, ...]=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types: Tuple[str, ...]=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), block_out_channels: Tuple[int, ...]=(320, 640, 1280, 1280), layers_per_block: int=2, downsample_padding: int=1, mid_block_scale_factor: float=1, act_fn: str='silu', norm_num_groups: Optional[int]=32, norm_eps: float=1e-05, cross_attention_dim: int=1024, attention_head_dim: Union[int, Tuple[int]]=64, num_attention_heads: Optional[Union[int, Tuple[int]]]=None, time_cond_proj_dim: Optional[int]=None): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise NotImplementedError('At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.') num_attention_heads = num_attention_heads or attention_head_dim if len(down_block_types) != len(up_block_types): raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. 
`down_block_types`: {down_block_types}.') conv_in_kernel = 3 conv_out_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, 0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn, cond_proj_dim=time_cond_proj_dim) self.transformer_in = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=attention_head_dim, in_channels=block_out_channels[0], num_layers=1, norm_num_groups=norm_num_groups) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=False) self.down_blocks.append(down_block) self.mid_block = UNetMidBlock3DCrossAttn(in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=False) self.num_upsamplers = 0 reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block(up_block_type, num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=False, resolution_idx=i) self.up_blocks.append(up_block) prev_output_channel = output_channel if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) self.conv_act = get_activation('silu') else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, 
AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None: sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, 'set_attention_slice'): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == 'auto': slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == 'max': slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError(f'You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.') for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f'size {size} has to be smaller or equal to {dim}.') def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, 'set_attention_slice'): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def enable_forward_chunking(self, chunk_size: Optional[int]=None, dim: int=0) -> None: if dim not in [0, 1]: raise ValueError(f'Make sure to set `dim` to either 0 or 1, not {dim}') chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def disable_forward_chunking(self): def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value: bool=False) -> None: if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): module.gradient_checkpointing = value def enable_freeu(self, s1, s2, b1, b2): for (i, upsample_block) in enumerate(self.up_blocks): setattr(upsample_block, 's1', s1) setattr(upsample_block, 's2', s2) setattr(upsample_block, 'b1', b1) setattr(upsample_block, 'b2', b2) def disable_freeu(self): freeu_keys = {'s1', 's2', 'b1', 'b2'} for (i, upsample_block) in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, 
Any]]=None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]]=None, mid_block_additional_residual: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[UNet3DConditionOutput, Tuple[torch.Tensor]]: default_overall_up_factor = 2 ** self.num_upsamplers forward_upsample_size = False upsample_size = None if any((s % default_overall_up_factor != 0 for s in sample.shape[-2:])): logger.info('Forward upsample size to force interpolation output size.') forward_upsample_size = True if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) num_frames = sample.shape[2] timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=self.dtype) emb = self.time_embedding(t_emb, timestep_cond) emb = emb.repeat_interleave(repeats=num_frames, dim=0) encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0) sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in(sample) sample = self.transformer_in(sample, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples if down_block_additional_residuals is not None: new_down_block_res_samples = () for (down_block_res_sample, down_block_additional_residual) in zip(down_block_res_samples, down_block_additional_residuals): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples if self.mid_block is not None: sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) if mid_block_additional_residual is not None: sample = sample + mid_block_additional_residual for (i, upsample_block) in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets):] down_block_res_samples = down_block_res_samples[:-len(upsample_block.resnets)] if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, upsample_size=upsample_size, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) 
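# A hedged usage sketch (not part of the library source): UNet3DConditionModel treats video
# latents as (batch, channels, num_frames, height, width); the forward pass above folds frames
# into the batch dimension before the 2D blocks run and repeats the time/text embeddings per
# frame. The tiny configuration, tensor sizes, and variable names below are illustrative
# assumptions, not defaults; the real default config in __init__ is much larger.
import torch
from diffusers import UNet3DConditionModel

tiny_unet = UNet3DConditionModel(
    in_channels=4,
    out_channels=4,
    down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
    up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
    block_out_channels=(32, 64),
    layers_per_block=1,
    norm_num_groups=8,
    cross_attention_dim=32,
    attention_head_dim=8,
)
video_latents = torch.randn(1, 4, 2, 16, 16)   # (batch, channels, num_frames, H, W)
text_states = torch.randn(1, 77, 32)           # (batch, seq_len, cross_attention_dim)
with torch.no_grad():
    out = tiny_unet(video_latents, timestep=10, encoder_hidden_states=text_states).sample
print(out.shape)                               # torch.Size([1, 4, 2, 16, 16])
# (End of sketch; the `else:` branch of the up-block loop in UNet3DConditionModel.forward continues below.)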
else: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, num_frames=num_frames) if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) if not return_dict: return (sample,) return UNet3DConditionOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/unet_i2vgen_xl.py from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...utils import logging from ..activations import get_activation from ..attention import Attention, FeedForward from ..attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0 from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..transformers.transformer_temporal import TransformerTemporalModel from .unet_3d_blocks import CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UpBlock3D, get_down_block, get_up_block from .unet_3d_condition import UNet3DConditionOutput logger = logging.get_logger(__name__) class I2VGenXLTransformerTemporalEncoder(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, activation_fn: str='geglu', upcast_attention: bool=False, ff_inner_dim: Optional[int]=None, dropout: int=0.0): super().__init__() self.norm1 = nn.LayerNorm(dim, elementwise_affine=True, eps=1e-05) self.attn1 = Attention(query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=False, upcast_attention=upcast_attention, out_bias=True) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=False, inner_dim=ff_inner_dim, bias=True) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) ff_output = self.ff(hidden_states) hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states class I2VGenXLUNet(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): _supports_gradient_checkpointing = False @register_to_config def __init__(self, sample_size: Optional[int]=None, in_channels: int=4, out_channels: int=4, down_block_types: Tuple[str, ...]=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types: Tuple[str, ...]=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), block_out_channels: Tuple[int, ...]=(320, 640, 1280, 1280), layers_per_block: int=2, norm_num_groups: Optional[int]=32, cross_attention_dim: int=1024, attention_head_dim: Union[int, Tuple[int]]=64, num_attention_heads: Optional[Union[int, Tuple[int]]]=None): super().__init__() num_attention_heads = attention_head_dim if len(down_block_types) != len(up_block_types): raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. 
`up_block_types`: {up_block_types}.') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.') self.conv_in = nn.Conv2d(in_channels + in_channels, block_out_channels[0], kernel_size=3, padding=1) self.transformer_in = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=num_attention_heads, in_channels=block_out_channels[0], num_layers=1, norm_num_groups=norm_num_groups) self.image_latents_proj_in = nn.Sequential(nn.Conv2d(4, in_channels * 4, 3, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 4, in_channels * 4, 3, stride=1, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 4, in_channels, 3, stride=1, padding=1)) self.image_latents_temporal_encoder = I2VGenXLTransformerTemporalEncoder(dim=in_channels, num_attention_heads=2, ff_inner_dim=in_channels * 4, attention_head_dim=in_channels, activation_fn='gelu') self.image_latents_context_embedding = nn.Sequential(nn.Conv2d(4, in_channels * 8, 3, padding=1), nn.SiLU(), nn.AdaptiveAvgPool2d((32, 32)), nn.Conv2d(in_channels * 8, in_channels * 16, 3, stride=2, padding=1), nn.SiLU(), nn.Conv2d(in_channels * 16, cross_attention_dim, 3, stride=2, padding=1)) time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, 0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn='silu') self.context_embedding = nn.Sequential(nn.Linear(cross_attention_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, cross_attention_dim * in_channels)) self.fps_embedding = nn.Sequential(nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(), nn.Linear(time_embed_dim, time_embed_dim)) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=not is_final_block, resnet_eps=1e-05, resnet_act_fn='silu', resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[i], downsample_padding=1, dual_cross_attention=False) self.down_blocks.append(down_block) self.mid_block = UNetMidBlock3DCrossAttn(in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=1e-05, resnet_act_fn='silu', output_scale_factor=1, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=False) self.num_upsamplers = 0 reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): is_final_block = i == 
len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block(up_block_type, num_layers=layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=1e-05, resnet_act_fn='silu', resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=False, resolution_idx=i) self.up_blocks.append(up_block) prev_output_channel = output_channel self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-05) self.conv_act = get_activation('silu') self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def enable_forward_chunking(self, chunk_size: Optional[int]=None, dim: int=0) -> None: if dim not in [0, 1]: raise ValueError(f'Make sure to set `dim` to either 0 or 1, not {dim}') chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def disable_forward_chunking(self): def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value: bool=False) -> None: if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): module.gradient_checkpointing = value def enable_freeu(self, s1, s2, b1, b2): for (i, upsample_block) in enumerate(self.up_blocks): setattr(upsample_block, 's1', s1) setattr(upsample_block, 's2', s2) setattr(upsample_block, 'b1', b1) setattr(upsample_block, 'b2', b2) def disable_freeu(self): freeu_keys = {'s1', 's2', 'b1', 'b2'} for (i, upsample_block) in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], fps: torch.Tensor, image_latents: torch.Tensor, image_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, cross_attention_kwargs: 
Optional[Dict[str, Any]]=None, return_dict: bool=True) -> Union[UNet3DConditionOutput, Tuple[torch.Tensor]]: (batch_size, channels, num_frames, height, width) = sample.shape default_overall_up_factor = 2 ** self.num_upsamplers forward_upsample_size = False upsample_size = None if any((s % default_overall_up_factor != 0 for s in sample.shape[-2:])): logger.info('Forward upsample size to force interpolation output size.') forward_upsample_size = True timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timesteps, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=self.dtype) t_emb = self.time_embedding(t_emb, timestep_cond) fps = fps.expand(fps.shape[0]) fps_emb = self.fps_embedding(self.time_proj(fps).to(dtype=self.dtype)) emb = t_emb + fps_emb emb = emb.repeat_interleave(repeats=num_frames, dim=0) context_emb = sample.new_zeros(batch_size, 0, self.config.cross_attention_dim) context_emb = torch.cat([context_emb, encoder_hidden_states], dim=1) image_latents_for_context_embds = image_latents[:, :, :1, :] image_latents_context_embs = image_latents_for_context_embds.permute(0, 2, 1, 3, 4).reshape(image_latents_for_context_embds.shape[0] * image_latents_for_context_embds.shape[2], image_latents_for_context_embds.shape[1], image_latents_for_context_embds.shape[3], image_latents_for_context_embds.shape[4]) image_latents_context_embs = self.image_latents_context_embedding(image_latents_context_embs) (_batch_size, _channels, _height, _width) = image_latents_context_embs.shape image_latents_context_embs = image_latents_context_embs.permute(0, 2, 3, 1).reshape(_batch_size, _height * _width, _channels) context_emb = torch.cat([context_emb, image_latents_context_embs], dim=1) image_emb = self.context_embedding(image_embeddings) image_emb = image_emb.view(-1, self.config.in_channels, self.config.cross_attention_dim) context_emb = torch.cat([context_emb, image_emb], dim=1) context_emb = context_emb.repeat_interleave(repeats=num_frames, dim=0) image_latents = image_latents.permute(0, 2, 1, 3, 4).reshape(image_latents.shape[0] * image_latents.shape[2], image_latents.shape[1], image_latents.shape[3], image_latents.shape[4]) image_latents = self.image_latents_proj_in(image_latents) image_latents = image_latents[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 3, 4, 1, 2).reshape(batch_size * height * width, num_frames, channels) image_latents = self.image_latents_temporal_encoder(image_latents) image_latents = image_latents.reshape(batch_size, height, width, num_frames, channels).permute(0, 4, 3, 1, 2) sample = torch.cat([sample, image_latents], dim=1) sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in(sample) sample = self.transformer_in(sample, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=context_emb, num_frames=num_frames, 
cross_attention_kwargs=cross_attention_kwargs) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples if self.mid_block is not None: sample = self.mid_block(sample, emb, encoder_hidden_states=context_emb, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) for (i, upsample_block) in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets):] down_block_res_samples = down_block_res_samples[:-len(upsample_block.resnets)] if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=context_emb, upsample_size=upsample_size, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) else: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, num_frames=num_frames) sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) if not return_dict: return (sample,) return UNet3DConditionOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/unet_kandinsky3.py from dataclasses import dataclass from typing import Dict, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput, logging from ..attention_processor import Attention, AttentionProcessor, AttnProcessor from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin logger = logging.get_logger(__name__) @dataclass class Kandinsky3UNetOutput(BaseOutput): sample: torch.Tensor = None class Kandinsky3EncoderProj(nn.Module): def __init__(self, encoder_hid_dim, cross_attention_dim): super().__init__() self.projection_linear = nn.Linear(encoder_hid_dim, cross_attention_dim, bias=False) self.projection_norm = nn.LayerNorm(cross_attention_dim) def forward(self, x): x = self.projection_linear(x) x = self.projection_norm(x) return x class Kandinsky3UNet(ModelMixin, ConfigMixin): @register_to_config def __init__(self, in_channels: int=4, time_embedding_dim: int=1536, groups: int=32, attention_head_dim: int=64, layers_per_block: Union[int, Tuple[int]]=3, block_out_channels: Tuple[int]=(384, 768, 1536, 3072), cross_attention_dim: Union[int, Tuple[int]]=4096, encoder_hid_dim: int=4096): super().__init__() expansion_ratio = 4 compression_ratio = 2 add_cross_attention = (False, True, True, True) add_self_attention = (False, True, True, True) out_channels = in_channels init_channels = block_out_channels[0] // 2 self.time_proj = Timesteps(init_channels, flip_sin_to_cos=False, downscale_freq_shift=1) self.time_embedding = TimestepEmbedding(init_channels, time_embedding_dim) self.add_time_condition = Kandinsky3AttentionPooling(time_embedding_dim, cross_attention_dim, attention_head_dim) self.conv_in = nn.Conv2d(in_channels, init_channels, kernel_size=3, padding=1) self.encoder_hid_proj = Kandinsky3EncoderProj(encoder_hid_dim, cross_attention_dim) hidden_dims = [init_channels] + list(block_out_channels) in_out_dims = list(zip(hidden_dims[:-1], hidden_dims[1:])) text_dims = 
[cross_attention_dim if is_exist else None for is_exist in add_cross_attention] num_blocks = len(block_out_channels) * [layers_per_block] layer_params = [num_blocks, text_dims, add_self_attention] rev_layer_params = map(reversed, layer_params) cat_dims = [] self.num_levels = len(in_out_dims) self.down_blocks = nn.ModuleList([]) for (level, ((in_dim, out_dim), res_block_num, text_dim, self_attention)) in enumerate(zip(in_out_dims, *layer_params)): down_sample = level != self.num_levels - 1 cat_dims.append(out_dim if level != self.num_levels - 1 else 0) self.down_blocks.append(Kandinsky3DownSampleBlock(in_dim, out_dim, time_embedding_dim, text_dim, res_block_num, groups, attention_head_dim, expansion_ratio, compression_ratio, down_sample, self_attention)) self.up_blocks = nn.ModuleList([]) for (level, ((out_dim, in_dim), res_block_num, text_dim, self_attention)) in enumerate(zip(reversed(in_out_dims), *rev_layer_params)): up_sample = level != 0 self.up_blocks.append(Kandinsky3UpSampleBlock(in_dim, cat_dims.pop(), out_dim, time_embedding_dim, text_dim, res_block_num, groups, attention_head_dim, expansion_ratio, compression_ratio, up_sample, self_attention)) self.conv_norm_out = nn.GroupNorm(groups, init_channels) self.conv_act_out = nn.SiLU() self.conv_out = nn.Conv2d(init_channels, out_channels, kernel_size=3, padding=1) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'set_processor'): processors[f'{name}.processor'] = module.processor for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): self.set_attn_processor(AttnProcessor()) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def forward(self, sample, timestep, encoder_hidden_states=None, encoder_attention_mask=None, return_dict=True): if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) if not torch.is_tensor(timestep): dtype = torch.float32 if isinstance(timestep, float) else torch.int32 timestep = torch.tensor([timestep], dtype=dtype, device=sample.device) elif len(timestep.shape) == 0: timestep = timestep[None].to(sample.device) timestep = timestep.expand(sample.shape[0]) time_embed_input = self.time_proj(timestep).to(sample.dtype) time_embed = self.time_embedding(time_embed_input) encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) if encoder_hidden_states is not None: time_embed = self.add_time_condition(time_embed, encoder_hidden_states, encoder_attention_mask) hidden_states = [] sample = self.conv_in(sample) for (level, down_sample) in enumerate(self.down_blocks): sample = down_sample(sample, time_embed, encoder_hidden_states, encoder_attention_mask) if level != self.num_levels - 1: hidden_states.append(sample) for (level, up_sample) in enumerate(self.up_blocks): if level != 0: sample = torch.cat([sample, hidden_states.pop()], dim=1) sample = up_sample(sample, time_embed, encoder_hidden_states, encoder_attention_mask) sample = self.conv_norm_out(sample) sample = self.conv_act_out(sample) sample = self.conv_out(sample) if not return_dict: return (sample,) return Kandinsky3UNetOutput(sample=sample) class Kandinsky3UpSampleBlock(nn.Module): def __init__(self, in_channels, cat_dim, out_channels, time_embed_dim, context_dim=None, num_blocks=3, groups=32, head_dim=64, expansion_ratio=4, compression_ratio=2, up_sample=True, self_attention=True): super().__init__() up_resolutions = [[None, True if up_sample else None, None, None]] + [[None] * 4] * (num_blocks - 1) hidden_channels = [(in_channels + cat_dim, in_channels)] + [(in_channels, in_channels)] * (num_blocks - 2) + [(in_channels, out_channels)] attentions = [] resnets_in = [] resnets_out = [] self.self_attention = self_attention self.context_dim = context_dim if self_attention: attentions.append(Kandinsky3AttentionBlock(out_channels, time_embed_dim, None, groups, head_dim, expansion_ratio)) else: attentions.append(nn.Identity()) for ((in_channel, out_channel), up_resolution) in zip(hidden_channels, up_resolutions): resnets_in.append(Kandinsky3ResNetBlock(in_channel, in_channel, time_embed_dim, groups, compression_ratio, up_resolution)) if context_dim is not None: attentions.append(Kandinsky3AttentionBlock(in_channel, time_embed_dim, context_dim, groups, head_dim, expansion_ratio)) else: attentions.append(nn.Identity()) resnets_out.append(Kandinsky3ResNetBlock(in_channel, out_channel, 
time_embed_dim, groups, compression_ratio)) self.attentions = nn.ModuleList(attentions) self.resnets_in = nn.ModuleList(resnets_in) self.resnets_out = nn.ModuleList(resnets_out) def forward(self, x, time_embed, context=None, context_mask=None, image_mask=None): for (attention, resnet_in, resnet_out) in zip(self.attentions[1:], self.resnets_in, self.resnets_out): x = resnet_in(x, time_embed) if self.context_dim is not None: x = attention(x, time_embed, context, context_mask, image_mask) x = resnet_out(x, time_embed) if self.self_attention: x = self.attentions[0](x, time_embed, image_mask=image_mask) return x class Kandinsky3DownSampleBlock(nn.Module): def __init__(self, in_channels, out_channels, time_embed_dim, context_dim=None, num_blocks=3, groups=32, head_dim=64, expansion_ratio=4, compression_ratio=2, down_sample=True, self_attention=True): super().__init__() attentions = [] resnets_in = [] resnets_out = [] self.self_attention = self_attention self.context_dim = context_dim if self_attention: attentions.append(Kandinsky3AttentionBlock(in_channels, time_embed_dim, None, groups, head_dim, expansion_ratio)) else: attentions.append(nn.Identity()) up_resolutions = [[None] * 4] * (num_blocks - 1) + [[None, None, False if down_sample else None, None]] hidden_channels = [(in_channels, out_channels)] + [(out_channels, out_channels)] * (num_blocks - 1) for ((in_channel, out_channel), up_resolution) in zip(hidden_channels, up_resolutions): resnets_in.append(Kandinsky3ResNetBlock(in_channel, out_channel, time_embed_dim, groups, compression_ratio)) if context_dim is not None: attentions.append(Kandinsky3AttentionBlock(out_channel, time_embed_dim, context_dim, groups, head_dim, expansion_ratio)) else: attentions.append(nn.Identity()) resnets_out.append(Kandinsky3ResNetBlock(out_channel, out_channel, time_embed_dim, groups, compression_ratio, up_resolution)) self.attentions = nn.ModuleList(attentions) self.resnets_in = nn.ModuleList(resnets_in) self.resnets_out = nn.ModuleList(resnets_out) def forward(self, x, time_embed, context=None, context_mask=None, image_mask=None): if self.self_attention: x = self.attentions[0](x, time_embed, image_mask=image_mask) for (attention, resnet_in, resnet_out) in zip(self.attentions[1:], self.resnets_in, self.resnets_out): x = resnet_in(x, time_embed) if self.context_dim is not None: x = attention(x, time_embed, context, context_mask, image_mask) x = resnet_out(x, time_embed) return x class Kandinsky3ConditionalGroupNorm(nn.Module): def __init__(self, groups, normalized_shape, context_dim): super().__init__() self.norm = nn.GroupNorm(groups, normalized_shape, affine=False) self.context_mlp = nn.Sequential(nn.SiLU(), nn.Linear(context_dim, 2 * normalized_shape)) self.context_mlp[1].weight.data.zero_() self.context_mlp[1].bias.data.zero_() def forward(self, x, context): context = self.context_mlp(context) for _ in range(len(x.shape[2:])): context = context.unsqueeze(-1) (scale, shift) = context.chunk(2, dim=1) x = self.norm(x) * (scale + 1.0) + shift return x class Kandinsky3Block(nn.Module): def __init__(self, in_channels, out_channels, time_embed_dim, kernel_size=3, norm_groups=32, up_resolution=None): super().__init__() self.group_norm = Kandinsky3ConditionalGroupNorm(norm_groups, in_channels, time_embed_dim) self.activation = nn.SiLU() if up_resolution is not None and up_resolution: self.up_sample = nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2) else: self.up_sample = nn.Identity() padding = int(kernel_size > 1) self.projection = 
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding) if up_resolution is not None and (not up_resolution): self.down_sample = nn.Conv2d(out_channels, out_channels, kernel_size=2, stride=2) else: self.down_sample = nn.Identity() def forward(self, x, time_embed): x = self.group_norm(x, time_embed) x = self.activation(x) x = self.up_sample(x) x = self.projection(x) x = self.down_sample(x) return x class Kandinsky3ResNetBlock(nn.Module): def __init__(self, in_channels, out_channels, time_embed_dim, norm_groups=32, compression_ratio=2, up_resolutions=4 * [None]): super().__init__() kernel_sizes = [1, 3, 3, 1] hidden_channel = max(in_channels, out_channels) // compression_ratio hidden_channels = [(in_channels, hidden_channel)] + [(hidden_channel, hidden_channel)] * 2 + [(hidden_channel, out_channels)] self.resnet_blocks = nn.ModuleList([Kandinsky3Block(in_channel, out_channel, time_embed_dim, kernel_size, norm_groups, up_resolution) for ((in_channel, out_channel), kernel_size, up_resolution) in zip(hidden_channels, kernel_sizes, up_resolutions)]) self.shortcut_up_sample = nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2) if True in up_resolutions else nn.Identity() self.shortcut_projection = nn.Conv2d(in_channels, out_channels, kernel_size=1) if in_channels != out_channels else nn.Identity() self.shortcut_down_sample = nn.Conv2d(out_channels, out_channels, kernel_size=2, stride=2) if False in up_resolutions else nn.Identity() def forward(self, x, time_embed): out = x for resnet_block in self.resnet_blocks: out = resnet_block(out, time_embed) x = self.shortcut_up_sample(x) x = self.shortcut_projection(x) x = self.shortcut_down_sample(x) x = x + out return x class Kandinsky3AttentionPooling(nn.Module): def __init__(self, num_channels, context_dim, head_dim=64): super().__init__() self.attention = Attention(context_dim, context_dim, dim_head=head_dim, out_dim=num_channels, out_bias=False) def forward(self, x, context, context_mask=None): context_mask = context_mask.to(dtype=context.dtype) context = self.attention(context.mean(dim=1, keepdim=True), context, context_mask) return x + context.squeeze(1) class Kandinsky3AttentionBlock(nn.Module): def __init__(self, num_channels, time_embed_dim, context_dim=None, norm_groups=32, head_dim=64, expansion_ratio=4): super().__init__() self.in_norm = Kandinsky3ConditionalGroupNorm(norm_groups, num_channels, time_embed_dim) self.attention = Attention(num_channels, context_dim or num_channels, dim_head=head_dim, out_dim=num_channels, out_bias=False) hidden_channels = expansion_ratio * num_channels self.out_norm = Kandinsky3ConditionalGroupNorm(norm_groups, num_channels, time_embed_dim) self.feed_forward = nn.Sequential(nn.Conv2d(num_channels, hidden_channels, kernel_size=1, bias=False), nn.SiLU(), nn.Conv2d(hidden_channels, num_channels, kernel_size=1, bias=False)) def forward(self, x, time_embed, context=None, context_mask=None, image_mask=None): (height, width) = x.shape[-2:] out = self.in_norm(x, time_embed) out = out.reshape(x.shape[0], -1, height * width).permute(0, 2, 1) context = context if context is not None else out if context_mask is not None: context_mask = context_mask.to(dtype=context.dtype) out = self.attention(out, context, context_mask) out = out.permute(0, 2, 1).unsqueeze(-1).reshape(out.shape[0], -1, height, width) x = x + out out = self.out_norm(x, time_embed) out = self.feed_forward(out) x = x + out return x # File: diffusers-main/src/diffusers/models/unets/unet_motion_model.py from dataclasses 
import dataclass from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, FrozenDict, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin, UNet2DConditionLoadersMixin from ...utils import BaseOutput, deprecate, is_torch_version, logging from ...utils.torch_utils import apply_freeu from ..attention import BasicTransformerBlock from ..attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, AttnProcessor2_0, FusedAttnProcessor2_0, IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0 from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from ..resnet import Downsample2D, ResnetBlock2D, Upsample2D from ..transformers.dual_transformer_2d import DualTransformer2DModel from ..transformers.transformer_2d import Transformer2DModel from .unet_2d_blocks import UNetMidBlock2DCrossAttn from .unet_2d_condition import UNet2DConditionModel logger = logging.get_logger(__name__) @dataclass class UNetMotionOutput(BaseOutput): sample: torch.Tensor class AnimateDiffTransformer3D(nn.Module): def __init__(self, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, out_channels: Optional[int]=None, num_layers: int=1, dropout: float=0.0, norm_num_groups: int=32, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, sample_size: Optional[int]=None, activation_fn: str='geglu', norm_elementwise_affine: bool=True, double_self_attention: bool=True, positional_embeddings: Optional[str]=None, num_positional_embeddings: Optional[int]=None): super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim self.in_channels = in_channels self.norm = nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-06, affine=True) self.proj_in = nn.Linear(in_channels, inner_dim) self.transformer_blocks = nn.ModuleList([BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, positional_embeddings=positional_embeddings, num_positional_embeddings=num_positional_embeddings) for _ in range(num_layers)]) self.proj_out = nn.Linear(inner_dim, in_channels) def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.LongTensor]=None, timestep: Optional[torch.LongTensor]=None, class_labels: Optional[torch.LongTensor]=None, num_frames: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None) -> torch.Tensor: (batch_frames, channel, height, width) = hidden_states.shape batch_size = batch_frames // num_frames residual = hidden_states hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width) hidden_states = hidden_states.permute(0, 2, 1, 3, 4) hidden_states = self.norm(hidden_states) hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel) hidden_states = self.proj_in(input=hidden_states) for block in self.transformer_blocks: hidden_states = block(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, 
cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels) hidden_states = self.proj_out(input=hidden_states) hidden_states = hidden_states[None, None, :].reshape(batch_size, height, width, num_frames, channel).permute(0, 3, 4, 1, 2).contiguous() hidden_states = hidden_states.reshape(batch_frames, channel, height, width) output = hidden_states + residual return output class DownBlockMotion(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_downsample: bool=True, downsample_padding: int=1, temporal_num_attention_heads: Union[int, Tuple[int]]=1, temporal_cross_attention_dim: Optional[int]=None, temporal_max_seq_length: int=32, temporal_transformer_layers_per_block: Union[int, Tuple[int]]=1, temporal_double_self_attention: bool=True): super().__init__() resnets = [] motion_modules = [] if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError(f'`temporal_transformer_layers_per_block` must be an integer or a tuple of integers of length {num_layers}') if isinstance(temporal_num_attention_heads, int): temporal_num_attention_heads = (temporal_num_attention_heads,) * num_layers elif len(temporal_num_attention_heads) != num_layers: raise ValueError(f'`temporal_num_attention_heads` must be an integer or a tuple of integers of length {num_layers}') for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) motion_modules.append(AnimateDiffTransformer3D(num_attention_heads=temporal_num_attention_heads[i], in_channels=out_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, activation_fn='geglu', positional_embeddings='sinusoidal', num_positional_embeddings=temporal_max_seq_length, attention_head_dim=out_channels // temporal_num_attention_heads[i], double_self_attention=temporal_double_self_attention)) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, num_frames: int=1, *args, **kwargs) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' 
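# A hedged usage sketch (not part of the library source): the motion blocks in this file pair
# each ResnetBlock2D with an AnimateDiffTransformer3D that attends across frames. Its input is
# the frame-folded tensor (batch * num_frames, channels, height, width), and `num_frames` tells
# it how to unfold the batch. The sizes below and the import path (inferred from the file header
# above) are illustrative assumptions.
import torch
from diffusers.models.unets.unet_motion_model import AnimateDiffTransformer3D

motion_module = AnimateDiffTransformer3D(
    num_attention_heads=2,
    attention_head_dim=4,
    in_channels=8,
    num_layers=1,
    norm_num_groups=4,
    positional_embeddings="sinusoidal",
    num_positional_embeddings=32,   # mirrors temporal_max_seq_length in the blocks above
)
frames = torch.randn(2 * 4, 8, 16, 16)          # 2 videos of 4 frames each, frames folded into batch
with torch.no_grad():
    out = motion_module(frames, num_frames=4)   # temporal self-attention per spatial location
print(out.shape)                                # torch.Size([8, 8, 16, 16]) -- shape is preserved
# (End of sketch; DownBlockMotion.forward continues below with the deprecation warning.)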
deprecate('scale', '1.0.0', deprecation_message) output_states = () blocks = zip(self.resnets, self.motion_modules) for (resnet, motion_module) in blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(input_tensor=hidden_states, temb=temb) hidden_states = motion_module(hidden_states, num_frames=num_frames) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states=hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class CrossAttnDownBlockMotion(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, downsample_padding: int=1, add_downsample: bool=True, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, attention_type: str='default', temporal_cross_attention_dim: Optional[int]=None, temporal_num_attention_heads: int=8, temporal_max_seq_length: int=32, temporal_transformer_layers_per_block: Union[int, Tuple[int]]=1, temporal_double_self_attention: bool=True): super().__init__() resnets = [] attentions = [] motion_modules = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = (transformer_layers_per_block,) * num_layers elif len(transformer_layers_per_block) != num_layers: raise ValueError(f'transformer_layers_per_block must be an integer or a list of integers of length {num_layers}') if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError(f'temporal_transformer_layers_per_block must be an integer or a list of integers of length {num_layers}') for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) if not dual_cross_attention: attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, out_channels // 
num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) motion_modules.append(AnimateDiffTransformer3D(num_attention_heads=temporal_num_attention_heads, in_channels=out_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, activation_fn='geglu', positional_embeddings='sinusoidal', num_positional_embeddings=temporal_max_seq_length, attention_head_dim=out_channels // temporal_num_attention_heads, double_self_attention=temporal_double_self_attention)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, num_frames: int=1, encoder_attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, additional_residuals: Optional[torch.Tensor]=None): if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') output_states = () blocks = list(zip(self.resnets, self.attentions, self.motion_modules)) for (i, (resnet, attn, motion_module)) in enumerate(blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) else: hidden_states = resnet(input_tensor=hidden_states, temb=temb) hidden_states = attn(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] hidden_states = motion_module(hidden_states, num_frames=num_frames) if i == len(blocks) - 1 and additional_residuals is not None: hidden_states = hidden_states + additional_residuals output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states=hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class CrossAttnUpBlockMotion(nn.Module): def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, add_upsample: bool=True, dual_cross_attention: bool=False, use_linear_projection: bool=False, 
only_cross_attention: bool=False, upcast_attention: bool=False, attention_type: str='default', temporal_cross_attention_dim: Optional[int]=None, temporal_num_attention_heads: int=8, temporal_max_seq_length: int=32, temporal_transformer_layers_per_block: Union[int, Tuple[int]]=1): super().__init__() resnets = [] attentions = [] motion_modules = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = (transformer_layers_per_block,) * num_layers elif len(transformer_layers_per_block) != num_layers: raise ValueError(f'transformer_layers_per_block must be an integer or a list of integers of length {num_layers}, got {len(transformer_layers_per_block)}') if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError(f'temporal_transformer_layers_per_block must be an integer or a list of integers of length {num_layers}, got {len(temporal_transformer_layers_per_block)}') for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) if not dual_cross_attention: attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) motion_modules.append(AnimateDiffTransformer3D(num_attention_heads=temporal_num_attention_heads, in_channels=out_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, activation_fn='geglu', positional_embeddings='sinusoidal', num_positional_embeddings=temporal_max_seq_length, attention_head_dim=out_channels // temporal_num_attention_heads)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, upsample_size: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, num_frames: int=1) -> torch.Tensor: if cross_attention_kwargs is not None: 
if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and getattr(self, 'b2', None) blocks = zip(self.resnets, self.attentions, self.motion_modules) for (resnet, attn, motion_module) in blocks: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] if is_freeu_enabled: (hidden_states, res_hidden_states) = apply_freeu(self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) else: hidden_states = resnet(input_tensor=hidden_states, temb=temb) hidden_states = attn(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] hidden_states = motion_module(hidden_states, num_frames=num_frames) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states=hidden_states, output_size=upsample_size) return hidden_states class UpBlockMotion(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_upsample: bool=True, temporal_cross_attention_dim: Optional[int]=None, temporal_num_attention_heads: int=8, temporal_max_seq_length: int=32, temporal_transformer_layers_per_block: Union[int, Tuple[int]]=1): super().__init__() resnets = [] motion_modules = [] if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError(f'temporal_transformer_layers_per_block must be an integer or a list of integers of length {num_layers}') for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) motion_modules.append(AnimateDiffTransformer3D(num_attention_heads=temporal_num_attention_heads, in_channels=out_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, 
activation_fn='geglu', positional_embeddings='sinusoidal', num_positional_embeddings=temporal_max_seq_length, attention_head_dim=out_channels // temporal_num_attention_heads)) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, upsample_size=None, num_frames: int=1, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and getattr(self, 'b2', None) blocks = zip(self.resnets, self.motion_modules) for (resnet, motion_module) in blocks: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] if is_freeu_enabled: (hidden_states, res_hidden_states) = apply_freeu(self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(input_tensor=hidden_states, temb=temb) hidden_states = motion_module(hidden_states, num_frames=num_frames) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states=hidden_states, output_size=upsample_size) return hidden_states class UNetMidBlockCrossAttnMotion(nn.Module): def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, output_scale_factor: float=1.0, cross_attention_dim: int=1280, dual_cross_attention: bool=False, use_linear_projection: bool=False, upcast_attention: bool=False, attention_type: str='default', temporal_num_attention_heads: int=1, temporal_cross_attention_dim: Optional[int]=None, temporal_max_seq_length: int=32, temporal_transformer_layers_per_block: Union[int, Tuple[int]]=1): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = (transformer_layers_per_block,) * num_layers elif len(transformer_layers_per_block) != num_layers: raise ValueError(f'`transformer_layers_per_block` should be an integer 
or a list of integers of length {num_layers}.') if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = (temporal_transformer_layers_per_block,) * num_layers elif len(temporal_transformer_layers_per_block) != num_layers: raise ValueError(f'`temporal_transformer_layers_per_block` should be an integer or a list of integers of length {num_layers}.') resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)] attentions = [] motion_modules = [] for i in range(num_layers): if not dual_cross_attention: attentions.append(Transformer2DModel(num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) motion_modules.append(AnimateDiffTransformer3D(num_attention_heads=temporal_num_attention_heads, attention_head_dim=in_channels // temporal_num_attention_heads, in_channels=in_channels, num_layers=temporal_transformer_layers_per_block[i], norm_num_groups=resnet_groups, cross_attention_dim=temporal_cross_attention_dim, attention_bias=False, positional_embeddings='sinusoidal', num_positional_embeddings=temporal_max_seq_length, activation_fn='geglu')) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.motion_modules = nn.ModuleList(motion_modules) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None, num_frames: int=1) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') hidden_states = self.resnets[0](input_tensor=hidden_states, temb=temb) blocks = zip(self.attentions, self.resnets[1:], self.motion_modules) for (attn, resnet, motion_module) in blocks: hidden_states = attn(hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states, temb, **ckpt_kwargs) hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) else: hidden_states = motion_module(hidden_states, num_frames=num_frames) hidden_states = resnet(input_tensor=hidden_states, temb=temb) return hidden_states class MotionModules(nn.Module): def __init__(self, in_channels: int, layers_per_block: int=2, transformer_layers_per_block: Union[int, Tuple[int]]=8, num_attention_heads: Union[int, Tuple[int]]=8, attention_bias: bool=False, cross_attention_dim: Optional[int]=None, activation_fn: str='geglu', norm_num_groups: int=32, max_seq_length: int=32): super().__init__() self.motion_modules = nn.ModuleList([]) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = (transformer_layers_per_block,) * layers_per_block elif len(transformer_layers_per_block) != layers_per_block: raise ValueError(f'The number of transformer layers per block must match the number of layers per block, got {layers_per_block} and {len(transformer_layers_per_block)}') for i in range(layers_per_block): self.motion_modules.append(AnimateDiffTransformer3D(in_channels=in_channels, num_layers=transformer_layers_per_block[i], norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, num_attention_heads=num_attention_heads, attention_head_dim=in_channels // num_attention_heads, positional_embeddings='sinusoidal', num_positional_embeddings=max_seq_length)) class MotionAdapter(ModelMixin, ConfigMixin, FromOriginalModelMixin): @register_to_config def __init__(self, block_out_channels: Tuple[int, ...]=(320, 640, 1280, 1280), motion_layers_per_block: Union[int, Tuple[int]]=2, motion_transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]]=1, motion_mid_block_layers_per_block: int=1, motion_transformer_layers_per_mid_block: Union[int, Tuple[int]]=1, motion_num_attention_heads: Union[int, Tuple[int]]=8, motion_norm_num_groups: int=32, motion_max_seq_length: int=32, use_motion_mid_block: bool=True, conv_in_channels: Optional[int]=None): super().__init__() down_blocks = [] up_blocks = [] if isinstance(motion_layers_per_block, int): motion_layers_per_block = (motion_layers_per_block,) * len(block_out_channels) elif len(motion_layers_per_block) != len(block_out_channels): raise ValueError(f'The number of motion layers per block must match the number of blocks, got {len(block_out_channels)} and {len(motion_layers_per_block)}') if isinstance(motion_transformer_layers_per_block, int): motion_transformer_layers_per_block = 
(motion_transformer_layers_per_block,) * len(block_out_channels) if isinstance(motion_transformer_layers_per_mid_block, int): motion_transformer_layers_per_mid_block = (motion_transformer_layers_per_mid_block,) * motion_mid_block_layers_per_block elif len(motion_transformer_layers_per_mid_block) != motion_mid_block_layers_per_block: raise ValueError(f'The number of layers per mid block ({motion_mid_block_layers_per_block}) must match the length of motion_transformer_layers_per_mid_block ({len(motion_transformer_layers_per_mid_block)})') if isinstance(motion_num_attention_heads, int): motion_num_attention_heads = (motion_num_attention_heads,) * len(block_out_channels) elif len(motion_num_attention_heads) != len(block_out_channels): raise ValueError(f'The length of the attention head number tuple in the motion module must match the number of block, got {len(motion_num_attention_heads)} and {len(block_out_channels)}') if conv_in_channels: self.conv_in = nn.Conv2d(conv_in_channels, block_out_channels[0], kernel_size=3, padding=1) else: self.conv_in = None for (i, channel) in enumerate(block_out_channels): output_channel = block_out_channels[i] down_blocks.append(MotionModules(in_channels=output_channel, norm_num_groups=motion_norm_num_groups, cross_attention_dim=None, activation_fn='geglu', attention_bias=False, num_attention_heads=motion_num_attention_heads[i], max_seq_length=motion_max_seq_length, layers_per_block=motion_layers_per_block[i], transformer_layers_per_block=motion_transformer_layers_per_block[i])) if use_motion_mid_block: self.mid_block = MotionModules(in_channels=block_out_channels[-1], norm_num_groups=motion_norm_num_groups, cross_attention_dim=None, activation_fn='geglu', attention_bias=False, num_attention_heads=motion_num_attention_heads[-1], max_seq_length=motion_max_seq_length, layers_per_block=motion_mid_block_layers_per_block, transformer_layers_per_block=motion_transformer_layers_per_mid_block) else: self.mid_block = None reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] reversed_motion_layers_per_block = list(reversed(motion_layers_per_block)) reversed_motion_transformer_layers_per_block = list(reversed(motion_transformer_layers_per_block)) reversed_motion_num_attention_heads = list(reversed(motion_num_attention_heads)) for (i, channel) in enumerate(reversed_block_out_channels): output_channel = reversed_block_out_channels[i] up_blocks.append(MotionModules(in_channels=output_channel, norm_num_groups=motion_norm_num_groups, cross_attention_dim=None, activation_fn='geglu', attention_bias=False, num_attention_heads=reversed_motion_num_attention_heads[i], max_seq_length=motion_max_seq_length, layers_per_block=reversed_motion_layers_per_block[i] + 1, transformer_layers_per_block=reversed_motion_transformer_layers_per_block[i])) self.down_blocks = nn.ModuleList(down_blocks) self.up_blocks = nn.ModuleList(up_blocks) def forward(self, sample): pass class UNetMotionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, sample_size: Optional[int]=None, in_channels: int=4, out_channels: int=4, down_block_types: Tuple[str, ...]=('CrossAttnDownBlockMotion', 'CrossAttnDownBlockMotion', 'CrossAttnDownBlockMotion', 'DownBlockMotion'), up_block_types: Tuple[str, ...]=('UpBlockMotion', 'CrossAttnUpBlockMotion', 'CrossAttnUpBlockMotion', 'CrossAttnUpBlockMotion'), block_out_channels: Tuple[int, ...]=(320, 640, 1280, 1280), 
layers_per_block: Union[int, Tuple[int]]=2, downsample_padding: int=1, mid_block_scale_factor: float=1, act_fn: str='silu', norm_num_groups: int=32, norm_eps: float=1e-05, cross_attention_dim: int=1280, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]]=1, reverse_transformer_layers_per_block: Optional[Union[int, Tuple[int], Tuple[Tuple]]]=None, temporal_transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]]=1, reverse_temporal_transformer_layers_per_block: Optional[Union[int, Tuple[int], Tuple[Tuple]]]=None, transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]]=None, temporal_transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]]=1, use_linear_projection: bool=False, num_attention_heads: Union[int, Tuple[int, ...]]=8, motion_max_seq_length: int=32, motion_num_attention_heads: Union[int, Tuple[int, ...]]=8, reverse_motion_num_attention_heads: Optional[Union[int, Tuple[int, ...], Tuple[Tuple[int, ...], ...]]]=None, use_motion_mid_block: bool=True, mid_block_layers: int=1, encoder_hid_dim: Optional[int]=None, encoder_hid_dim_type: Optional[str]=None, addition_embed_type: Optional[str]=None, addition_time_embed_dim: Optional[int]=None, projection_class_embeddings_input_dim: Optional[int]=None, time_cond_proj_dim: Optional[int]=None): super().__init__() self.sample_size = sample_size if len(down_block_types) != len(up_block_types): raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.') if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError(f'Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}.') if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError(f'Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. 
`down_block_types`: {down_block_types}.') if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: for layer_number_per_block in transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") if isinstance(temporal_transformer_layers_per_block, list) and reverse_temporal_transformer_layers_per_block is None: for layer_number_per_block in temporal_transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError("Must provide 'reverse_temporal_transformer_layers_per_block` if using asymmetrical motion module in UNet.") conv_in_kernel = 3 conv_out_kernel = 3 conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, 0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn, cond_proj_dim=time_cond_proj_dim) if encoder_hid_dim_type is None: self.encoder_hid_proj = None if addition_embed_type == 'text_time': self.add_time_proj = Timesteps(addition_time_embed_dim, True, 0) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if isinstance(reverse_transformer_layers_per_block, int): reverse_transformer_layers_per_block = [reverse_transformer_layers_per_block] * len(down_block_types) if isinstance(temporal_transformer_layers_per_block, int): temporal_transformer_layers_per_block = [temporal_transformer_layers_per_block] * len(down_block_types) if isinstance(reverse_temporal_transformer_layers_per_block, int): reverse_temporal_transformer_layers_per_block = [reverse_temporal_transformer_layers_per_block] * len(down_block_types) if isinstance(motion_num_attention_heads, int): motion_num_attention_heads = (motion_num_attention_heads,) * len(down_block_types) output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == 'CrossAttnDownBlockMotion': down_block = CrossAttnDownBlockMotion(in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, num_attention_heads=num_attention_heads[i], cross_attention_dim=cross_attention_dim[i], downsample_padding=downsample_padding, add_downsample=not is_final_block, use_linear_projection=use_linear_projection, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i]) 
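# --- Illustrative sketch, not part of the diffusers source above ---
# The UNetMotionModel constructor repeatedly normalizes scalar config values
# (layers_per_block, transformer_layers_per_block, num_attention_heads,
# motion_num_attention_heads, ...) into per-block tuples before indexing them with
# the block index `i`, as in the CrossAttnDownBlockMotion call just constructed.
# A minimal, hypothetical standalone helper showing that expansion pattern
# (the name `_expand_per_block` is an assumption for illustration only):
def _expand_per_block(value, num_blocks):
    # A single int applies to every block; a sequence must provide one entry per block.
    if isinstance(value, int):
        return (value,) * num_blocks
    if len(value) != num_blocks:
        raise ValueError(f"Expected {num_blocks} entries, got {len(value)}")
    return tuple(value)
# e.g. _expand_per_block(2, 4) == (2, 2, 2, 2); _expand_per_block([1, 2, 4, 4], 4) == (1, 2, 4, 4)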
elif down_block_type == 'DownBlockMotion': down_block = DownBlockMotion(in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, num_layers=layers_per_block[i], resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, add_downsample=not is_final_block, downsample_padding=downsample_padding, temporal_num_attention_heads=motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block[i]) else: raise ValueError('Invalid `down_block_type` encountered. Must be one of `CrossAttnDownBlockMotion` or `DownBlockMotion`') self.down_blocks.append(down_block) if transformer_layers_per_mid_block is None: transformer_layers_per_mid_block = transformer_layers_per_block[-1] if isinstance(transformer_layers_per_block[-1], int) else 1 if use_motion_mid_block: self.mid_block = UNetMidBlockCrossAttnMotion(in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=False, use_linear_projection=use_linear_projection, num_layers=mid_block_layers, temporal_num_attention_heads=motion_num_attention_heads[-1], temporal_max_seq_length=motion_max_seq_length, transformer_layers_per_block=transformer_layers_per_mid_block, temporal_transformer_layers_per_block=temporal_transformer_layers_per_mid_block) else: self.mid_block = UNetMidBlock2DCrossAttn(in_channels=block_out_channels[-1], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=False, use_linear_projection=use_linear_projection, num_layers=mid_block_layers, transformer_layers_per_block=transformer_layers_per_mid_block) self.num_upsamplers = 0 reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_motion_num_attention_heads = list(reversed(motion_num_attention_heads)) if reverse_transformer_layers_per_block is None: reverse_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) if reverse_temporal_transformer_layers_per_block is None: reverse_temporal_transformer_layers_per_block = list(reversed(temporal_transformer_layers_per_block)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False if up_block_type == 'CrossAttnUpBlockMotion': up_block = CrossAttnUpBlockMotion(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, resolution_idx=i, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reverse_transformer_layers_per_block[i], resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, 
num_attention_heads=reversed_num_attention_heads[i], cross_attention_dim=reversed_cross_attention_dim[i], add_upsample=add_upsample, use_linear_projection=use_linear_projection, temporal_num_attention_heads=reversed_motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=reverse_temporal_transformer_layers_per_block[i]) elif up_block_type == 'UpBlockMotion': up_block = UpBlockMotion(in_channels=input_channel, prev_output_channel=prev_output_channel, out_channels=output_channel, temb_channels=time_embed_dim, resolution_idx=i, num_layers=reversed_layers_per_block[i] + 1, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, add_upsample=add_upsample, temporal_num_attention_heads=reversed_motion_num_attention_heads[i], temporal_max_seq_length=motion_max_seq_length, temporal_transformer_layers_per_block=reverse_temporal_transformer_layers_per_block[i]) else: raise ValueError('Invalid `up_block_type` encountered. Must be one of `CrossAttnUpBlockMotion` or `UpBlockMotion`') self.up_blocks.append(up_block) prev_output_channel = output_channel if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) self.conv_act = nn.SiLU() else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding) @classmethod def from_unet2d(cls, unet: UNet2DConditionModel, motion_adapter: Optional[MotionAdapter]=None, load_weights: bool=True): has_motion_adapter = motion_adapter is not None if has_motion_adapter: motion_adapter.to(device=unet.device) if len(unet.config['down_block_types']) != len(motion_adapter.config['block_out_channels']): raise ValueError('Incompatible Motion Adapter, got different number of blocks') if isinstance(unet.config['layers_per_block'], int): expanded_layers_per_block = [unet.config['layers_per_block']] * len(unet.config['down_block_types']) else: expanded_layers_per_block = list(unet.config['layers_per_block']) if isinstance(motion_adapter.config['motion_layers_per_block'], int): expanded_adapter_layers_per_block = [motion_adapter.config['motion_layers_per_block']] * len(motion_adapter.config['block_out_channels']) else: expanded_adapter_layers_per_block = list(motion_adapter.config['motion_layers_per_block']) if expanded_layers_per_block != expanded_adapter_layers_per_block: raise ValueError('Incompatible Motion Adapter, got different number of layers per block') config = dict(unet.config) config['_class_name'] = cls.__name__ down_blocks = [] for down_blocks_type in config['down_block_types']: if 'CrossAttn' in down_blocks_type: down_blocks.append('CrossAttnDownBlockMotion') else: down_blocks.append('DownBlockMotion') config['down_block_types'] = down_blocks up_blocks = [] for down_blocks_type in config['up_block_types']: if 'CrossAttn' in down_blocks_type: up_blocks.append('CrossAttnUpBlockMotion') else: up_blocks.append('UpBlockMotion') config['up_block_types'] = up_blocks if has_motion_adapter: config['motion_num_attention_heads'] = motion_adapter.config['motion_num_attention_heads'] config['motion_max_seq_length'] = motion_adapter.config['motion_max_seq_length'] config['use_motion_mid_block'] = motion_adapter.config['use_motion_mid_block'] config['layers_per_block'] = motion_adapter.config['motion_layers_per_block'] config['temporal_transformer_layers_per_mid_block'] = 
motion_adapter.config['motion_transformer_layers_per_mid_block'] config['temporal_transformer_layers_per_block'] = motion_adapter.config['motion_transformer_layers_per_block'] config['motion_num_attention_heads'] = motion_adapter.config['motion_num_attention_heads'] if motion_adapter.config['conv_in_channels']: config['in_channels'] = motion_adapter.config['conv_in_channels'] if not config.get('num_attention_heads'): config['num_attention_heads'] = config['attention_head_dim'] (expected_kwargs, optional_kwargs) = cls._get_signature_keys(cls) config = FrozenDict({k: config.get(k) for k in config if k in expected_kwargs or k in optional_kwargs}) config['_class_name'] = cls.__name__ model = cls.from_config(config) if not load_weights: return model if has_motion_adapter and motion_adapter.config['conv_in_channels']: model.conv_in = motion_adapter.conv_in updated_conv_in_weight = torch.cat([unet.conv_in.weight, motion_adapter.conv_in.weight[:, 4:, :, :]], dim=1) model.conv_in.load_state_dict({'weight': updated_conv_in_weight, 'bias': unet.conv_in.bias}) else: model.conv_in.load_state_dict(unet.conv_in.state_dict()) model.time_proj.load_state_dict(unet.time_proj.state_dict()) model.time_embedding.load_state_dict(unet.time_embedding.state_dict()) if any((isinstance(proc, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)) for proc in unet.attn_processors.values())): attn_procs = {} for (name, processor) in unet.attn_processors.items(): if name.endswith('attn1.processor'): attn_processor_class = AttnProcessor2_0 if hasattr(F, 'scaled_dot_product_attention') else AttnProcessor attn_procs[name] = attn_processor_class() else: attn_processor_class = IPAdapterAttnProcessor2_0 if hasattr(F, 'scaled_dot_product_attention') else IPAdapterAttnProcessor attn_procs[name] = attn_processor_class(hidden_size=processor.hidden_size, cross_attention_dim=processor.cross_attention_dim, scale=processor.scale, num_tokens=processor.num_tokens) for (name, processor) in model.attn_processors.items(): if name not in attn_procs: attn_procs[name] = processor.__class__() model.set_attn_processor(attn_procs) model.config.encoder_hid_dim_type = 'ip_image_proj' model.encoder_hid_proj = unet.encoder_hid_proj for (i, down_block) in enumerate(unet.down_blocks): model.down_blocks[i].resnets.load_state_dict(down_block.resnets.state_dict()) if hasattr(model.down_blocks[i], 'attentions'): model.down_blocks[i].attentions.load_state_dict(down_block.attentions.state_dict()) if model.down_blocks[i].downsamplers: model.down_blocks[i].downsamplers.load_state_dict(down_block.downsamplers.state_dict()) for (i, up_block) in enumerate(unet.up_blocks): model.up_blocks[i].resnets.load_state_dict(up_block.resnets.state_dict()) if hasattr(model.up_blocks[i], 'attentions'): model.up_blocks[i].attentions.load_state_dict(up_block.attentions.state_dict()) if model.up_blocks[i].upsamplers: model.up_blocks[i].upsamplers.load_state_dict(up_block.upsamplers.state_dict()) model.mid_block.resnets.load_state_dict(unet.mid_block.resnets.state_dict()) model.mid_block.attentions.load_state_dict(unet.mid_block.attentions.state_dict()) if unet.conv_norm_out is not None: model.conv_norm_out.load_state_dict(unet.conv_norm_out.state_dict()) if unet.conv_act is not None: model.conv_act.load_state_dict(unet.conv_act.state_dict()) model.conv_out.load_state_dict(unet.conv_out.state_dict()) if has_motion_adapter: model.load_motion_modules(motion_adapter) model.to(unet.dtype) return model def freeze_unet2d_params(self) -> None: for param in self.parameters(): 
param.requires_grad = False for down_block in self.down_blocks: motion_modules = down_block.motion_modules for param in motion_modules.parameters(): param.requires_grad = True for up_block in self.up_blocks: motion_modules = up_block.motion_modules for param in motion_modules.parameters(): param.requires_grad = True if hasattr(self.mid_block, 'motion_modules'): motion_modules = self.mid_block.motion_modules for param in motion_modules.parameters(): param.requires_grad = True def load_motion_modules(self, motion_adapter: Optional[MotionAdapter]) -> None: for (i, down_block) in enumerate(motion_adapter.down_blocks): self.down_blocks[i].motion_modules.load_state_dict(down_block.motion_modules.state_dict()) for (i, up_block) in enumerate(motion_adapter.up_blocks): self.up_blocks[i].motion_modules.load_state_dict(up_block.motion_modules.state_dict()) if hasattr(self.mid_block, 'motion_modules'): self.mid_block.motion_modules.load_state_dict(motion_adapter.mid_block.motion_modules.state_dict()) def save_motion_modules(self, save_directory: str, is_main_process: bool=True, safe_serialization: bool=True, variant: Optional[str]=None, push_to_hub: bool=False, **kwargs) -> None: state_dict = self.state_dict() motion_state_dict = {} for (k, v) in state_dict.items(): if 'motion_modules' in k: motion_state_dict[k] = v adapter = MotionAdapter(block_out_channels=self.config['block_out_channels'], motion_layers_per_block=self.config['layers_per_block'], motion_norm_num_groups=self.config['norm_num_groups'], motion_num_attention_heads=self.config['motion_num_attention_heads'], motion_max_seq_length=self.config['motion_max_seq_length'], use_motion_mid_block=self.config['use_motion_mid_block']) adapter.load_state_dict(motion_state_dict) adapter.save_pretrained(save_directory=save_directory, is_main_process=is_main_process, safe_serialization=safe_serialization, variant=variant, push_to_hub=push_to_hub, **kwargs) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def enable_forward_chunking(self, chunk_size: Optional[int]=None, dim: int=0) -> None: if dim not in [0, 1]: raise ValueError(f'Make sure to set `dim` to either 0 or 1, not {dim}') chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def disable_forward_chunking(self) -> None: def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, None, 0) def set_default_attn_processor(self) -> None: if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value: bool=False) -> None: if isinstance(module, (CrossAttnDownBlockMotion, DownBlockMotion, CrossAttnUpBlockMotion, UpBlockMotion)): module.gradient_checkpointing = value def enable_freeu(self, s1: float, s2: float, b1: float, b2: float) -> None: for (i, upsample_block) in enumerate(self.up_blocks): setattr(upsample_block, 's1', s1) setattr(upsample_block, 's2', s2) setattr(upsample_block, 'b1', b1) setattr(upsample_block, 'b2', b2) def disable_freeu(self) -> None: freeu_keys = {'s1', 's2', 'b1', 'b2'} for (i, upsample_block) in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, 
cross_attention_kwargs: Optional[Dict[str, Any]]=None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]]=None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]]=None, mid_block_additional_residual: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[UNetMotionOutput, Tuple[torch.Tensor]]: default_overall_up_factor = 2 ** self.num_upsamplers forward_upsample_size = False upsample_size = None if any((s % default_overall_up_factor != 0 for s in sample.shape[-2:])): logger.info('Forward upsample size to force interpolation output size.') forward_upsample_size = True if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) num_frames = sample.shape[2] timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=self.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.config.addition_embed_type == 'text_time': if 'text_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`") text_embeds = added_cond_kwargs.get('text_embeds') if 'time_ids' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`") time_ids = added_cond_kwargs.get('time_ids') time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) emb = emb if aug_emb is None else emb + aug_emb emb = emb.repeat_interleave(repeats=num_frames, dim=0) if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'ip_image_proj': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`") image_embeds = added_cond_kwargs.get('image_embeds') image_embeds = self.encoder_hid_proj(image_embeds) image_embeds = [image_embed.repeat_interleave(repeats=num_frames, dim=0) for image_embed in image_embeds] encoder_hidden_states = (encoder_hidden_states, image_embeds) sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:]) sample = self.conv_in(sample) down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames) down_block_res_samples += res_samples if 
down_block_additional_residuals is not None: new_down_block_res_samples = () for (down_block_res_sample, down_block_additional_residual) in zip(down_block_res_samples, down_block_additional_residuals): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples if self.mid_block is not None: if hasattr(self.mid_block, 'motion_modules'): sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) else: sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs) if mid_block_additional_residual is not None: sample = sample + mid_block_additional_residual for (i, upsample_block) in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets):] down_block_res_samples = down_block_res_samples[:-len(upsample_block.resnets)] if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, upsample_size=upsample_size, attention_mask=attention_mask, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs) else: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, num_frames=num_frames) if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4) if not return_dict: return (sample,) return UNetMotionOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/unet_spatio_temporal_condition.py from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...utils import BaseOutput, logging from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor from ..embeddings import TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_3d_blocks import UNetMidBlockSpatioTemporal, get_down_block, get_up_block logger = logging.get_logger(__name__) @dataclass class UNetSpatioTemporalConditionOutput(BaseOutput): sample: torch.Tensor = None class UNetSpatioTemporalConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, sample_size: Optional[int]=None, in_channels: int=8, out_channels: int=4, down_block_types: Tuple[str]=('CrossAttnDownBlockSpatioTemporal', 'CrossAttnDownBlockSpatioTemporal', 'CrossAttnDownBlockSpatioTemporal', 'DownBlockSpatioTemporal'), up_block_types: Tuple[str]=('UpBlockSpatioTemporal', 'CrossAttnUpBlockSpatioTemporal', 'CrossAttnUpBlockSpatioTemporal', 'CrossAttnUpBlockSpatioTemporal'), block_out_channels: Tuple[int]=(320, 640, 1280, 1280), addition_time_embed_dim: int=256, projection_class_embeddings_input_dim: int=768, layers_per_block: 
Union[int, Tuple[int]]=2, cross_attention_dim: Union[int, Tuple[int]]=1024, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]]=1, num_attention_heads: Union[int, Tuple[int]]=(5, 10, 20, 20), num_frames: int=25): super().__init__() self.sample_size = sample_size if len(down_block_types) != len(up_block_types): raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.') if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError(f'Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}.') if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError(f'Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}.') self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=1) time_embed_dim = block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], True, downscale_freq_shift=0) timestep_input_dim = block_out_channels[0] self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) self.add_time_proj = Timesteps(addition_time_embed_dim, True, downscale_freq_shift=0) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) blocks_time_embed_dim = time_embed_dim output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=1e-05, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], resnet_act_fn='silu') self.down_blocks.append(down_block) self.mid_block = UNetMidBlockSpatioTemporal(block_out_channels[-1], temb_channels=blocks_time_embed_dim, transformer_layers_per_block=transformer_layers_per_block[-1], cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1]) 
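# --- Illustrative sketch, not part of the diffusers source above ---
# The up path built below mirrors the down path: the per-block settings are reversed and
# each up block gets one extra layer (reversed_layers_per_block[i] + 1), so that in the
# forward pass it can pop len(upsample_block.resnets) skip tensors from
# down_block_res_samples. A small standalone illustration with hypothetical
# `_example`-suffixed names, assuming the default 4-block configuration:
block_out_channels_example = (320, 640, 1280, 1280)
layers_per_block_example = [2, 2, 2, 2]
reversed_channels_example = list(reversed(block_out_channels_example))   # [1280, 1280, 640, 320]
up_layers_example = [n + 1 for n in reversed(layers_per_block_example)]  # [3, 3, 3, 3]
# Skip tensors produced on the way down: the conv_in sample, plus one per resnet layer,
# plus one per downsampler (1 + 8 + 3 = 12 here), which is exactly what the four up
# blocks with 3 resnets each consume in reverse order.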
self.num_upsamplers = 0 reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block(up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=1e-05, resolution_idx=i, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], resnet_act_fn='silu') self.up_blocks.append(up_block) prev_output_channel = output_channel self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-05) self.conv_act = nn.SiLU() self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def enable_forward_chunking(self, chunk_size: Optional[int]=None, dim: int=0) -> None: if dim not in [0, 1]: raise ValueError(f'Make sure to set `dim` to either 0 or 1, not {dim}') chunk_size = chunk_size or 1 def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int): if hasattr(module, 'set_chunk_feed_forward'): module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim) for child in module.children(): fn_recursive_feed_forward(child, chunk_size, dim) for module in self.children(): fn_recursive_feed_forward(module, chunk_size, dim) def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, added_time_ids: torch.Tensor, return_dict: bool=True) -> Union[UNetSpatioTemporalConditionOutput, Tuple]: timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) (batch_size, num_frames) = sample.shape[:2] timesteps = timesteps.expand(batch_size) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb) time_embeds = self.add_time_proj(added_time_ids.flatten()) time_embeds = time_embeds.reshape((batch_size, -1)) time_embeds = time_embeds.to(emb.dtype) aug_emb = self.add_embedding(time_embeds) emb = emb + aug_emb sample = sample.flatten(0, 1) emb = emb.repeat_interleave(num_frames, dim=0) encoder_hidden_states = encoder_hidden_states.repeat_interleave(num_frames, dim=0) sample = self.conv_in(sample) image_only_indicator = torch.zeros(batch_size, num_frames, dtype=sample.dtype, device=sample.device) down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, image_only_indicator=image_only_indicator) down_block_res_samples += res_samples sample = self.mid_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator) for (i, upsample_block) in 
enumerate(self.up_blocks): res_samples = down_block_res_samples[-len(upsample_block.resnets):] down_block_res_samples = down_block_res_samples[:-len(upsample_block.resnets)] if hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, image_only_indicator=image_only_indicator) else: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, image_only_indicator=image_only_indicator) sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) sample = sample.reshape(batch_size, num_frames, *sample.shape[1:]) if not return_dict: return (sample,) return UNetSpatioTemporalConditionOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/unet_stable_cascade.py import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin from ...utils import BaseOutput from ..attention_processor import Attention from ..modeling_utils import ModelMixin class SDCascadeLayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, x): x = x.permute(0, 2, 3, 1) x = super().forward(x) return x.permute(0, 3, 1, 2) class SDCascadeTimestepBlock(nn.Module): def __init__(self, c, c_timestep, conds=[]): super().__init__() self.mapper = nn.Linear(c_timestep, c * 2) self.conds = conds for cname in conds: setattr(self, f'mapper_{cname}', nn.Linear(c_timestep, c * 2)) def forward(self, x, t): t = t.chunk(len(self.conds) + 1, dim=1) (a, b) = self.mapper(t[0])[:, :, None, None].chunk(2, dim=1) for (i, c) in enumerate(self.conds): (ac, bc) = getattr(self, f'mapper_{c}')(t[i + 1])[:, :, None, None].chunk(2, dim=1) (a, b) = (a + ac, b + bc) return x * (1 + a) + b class SDCascadeResBlock(nn.Module): def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): super().__init__() self.depthwise = nn.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) self.norm = SDCascadeLayerNorm(c, elementwise_affine=False, eps=1e-06) self.channelwise = nn.Sequential(nn.Linear(c + c_skip, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), nn.Linear(c * 4, c)) def forward(self, x, x_skip=None): x_res = x x = self.norm(self.depthwise(x)) if x_skip is not None: x = torch.cat([x, x_skip], dim=1) x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x + x_res class GlobalResponseNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True) stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-06) return self.gamma * (x * stand_div_norm) + self.beta + x class SDCascadeAttnBlock(nn.Module): def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0): super().__init__() self.self_attn = self_attn self.norm = SDCascadeLayerNorm(c, elementwise_affine=False, eps=1e-06) self.attention = Attention(query_dim=c, heads=nhead, dim_head=c // nhead, dropout=dropout, bias=True) self.kv_mapper = nn.Sequential(nn.SiLU(), nn.Linear(c_cond, c)) def forward(self, x, kv): kv = self.kv_mapper(kv) norm_x = self.norm(x) if 
self.self_attn: (batch_size, channel, _, _) = x.shape kv = torch.cat([norm_x.view(batch_size, channel, -1).transpose(1, 2), kv], dim=1) x = x + self.attention(norm_x, encoder_hidden_states=kv) return x class UpDownBlock2d(nn.Module): def __init__(self, in_channels, out_channels, mode, enabled=True): super().__init__() if mode not in ['up', 'down']: raise ValueError(f'{mode} not supported') interpolation = nn.Upsample(scale_factor=2 if mode == 'up' else 0.5, mode='bilinear', align_corners=True) if enabled else nn.Identity() mapping = nn.Conv2d(in_channels, out_channels, kernel_size=1) self.blocks = nn.ModuleList([interpolation, mapping] if mode == 'up' else [mapping, interpolation]) def forward(self, x): for block in self.blocks: x = block(x) return x @dataclass class StableCascadeUNetOutput(BaseOutput): sample: torch.Tensor = None class StableCascadeUNet(ModelMixin, ConfigMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, in_channels: int=16, out_channels: int=16, timestep_ratio_embedding_dim: int=64, patch_size: int=1, conditioning_dim: int=2048, block_out_channels: Tuple[int]=(2048, 2048), num_attention_heads: Tuple[int]=(32, 32), down_num_layers_per_block: Tuple[int]=(8, 24), up_num_layers_per_block: Tuple[int]=(24, 8), down_blocks_repeat_mappers: Optional[Tuple[int]]=(1, 1), up_blocks_repeat_mappers: Optional[Tuple[int]]=(1, 1), block_types_per_layer: Tuple[Tuple[str]]=(('SDCascadeResBlock', 'SDCascadeTimestepBlock', 'SDCascadeAttnBlock'), ('SDCascadeResBlock', 'SDCascadeTimestepBlock', 'SDCascadeAttnBlock')), clip_text_in_channels: Optional[int]=None, clip_text_pooled_in_channels=1280, clip_image_in_channels: Optional[int]=None, clip_seq=4, effnet_in_channels: Optional[int]=None, pixel_mapper_in_channels: Optional[int]=None, kernel_size=3, dropout: Union[float, Tuple[float]]=(0.1, 0.1), self_attn: Union[bool, Tuple[bool]]=True, timestep_conditioning_type: Tuple[str]=('sca', 'crp'), switch_level: Optional[Tuple[bool]]=None): super().__init__() if len(block_out_channels) != len(down_num_layers_per_block): raise ValueError(f'Number of elements in `down_num_layers_per_block` must match the length of `block_out_channels`: {len(block_out_channels)}') elif len(block_out_channels) != len(up_num_layers_per_block): raise ValueError(f'Number of elements in `up_num_layers_per_block` must match the length of `block_out_channels`: {len(block_out_channels)}') elif len(block_out_channels) != len(down_blocks_repeat_mappers): raise ValueError(f'Number of elements in `down_blocks_repeat_mappers` must match the length of `block_out_channels`: {len(block_out_channels)}') elif len(block_out_channels) != len(up_blocks_repeat_mappers): raise ValueError(f'Number of elements in `up_blocks_repeat_mappers` must match the length of `block_out_channels`: {len(block_out_channels)}') elif len(block_out_channels) != len(block_types_per_layer): raise ValueError(f'Number of elements in `block_types_per_layer` must match the length of `block_out_channels`: {len(block_out_channels)}') if isinstance(dropout, float): dropout = (dropout,) * len(block_out_channels) if isinstance(self_attn, bool): self_attn = (self_attn,) * len(block_out_channels) if effnet_in_channels is not None: self.effnet_mapper = nn.Sequential(nn.Conv2d(effnet_in_channels, block_out_channels[0] * 4, kernel_size=1), nn.GELU(), nn.Conv2d(block_out_channels[0] * 4, block_out_channels[0], kernel_size=1), SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-06)) if 
pixel_mapper_in_channels is not None: self.pixels_mapper = nn.Sequential(nn.Conv2d(pixel_mapper_in_channels, block_out_channels[0] * 4, kernel_size=1), nn.GELU(), nn.Conv2d(block_out_channels[0] * 4, block_out_channels[0], kernel_size=1), SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-06)) self.clip_txt_pooled_mapper = nn.Linear(clip_text_pooled_in_channels, conditioning_dim * clip_seq) if clip_text_in_channels is not None: self.clip_txt_mapper = nn.Linear(clip_text_in_channels, conditioning_dim) if clip_image_in_channels is not None: self.clip_img_mapper = nn.Linear(clip_image_in_channels, conditioning_dim * clip_seq) self.clip_norm = nn.LayerNorm(conditioning_dim, elementwise_affine=False, eps=1e-06) self.embedding = nn.Sequential(nn.PixelUnshuffle(patch_size), nn.Conv2d(in_channels * patch_size ** 2, block_out_channels[0], kernel_size=1), SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-06)) def get_block(block_type, in_channels, nhead, c_skip=0, dropout=0, self_attn=True): if block_type == 'SDCascadeResBlock': return SDCascadeResBlock(in_channels, c_skip, kernel_size=kernel_size, dropout=dropout) elif block_type == 'SDCascadeAttnBlock': return SDCascadeAttnBlock(in_channels, conditioning_dim, nhead, self_attn=self_attn, dropout=dropout) elif block_type == 'SDCascadeTimestepBlock': return SDCascadeTimestepBlock(in_channels, timestep_ratio_embedding_dim, conds=timestep_conditioning_type) else: raise ValueError(f'Block type {block_type} not supported') self.down_blocks = nn.ModuleList() self.down_downscalers = nn.ModuleList() self.down_repeat_mappers = nn.ModuleList() for i in range(len(block_out_channels)): if i > 0: self.down_downscalers.append(nn.Sequential(SDCascadeLayerNorm(block_out_channels[i - 1], elementwise_affine=False, eps=1e-06), UpDownBlock2d(block_out_channels[i - 1], block_out_channels[i], mode='down', enabled=switch_level[i - 1]) if switch_level is not None else nn.Conv2d(block_out_channels[i - 1], block_out_channels[i], kernel_size=2, stride=2))) else: self.down_downscalers.append(nn.Identity()) down_block = nn.ModuleList() for _ in range(down_num_layers_per_block[i]): for block_type in block_types_per_layer[i]: block = get_block(block_type, block_out_channels[i], num_attention_heads[i], dropout=dropout[i], self_attn=self_attn[i]) down_block.append(block) self.down_blocks.append(down_block) if down_blocks_repeat_mappers is not None: block_repeat_mappers = nn.ModuleList() for _ in range(down_blocks_repeat_mappers[i] - 1): block_repeat_mappers.append(nn.Conv2d(block_out_channels[i], block_out_channels[i], kernel_size=1)) self.down_repeat_mappers.append(block_repeat_mappers) self.up_blocks = nn.ModuleList() self.up_upscalers = nn.ModuleList() self.up_repeat_mappers = nn.ModuleList() for i in reversed(range(len(block_out_channels))): if i > 0: self.up_upscalers.append(nn.Sequential(SDCascadeLayerNorm(block_out_channels[i], elementwise_affine=False, eps=1e-06), UpDownBlock2d(block_out_channels[i], block_out_channels[i - 1], mode='up', enabled=switch_level[i - 1]) if switch_level is not None else nn.ConvTranspose2d(block_out_channels[i], block_out_channels[i - 1], kernel_size=2, stride=2))) else: self.up_upscalers.append(nn.Identity()) up_block = nn.ModuleList() for j in range(up_num_layers_per_block[::-1][i]): for (k, block_type) in enumerate(block_types_per_layer[i]): c_skip = block_out_channels[i] if i < len(block_out_channels) - 1 and j == k == 0 else 0 block = get_block(block_type, block_out_channels[i], 
num_attention_heads[i], c_skip=c_skip, dropout=dropout[i], self_attn=self_attn[i]) up_block.append(block) self.up_blocks.append(up_block) if up_blocks_repeat_mappers is not None: block_repeat_mappers = nn.ModuleList() for _ in range(up_blocks_repeat_mappers[::-1][i] - 1): block_repeat_mappers.append(nn.Conv2d(block_out_channels[i], block_out_channels[i], kernel_size=1)) self.up_repeat_mappers.append(block_repeat_mappers) self.clf = nn.Sequential(SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-06), nn.Conv2d(block_out_channels[0], out_channels * patch_size ** 2, kernel_size=1), nn.PixelShuffle(patch_size)) self.gradient_checkpointing = False def _set_gradient_checkpointing(self, value=False): self.gradient_checkpointing = value def _init_weights(self, m): if isinstance(m, (nn.Conv2d, nn.Linear)): torch.nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) nn.init.normal_(self.clip_txt_pooled_mapper.weight, std=0.02) nn.init.normal_(self.clip_txt_mapper.weight, std=0.02) if hasattr(self, 'clip_txt_mapper') else None nn.init.normal_(self.clip_img_mapper.weight, std=0.02) if hasattr(self, 'clip_img_mapper') else None if hasattr(self, 'effnet_mapper'): nn.init.normal_(self.effnet_mapper[0].weight, std=0.02) nn.init.normal_(self.effnet_mapper[2].weight, std=0.02) if hasattr(self, 'pixels_mapper'): nn.init.normal_(self.pixels_mapper[0].weight, std=0.02) nn.init.normal_(self.pixels_mapper[2].weight, std=0.02) torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) nn.init.constant_(self.clf[1].weight, 0) for level_block in self.down_blocks + self.up_blocks: for block in level_block: if isinstance(block, SDCascadeResBlock): block.channelwise[-1].weight.data *= np.sqrt(1 / sum(self.config.blocks[0])) elif isinstance(block, SDCascadeTimestepBlock): nn.init.constant_(block.mapper.weight, 0) def get_timestep_ratio_embedding(self, timestep_ratio, max_positions=10000): r = timestep_ratio * max_positions half_dim = self.config.timestep_ratio_embedding_dim // 2 emb = math.log(max_positions) / (half_dim - 1) emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() emb = r[:, None] * emb[None, :] emb = torch.cat([emb.sin(), emb.cos()], dim=1) if self.config.timestep_ratio_embedding_dim % 2 == 1: emb = nn.functional.pad(emb, (0, 1), mode='constant') return emb.to(dtype=r.dtype) def get_clip_embeddings(self, clip_txt_pooled, clip_txt=None, clip_img=None): if len(clip_txt_pooled.shape) == 2: clip_txt_pool = clip_txt_pooled.unsqueeze(1) clip_txt_pool = self.clip_txt_pooled_mapper(clip_txt_pooled).view(clip_txt_pooled.size(0), clip_txt_pooled.size(1) * self.config.clip_seq, -1) if clip_txt is not None and clip_img is not None: clip_txt = self.clip_txt_mapper(clip_txt) if len(clip_img.shape) == 2: clip_img = clip_img.unsqueeze(1) clip_img = self.clip_img_mapper(clip_img).view(clip_img.size(0), clip_img.size(1) * self.config.clip_seq, -1) clip = torch.cat([clip_txt, clip_txt_pool, clip_img], dim=1) else: clip = clip_txt_pool return self.clip_norm(clip) def _down_encode(self, x, r_embed, clip): level_outputs = [] block_group = zip(self.down_blocks, self.down_downscalers, self.down_repeat_mappers) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward for (down_block, downscaler, repmap) in block_group: x = downscaler(x) for i in range(len(repmap) + 1): for block in down_block: if isinstance(block, SDCascadeResBlock): x = 
torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) elif isinstance(block, SDCascadeAttnBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, clip, use_reentrant=False) elif isinstance(block, SDCascadeTimestepBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, r_embed, use_reentrant=False) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) if i < len(repmap): x = repmap[i](x) level_outputs.insert(0, x) else: for (down_block, downscaler, repmap) in block_group: x = downscaler(x) for i in range(len(repmap) + 1): for block in down_block: if isinstance(block, SDCascadeResBlock): x = block(x) elif isinstance(block, SDCascadeAttnBlock): x = block(x, clip) elif isinstance(block, SDCascadeTimestepBlock): x = block(x, r_embed) else: x = block(x) if i < len(repmap): x = repmap[i](x) level_outputs.insert(0, x) return level_outputs def _up_decode(self, level_outputs, r_embed, clip): x = level_outputs[0] block_group = zip(self.up_blocks, self.up_upscalers, self.up_repeat_mappers) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward for (i, (up_block, upscaler, repmap)) in enumerate(block_group): for j in range(len(repmap) + 1): for (k, block) in enumerate(up_block): if isinstance(block, SDCascadeResBlock): skip = level_outputs[i] if k == 0 and i > 0 else None if skip is not None and (x.size(-1) != skip.size(-1) or x.size(-2) != skip.size(-2)): orig_type = x.dtype x = torch.nn.functional.interpolate(x.float(), skip.shape[-2:], mode='bilinear', align_corners=True) x = x.to(orig_type) x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, skip, use_reentrant=False) elif isinstance(block, SDCascadeAttnBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, clip, use_reentrant=False) elif isinstance(block, SDCascadeTimestepBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, r_embed, use_reentrant=False) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) if j < len(repmap): x = repmap[j](x) x = upscaler(x) else: for (i, (up_block, upscaler, repmap)) in enumerate(block_group): for j in range(len(repmap) + 1): for (k, block) in enumerate(up_block): if isinstance(block, SDCascadeResBlock): skip = level_outputs[i] if k == 0 and i > 0 else None if skip is not None and (x.size(-1) != skip.size(-1) or x.size(-2) != skip.size(-2)): orig_type = x.dtype x = torch.nn.functional.interpolate(x.float(), skip.shape[-2:], mode='bilinear', align_corners=True) x = x.to(orig_type) x = block(x, skip) elif isinstance(block, SDCascadeAttnBlock): x = block(x, clip) elif isinstance(block, SDCascadeTimestepBlock): x = block(x, r_embed) else: x = block(x) if j < len(repmap): x = repmap[j](x) x = upscaler(x) return x def forward(self, sample, timestep_ratio, clip_text_pooled, clip_text=None, clip_img=None, effnet=None, pixels=None, sca=None, crp=None, return_dict=True): if pixels is None: pixels = sample.new_zeros(sample.size(0), 3, 8, 8) timestep_ratio_embed = self.get_timestep_ratio_embedding(timestep_ratio) for c in self.config.timestep_conditioning_type: if c == 'sca': cond = sca elif c == 'crp': cond = crp else: cond = None t_cond = cond or torch.zeros_like(timestep_ratio) timestep_ratio_embed = torch.cat([timestep_ratio_embed, self.get_timestep_ratio_embedding(t_cond)], dim=1) clip = 
self.get_clip_embeddings(clip_txt_pooled=clip_text_pooled, clip_txt=clip_text, clip_img=clip_img) x = self.embedding(sample) if hasattr(self, 'effnet_mapper') and effnet is not None: x = x + self.effnet_mapper(nn.functional.interpolate(effnet, size=x.shape[-2:], mode='bilinear', align_corners=True)) if hasattr(self, 'pixels_mapper'): x = x + nn.functional.interpolate(self.pixels_mapper(pixels), size=x.shape[-2:], mode='bilinear', align_corners=True) level_outputs = self._down_encode(x, timestep_ratio_embed, clip) x = self._up_decode(level_outputs, timestep_ratio_embed, clip) sample = self.clf(x) if not return_dict: return (sample,) return StableCascadeUNetOutput(sample=sample) # File: diffusers-main/src/diffusers/models/unets/uvit_2d.py from typing import Dict, Union import torch import torch.nn.functional as F from torch import nn from torch.utils.checkpoint import checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ..attention import BasicTransformerBlock, SkipFFTransformerBlock from ..attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor from ..embeddings import TimestepEmbedding, get_timestep_embedding from ..modeling_utils import ModelMixin from ..normalization import GlobalResponseNorm, RMSNorm from ..resnet import Downsample2D, Upsample2D class UVit2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, hidden_size: int=1024, use_bias: bool=False, hidden_dropout: float=0.0, cond_embed_dim: int=768, micro_cond_encode_dim: int=256, micro_cond_embed_dim: int=1280, encoder_hidden_size: int=768, vocab_size: int=8256, codebook_size: int=8192, in_channels: int=768, block_out_channels: int=768, num_res_blocks: int=3, downsample: bool=False, upsample: bool=False, block_num_heads: int=12, num_hidden_layers: int=22, num_attention_heads: int=16, attention_dropout: float=0.0, intermediate_size: int=2816, layer_norm_eps: float=1e-06, ln_elementwise_affine: bool=True, sample_size: int=64): super().__init__() self.encoder_proj = nn.Linear(encoder_hidden_size, hidden_size, bias=use_bias) self.encoder_proj_layer_norm = RMSNorm(hidden_size, layer_norm_eps, ln_elementwise_affine) self.embed = UVit2DConvEmbed(in_channels, block_out_channels, vocab_size, ln_elementwise_affine, layer_norm_eps, use_bias) self.cond_embed = TimestepEmbedding(micro_cond_embed_dim + cond_embed_dim, hidden_size, sample_proj_bias=use_bias) self.down_block = UVitBlock(block_out_channels, num_res_blocks, hidden_size, hidden_dropout, ln_elementwise_affine, layer_norm_eps, use_bias, block_num_heads, attention_dropout, downsample, False) self.project_to_hidden_norm = RMSNorm(block_out_channels, layer_norm_eps, ln_elementwise_affine) self.project_to_hidden = nn.Linear(block_out_channels, hidden_size, bias=use_bias) self.transformer_layers = nn.ModuleList([BasicTransformerBlock(dim=hidden_size, num_attention_heads=num_attention_heads, attention_head_dim=hidden_size // num_attention_heads, dropout=hidden_dropout, cross_attention_dim=hidden_size, attention_bias=use_bias, norm_type='ada_norm_continuous', ada_norm_continous_conditioning_embedding_dim=hidden_size, norm_elementwise_affine=ln_elementwise_affine, norm_eps=layer_norm_eps, ada_norm_bias=use_bias, ff_inner_dim=intermediate_size, ff_bias=use_bias, attention_out_bias=use_bias) for _ in range(num_hidden_layers)]) self.project_from_hidden_norm = 
RMSNorm(hidden_size, layer_norm_eps, ln_elementwise_affine) self.project_from_hidden = nn.Linear(hidden_size, block_out_channels, bias=use_bias) self.up_block = UVitBlock(block_out_channels, num_res_blocks, hidden_size, hidden_dropout, ln_elementwise_affine, layer_norm_eps, use_bias, block_num_heads, attention_dropout, downsample=False, upsample=upsample) self.mlm_layer = ConvMlmLayer(block_out_channels, in_channels, use_bias, ln_elementwise_affine, layer_norm_eps, codebook_size) self.gradient_checkpointing = False def _set_gradient_checkpointing(self, module, value: bool=False) -> None: pass def forward(self, input_ids, encoder_hidden_states, pooled_text_emb, micro_conds, cross_attention_kwargs=None): encoder_hidden_states = self.encoder_proj(encoder_hidden_states) encoder_hidden_states = self.encoder_proj_layer_norm(encoder_hidden_states) micro_cond_embeds = get_timestep_embedding(micro_conds.flatten(), self.config.micro_cond_encode_dim, flip_sin_to_cos=True, downscale_freq_shift=0) micro_cond_embeds = micro_cond_embeds.reshape((input_ids.shape[0], -1)) pooled_text_emb = torch.cat([pooled_text_emb, micro_cond_embeds], dim=1) pooled_text_emb = pooled_text_emb.to(dtype=self.dtype) pooled_text_emb = self.cond_embed(pooled_text_emb).to(encoder_hidden_states.dtype) hidden_states = self.embed(input_ids) hidden_states = self.down_block(hidden_states, pooled_text_emb=pooled_text_emb, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs) (batch_size, channels, height, width) = hidden_states.shape hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels) hidden_states = self.project_to_hidden_norm(hidden_states) hidden_states = self.project_to_hidden(hidden_states) for layer in self.transformer_layers: if self.training and self.gradient_checkpointing: def layer_(*args): return checkpoint(layer, *args) else: layer_ = layer hidden_states = layer_(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs={'pooled_text_emb': pooled_text_emb}) hidden_states = self.project_from_hidden_norm(hidden_states) hidden_states = self.project_from_hidden(hidden_states) hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2) hidden_states = self.up_block(hidden_states, pooled_text_emb=pooled_text_emb, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs) logits = self.mlm_layer(hidden_states) return logits @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) class UVit2DConvEmbed(nn.Module): def __init__(self, in_channels, block_out_channels, vocab_size, elementwise_affine, eps, bias): super().__init__() self.embeddings = nn.Embedding(vocab_size, in_channels) self.layer_norm = RMSNorm(in_channels, eps, elementwise_affine) self.conv = nn.Conv2d(in_channels, block_out_channels, kernel_size=1, bias=bias) def forward(self, input_ids): embeddings = self.embeddings(input_ids) embeddings = self.layer_norm(embeddings) embeddings = embeddings.permute(0, 3, 1, 2) embeddings = self.conv(embeddings) return embeddings class UVitBlock(nn.Module): def __init__(self, channels, num_res_blocks: int, hidden_size, hidden_dropout, ln_elementwise_affine, layer_norm_eps, use_bias, block_num_heads, attention_dropout, downsample: bool, upsample: bool): super().__init__() if downsample: self.downsample = Downsample2D(channels, use_conv=True, padding=0, name='Conv2d_0', kernel_size=2, norm_type='rms_norm', eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine, bias=use_bias) else: self.downsample = None self.res_blocks = nn.ModuleList([ConvNextBlock(channels, layer_norm_eps, ln_elementwise_affine, use_bias, hidden_dropout, hidden_size) for i in range(num_res_blocks)]) self.attention_blocks = nn.ModuleList([SkipFFTransformerBlock(channels, block_num_heads, channels // block_num_heads, hidden_size, use_bias, attention_dropout, channels, attention_bias=use_bias, attention_out_bias=use_bias) for _ in range(num_res_blocks)]) if upsample: self.upsample = Upsample2D(channels, use_conv_transpose=True, kernel_size=2, padding=0, name='conv', norm_type='rms_norm', eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine, bias=use_bias, interpolate=False) else: self.upsample = None def forward(self, x, pooled_text_emb, encoder_hidden_states, cross_attention_kwargs): if self.downsample is not None: x = self.downsample(x) for (res_block, attention_block) in zip(self.res_blocks, self.attention_blocks): x = res_block(x, pooled_text_emb) (batch_size, channels, height, width) = x.shape x = x.view(batch_size, channels, height * width).permute(0, 2, 1) x = attention_block(x, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs) x = x.permute(0, 2, 1).view(batch_size, channels, height, width) if self.upsample is not None: x = self.upsample(x) return x class ConvNextBlock(nn.Module): def __init__(self, channels, layer_norm_eps, ln_elementwise_affine, use_bias, hidden_dropout, hidden_size, res_ffn_factor=4): super().__init__() self.depthwise = 
nn.Conv2d(channels, channels, kernel_size=3, padding=1, groups=channels, bias=use_bias) self.norm = RMSNorm(channels, layer_norm_eps, ln_elementwise_affine) self.channelwise_linear_1 = nn.Linear(channels, int(channels * res_ffn_factor), bias=use_bias) self.channelwise_act = nn.GELU() self.channelwise_norm = GlobalResponseNorm(int(channels * res_ffn_factor)) self.channelwise_linear_2 = nn.Linear(int(channels * res_ffn_factor), channels, bias=use_bias) self.channelwise_dropout = nn.Dropout(hidden_dropout) self.cond_embeds_mapper = nn.Linear(hidden_size, channels * 2, use_bias) def forward(self, x, cond_embeds): x_res = x x = self.depthwise(x) x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.channelwise_linear_1(x) x = self.channelwise_act(x) x = self.channelwise_norm(x) x = self.channelwise_linear_2(x) x = self.channelwise_dropout(x) x = x.permute(0, 3, 1, 2) x = x + x_res (scale, shift) = self.cond_embeds_mapper(F.silu(cond_embeds)).chunk(2, dim=1) x = x * (1 + scale[:, :, None, None]) + shift[:, :, None, None] return x class ConvMlmLayer(nn.Module): def __init__(self, block_out_channels: int, in_channels: int, use_bias: bool, ln_elementwise_affine: bool, layer_norm_eps: float, codebook_size: int): super().__init__() self.conv1 = nn.Conv2d(block_out_channels, in_channels, kernel_size=1, bias=use_bias) self.layer_norm = RMSNorm(in_channels, layer_norm_eps, ln_elementwise_affine) self.conv2 = nn.Conv2d(in_channels, codebook_size, kernel_size=1, bias=use_bias) def forward(self, hidden_states): hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) logits = self.conv2(hidden_states) return logits # File: diffusers-main/src/diffusers/models/upsampling.py from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from ..utils import deprecate from .normalization import RMSNorm class Upsample1D(nn.Module): def __init__(self, channels: int, use_conv: bool=False, use_conv_transpose: bool=False, out_channels: Optional[int]=None, name: str='conv'): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.use_conv_transpose = use_conv_transpose self.name = name self.conv = None if use_conv_transpose: self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) elif use_conv: self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) def forward(self, inputs: torch.Tensor) -> torch.Tensor: assert inputs.shape[1] == self.channels if self.use_conv_transpose: return self.conv(inputs) outputs = F.interpolate(inputs, scale_factor=2.0, mode='nearest') if self.use_conv: outputs = self.conv(outputs) return outputs class Upsample2D(nn.Module): def __init__(self, channels: int, use_conv: bool=False, use_conv_transpose: bool=False, out_channels: Optional[int]=None, name: str='conv', kernel_size: Optional[int]=None, padding=1, norm_type=None, eps=None, elementwise_affine=None, bias=True, interpolate=True): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.use_conv_transpose = use_conv_transpose self.name = name self.interpolate = interpolate if norm_type == 'ln_norm': self.norm = nn.LayerNorm(channels, eps, elementwise_affine) elif norm_type == 'rms_norm': self.norm = RMSNorm(channels, eps, elementwise_affine) elif norm_type is None: self.norm = None else: raise ValueError(f'unknown norm_type: {norm_type}') conv = None if use_conv_transpose: if 
kernel_size is None: kernel_size = 4 conv = nn.ConvTranspose2d(channels, self.out_channels, kernel_size=kernel_size, stride=2, padding=padding, bias=bias) elif use_conv: if kernel_size is None: kernel_size = 3 conv = nn.Conv2d(self.channels, self.out_channels, kernel_size=kernel_size, padding=padding, bias=bias) if name == 'conv': self.conv = conv else: self.Conv2d_0 = conv def forward(self, hidden_states: torch.Tensor, output_size: Optional[int]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) assert hidden_states.shape[1] == self.channels if self.norm is not None: hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) if self.use_conv_transpose: return self.conv(hidden_states) dtype = hidden_states.dtype if dtype == torch.bfloat16: hidden_states = hidden_states.to(torch.float32) if hidden_states.shape[0] >= 64: hidden_states = hidden_states.contiguous() if self.interpolate: if output_size is None: hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode='nearest') else: hidden_states = F.interpolate(hidden_states, size=output_size, mode='nearest') if dtype == torch.bfloat16: hidden_states = hidden_states.to(dtype) if self.use_conv: if self.name == 'conv': hidden_states = self.conv(hidden_states) else: hidden_states = self.Conv2d_0(hidden_states) return hidden_states class FirUpsample2D(nn.Module): def __init__(self, channels: Optional[int]=None, out_channels: Optional[int]=None, use_conv: bool=False, fir_kernel: Tuple[int, int, int, int]=(1, 3, 3, 1)): super().__init__() out_channels = out_channels if out_channels else channels if use_conv: self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) self.use_conv = use_conv self.fir_kernel = fir_kernel self.out_channels = out_channels def _upsample_2d(self, hidden_states: torch.Tensor, weight: Optional[torch.Tensor]=None, kernel: Optional[torch.Tensor]=None, factor: int=2, gain: float=1) -> torch.Tensor: assert isinstance(factor, int) and factor >= 1 if kernel is None: kernel = [1] * factor kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * (gain * factor ** 2) if self.use_conv: convH = weight.shape[2] convW = weight.shape[3] inC = weight.shape[1] pad_value = kernel.shape[0] - factor - (convW - 1) stride = (factor, factor) output_shape = ((hidden_states.shape[2] - 1) * factor + convH, (hidden_states.shape[3] - 1) * factor + convW) output_padding = (output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH, output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = hidden_states.shape[1] // inC weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW)) weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4) weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW)) inverse_conv = F.conv_transpose2d(hidden_states, weight, stride=stride, output_padding=output_padding, padding=0) output = upfirdn2d_native(inverse_conv, torch.tensor(kernel, device=inverse_conv.device), pad=((pad_value + 1) // 2 + factor - 1, 
pad_value // 2 + 1)) else: pad_value = kernel.shape[0] - factor output = upfirdn2d_native(hidden_states, torch.tensor(kernel, device=hidden_states.device), up=factor, pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2)) return output def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.use_conv: height = self._upsample_2d(hidden_states, self.Conv2d_0.weight, kernel=self.fir_kernel) height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: height = self._upsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) return height class KUpsample2D(nn.Module): def __init__(self, pad_mode: str='reflect'): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) * 2 self.pad = kernel_1d.shape[1] // 2 - 1 self.register_buffer('kernel', kernel_1d.T @ kernel_1d, persistent=False) def forward(self, inputs: torch.Tensor) -> torch.Tensor: inputs = F.pad(inputs, ((self.pad + 1) // 2,) * 4, self.pad_mode) weight = inputs.new_zeros([inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) indices = torch.arange(inputs.shape[1], device=inputs.device) kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) weight[indices, indices] = kernel return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1) class CogVideoXUpsample3D(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: int=3, stride: int=1, padding: int=1, compress_time: bool=False) -> None: super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.compress_time = compress_time def forward(self, inputs: torch.Tensor) -> torch.Tensor: if self.compress_time: if inputs.shape[2] > 1 and inputs.shape[2] % 2 == 1: (x_first, x_rest) = (inputs[:, :, 0], inputs[:, :, 1:]) x_first = F.interpolate(x_first, scale_factor=2.0) x_rest = F.interpolate(x_rest, scale_factor=2.0) x_first = x_first[:, :, None, :, :] inputs = torch.cat([x_first, x_rest], dim=2) elif inputs.shape[2] > 1: inputs = F.interpolate(inputs, scale_factor=2.0) else: inputs = inputs.squeeze(2) inputs = F.interpolate(inputs, scale_factor=2.0) inputs = inputs[:, :, None, :, :] else: (b, c, t, h, w) = inputs.shape inputs = inputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w) inputs = F.interpolate(inputs, scale_factor=2.0) inputs = inputs.reshape(b, t, c, *inputs.shape[2:]).permute(0, 2, 1, 3, 4) (b, c, t, h, w) = inputs.shape inputs = inputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w) inputs = self.conv(inputs) inputs = inputs.reshape(b, t, *inputs.shape[1:]).permute(0, 2, 1, 3, 4) return inputs def upfirdn2d_native(tensor: torch.Tensor, kernel: torch.Tensor, up: int=1, down: int=1, pad: Tuple[int, int]=(0, 0)) -> torch.Tensor: up_x = up_y = up down_x = down_y = down pad_x0 = pad_y0 = pad[0] pad_x1 = pad_y1 = pad[1] (_, channel, in_h, in_w) = tensor.shape tensor = tensor.reshape(-1, in_h, in_w, 1) (_, in_h, in_w, minor) = tensor.shape (kernel_h, kernel_w) = kernel.shape out = tensor.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out.to(tensor.device) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = 
torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upsample_2d(hidden_states: torch.Tensor, kernel: Optional[torch.Tensor]=None, factor: int=2, gain: float=1) -> torch.Tensor: assert isinstance(factor, int) and factor >= 1 if kernel is None: kernel = [1] * factor kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * (gain * factor ** 2) pad_value = kernel.shape[0] - factor output = upfirdn2d_native(hidden_states, kernel.to(device=hidden_states.device), up=factor, pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2)) return output # File: diffusers-main/src/diffusers/models/vae_flax.py import math from functools import partial from typing import Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .modeling_flax_utils import FlaxModelMixin @flax.struct.dataclass class FlaxDecoderOutput(BaseOutput): sample: jnp.ndarray @flax.struct.dataclass class FlaxAutoencoderKLOutput(BaseOutput): latent_dist: 'FlaxDiagonalGaussianDistribution' class FlaxUpsample2D(nn.Module): in_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv(self.in_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) def __call__(self, hidden_states): (batch, height, width, channels) = hidden_states.shape hidden_states = jax.image.resize(hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest') hidden_states = self.conv(hidden_states) return hidden_states class FlaxDownsample2D(nn.Module): in_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv(self.in_channels, kernel_size=(3, 3), strides=(2, 2), padding='VALID', dtype=self.dtype) def __call__(self, hidden_states): pad = ((0, 0), (0, 1), (0, 1), (0, 0)) hidden_states = jnp.pad(hidden_states, pad_width=pad) hidden_states = self.conv(hidden_states) return hidden_states class FlaxResnetBlock2D(nn.Module): in_channels: int out_channels: int = None dropout: float = 0.0 groups: int = 32 use_nin_shortcut: bool = None dtype: jnp.dtype = jnp.float32 def setup(self): out_channels = self.in_channels if self.out_channels is None else self.out_channels self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-06) self.conv1 = nn.Conv(out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) self.norm2 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-06) self.dropout_layer = nn.Dropout(self.dropout) self.conv2 = nn.Conv(out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut self.conv_shortcut = None if use_nin_shortcut: self.conv_shortcut = nn.Conv(out_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): residual = hidden_states hidden_states = 
self.norm1(hidden_states) hidden_states = nn.swish(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = nn.swish(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: residual = self.conv_shortcut(residual) return hidden_states + residual class FlaxAttentionBlock(nn.Module): channels: int num_head_channels: int = None num_groups: int = 32 dtype: jnp.dtype = jnp.float32 def setup(self): self.num_heads = self.channels // self.num_head_channels if self.num_head_channels is not None else 1 dense = partial(nn.Dense, self.channels, dtype=self.dtype) self.group_norm = nn.GroupNorm(num_groups=self.num_groups, epsilon=1e-06) (self.query, self.key, self.value) = (dense(), dense(), dense()) self.proj_attn = dense() def transpose_for_scores(self, projection): new_projection_shape = projection.shape[:-1] + (self.num_heads, -1) new_projection = projection.reshape(new_projection_shape) new_projection = jnp.transpose(new_projection, (0, 2, 1, 3)) return new_projection def __call__(self, hidden_states): residual = hidden_states (batch, height, width, channels) = hidden_states.shape hidden_states = self.group_norm(hidden_states) hidden_states = hidden_states.reshape((batch, height * width, channels)) query = self.query(hidden_states) key = self.key(hidden_states) value = self.value(hidden_states) query = self.transpose_for_scores(query) key = self.transpose_for_scores(key) value = self.transpose_for_scores(value) scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads)) attn_weights = jnp.einsum('...qc,...kc->...qk', query * scale, key * scale) attn_weights = nn.softmax(attn_weights, axis=-1) hidden_states = jnp.einsum('...kc,...qk->...qc', value, attn_weights) hidden_states = jnp.transpose(hidden_states, (0, 2, 1, 3)) new_hidden_states_shape = hidden_states.shape[:-2] + (self.channels,) hidden_states = hidden_states.reshape(new_hidden_states_shape) hidden_states = self.proj_attn(hidden_states) hidden_states = hidden_states.reshape((batch, height, width, channels)) hidden_states = hidden_states + residual return hidden_states class FlaxDownEncoderBlock2D(nn.Module): in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 add_downsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout=self.dropout, groups=self.resnet_groups, dtype=self.dtype) resnets.append(res_block) self.resnets = resnets if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): for resnet in self.resnets: hidden_states = resnet(hidden_states, deterministic=deterministic) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) return hidden_states class FlaxUpDecoderBlock2D(nn.Module): in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 add_upsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout=self.dropout, groups=self.resnet_groups, 
dtype=self.dtype) resnets.append(res_block) self.resnets = resnets if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): for resnet in self.resnets: hidden_states = resnet(hidden_states, deterministic=deterministic) if self.add_upsample: hidden_states = self.upsamplers_0(hidden_states) return hidden_states class FlaxUNetMidBlock2D(nn.Module): in_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 num_attention_heads: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): resnet_groups = self.resnet_groups if self.resnet_groups is not None else min(self.in_channels // 4, 32) resnets = [FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout=self.dropout, groups=resnet_groups, dtype=self.dtype)] attentions = [] for _ in range(self.num_layers): attn_block = FlaxAttentionBlock(channels=self.in_channels, num_head_channels=self.num_attention_heads, num_groups=resnet_groups, dtype=self.dtype) attentions.append(attn_block) res_block = FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout=self.dropout, groups=resnet_groups, dtype=self.dtype) resnets.append(res_block) self.resnets = resnets self.attentions = attentions def __call__(self, hidden_states, deterministic=True): hidden_states = self.resnets[0](hidden_states, deterministic=deterministic) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states) hidden_states = resnet(hidden_states, deterministic=deterministic) return hidden_states class FlaxEncoder(nn.Module): in_channels: int = 3 out_channels: int = 3 down_block_types: Tuple[str] = ('DownEncoderBlock2D',) block_out_channels: Tuple[int] = (64,) layers_per_block: int = 2 norm_num_groups: int = 32 act_fn: str = 'silu' double_z: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): block_out_channels = self.block_out_channels self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) down_blocks = [] output_channel = block_out_channels[0] for (i, _) in enumerate(self.down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = FlaxDownEncoderBlock2D(in_channels=input_channel, out_channels=output_channel, num_layers=self.layers_per_block, resnet_groups=self.norm_num_groups, add_downsample=not is_final_block, dtype=self.dtype) down_blocks.append(down_block) self.down_blocks = down_blocks self.mid_block = FlaxUNetMidBlock2D(in_channels=block_out_channels[-1], resnet_groups=self.norm_num_groups, num_attention_heads=None, dtype=self.dtype) conv_out_channels = 2 * self.out_channels if self.double_z else self.out_channels self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-06) self.conv_out = nn.Conv(conv_out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) def __call__(self, sample, deterministic: bool=True): sample = self.conv_in(sample) for block in self.down_blocks: sample = block(sample, deterministic=deterministic) sample = self.mid_block(sample, deterministic=deterministic) sample = self.conv_norm_out(sample) sample = nn.swish(sample) sample = self.conv_out(sample) return sample class FlaxDecoder(nn.Module): in_channels: int = 3 out_channels: int = 3 up_block_types: Tuple[str] = ('UpDecoderBlock2D',) block_out_channels: int = (64,) 
layers_per_block: int = 2 norm_num_groups: int = 32 act_fn: str = 'silu' dtype: jnp.dtype = jnp.float32 def setup(self): block_out_channels = self.block_out_channels self.conv_in = nn.Conv(block_out_channels[-1], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) self.mid_block = FlaxUNetMidBlock2D(in_channels=block_out_channels[-1], resnet_groups=self.norm_num_groups, num_attention_heads=None, dtype=self.dtype) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] up_blocks = [] for (i, _) in enumerate(self.up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = FlaxUpDecoderBlock2D(in_channels=prev_output_channel, out_channels=output_channel, num_layers=self.layers_per_block + 1, resnet_groups=self.norm_num_groups, add_upsample=not is_final_block, dtype=self.dtype) up_blocks.append(up_block) prev_output_channel = output_channel self.up_blocks = up_blocks self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-06) self.conv_out = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype) def __call__(self, sample, deterministic: bool=True): sample = self.conv_in(sample) sample = self.mid_block(sample, deterministic=deterministic) for block in self.up_blocks: sample = block(sample, deterministic=deterministic) sample = self.conv_norm_out(sample) sample = nn.swish(sample) sample = self.conv_out(sample) return sample class FlaxDiagonalGaussianDistribution(object): def __init__(self, parameters, deterministic=False): (self.mean, self.logvar) = jnp.split(parameters, 2, axis=-1) self.logvar = jnp.clip(self.logvar, -30.0, 20.0) self.deterministic = deterministic self.std = jnp.exp(0.5 * self.logvar) self.var = jnp.exp(self.logvar) if self.deterministic: self.var = self.std = jnp.zeros_like(self.mean) def sample(self, key): return self.mean + self.std * jax.random.normal(key, self.mean.shape) def kl(self, other=None): if self.deterministic: return jnp.array([0.0]) if other is None: return 0.5 * jnp.sum(self.mean ** 2 + self.var - 1.0 - self.logvar, axis=[1, 2, 3]) return 0.5 * jnp.sum(jnp.square(self.mean - other.mean) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar, axis=[1, 2, 3]) def nll(self, sample, axis=[1, 2, 3]): if self.deterministic: return jnp.array([0.0]) logtwopi = jnp.log(2.0 * jnp.pi) return 0.5 * jnp.sum(logtwopi + self.logvar + jnp.square(sample - self.mean) / self.var, axis=axis) def mode(self): return self.mean @flax_register_to_config class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin): in_channels: int = 3 out_channels: int = 3 down_block_types: Tuple[str] = ('DownEncoderBlock2D',) up_block_types: Tuple[str] = ('UpDecoderBlock2D',) block_out_channels: Tuple[int] = (64,) layers_per_block: int = 1 act_fn: str = 'silu' latent_channels: int = 4 norm_num_groups: int = 32 sample_size: int = 32 scaling_factor: float = 0.18215 dtype: jnp.dtype = jnp.float32 def setup(self): self.encoder = FlaxEncoder(in_channels=self.config.in_channels, out_channels=self.config.latent_channels, down_block_types=self.config.down_block_types, block_out_channels=self.config.block_out_channels, layers_per_block=self.config.layers_per_block, act_fn=self.config.act_fn, norm_num_groups=self.config.norm_num_groups, double_z=True, dtype=self.dtype) self.decoder = FlaxDecoder(in_channels=self.config.latent_channels, 
out_channels=self.config.out_channels, up_block_types=self.config.up_block_types, block_out_channels=self.config.block_out_channels, layers_per_block=self.config.layers_per_block, norm_num_groups=self.config.norm_num_groups, act_fn=self.config.act_fn, dtype=self.dtype) self.quant_conv = nn.Conv(2 * self.config.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype) self.post_quant_conv = nn.Conv(self.config.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype) def init_weights(self, rng: jax.Array) -> FrozenDict: sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) (params_rng, dropout_rng, gaussian_rng) = jax.random.split(rng, 3) rngs = {'params': params_rng, 'dropout': dropout_rng, 'gaussian': gaussian_rng} return self.init(rngs, sample)['params'] def encode(self, sample, deterministic: bool=True, return_dict: bool=True): sample = jnp.transpose(sample, (0, 2, 3, 1)) hidden_states = self.encoder(sample, deterministic=deterministic) moments = self.quant_conv(hidden_states) posterior = FlaxDiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return FlaxAutoencoderKLOutput(latent_dist=posterior) def decode(self, latents, deterministic: bool=True, return_dict: bool=True): if latents.shape[-1] != self.config.latent_channels: latents = jnp.transpose(latents, (0, 2, 3, 1)) hidden_states = self.post_quant_conv(latents) hidden_states = self.decoder(hidden_states, deterministic=deterministic) hidden_states = jnp.transpose(hidden_states, (0, 3, 1, 2)) if not return_dict: return (hidden_states,) return FlaxDecoderOutput(sample=hidden_states) def __call__(self, sample, sample_posterior=False, deterministic: bool=True, return_dict: bool=True): posterior = self.encode(sample, deterministic=deterministic, return_dict=return_dict) if sample_posterior: rng = self.make_rng('gaussian') hidden_states = posterior.latent_dist.sample(rng) else: hidden_states = posterior.latent_dist.mode() sample = self.decode(hidden_states, return_dict=return_dict).sample if not return_dict: return (sample,) return FlaxDecoderOutput(sample=sample) # File: diffusers-main/src/diffusers/models/vq_model.py from ..utils import deprecate from .autoencoders.vq_model import VQEncoderOutput, VQModel class VQEncoderOutput(VQEncoderOutput): def __init__(self, *args, **kwargs): deprecation_message = 'Importing `VQEncoderOutput` from `diffusers.models.vq_model` is deprecated and this will be removed in a future version. Please use `from diffusers.models.autoencoders.vq_model import VQEncoderOutput`, instead.' deprecate('VQEncoderOutput', '0.31', deprecation_message) super().__init__(*args, **kwargs) class VQModel(VQModel): def __init__(self, *args, **kwargs): deprecation_message = 'Importing `VQModel` from `diffusers.models.vq_model` is deprecated and this will be removed in a future version. Please use `from diffusers.models.autoencoders.vq_model import VQModel`, instead.' 
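# Editorial comment (not part of the original file): these wrapper classes in
# diffusers/models/vq_model.py only emit a deprecation warning and then forward to the
# canonical implementations. A minimal migration sketch, mirroring the deprecation
# messages themselves:
#     # deprecated import path (scheduled for removal):
#     from diffusers.models.vq_model import VQEncoderOutput, VQModel
#     # preferred import path:
#     from diffusers.models.autoencoders.vq_model import VQEncoderOutput, VQModel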
deprecate('VQModel', '0.31', deprecation_message) super().__init__(*args, **kwargs) # File: diffusers-main/src/diffusers/optimization.py """""" import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging logger = logging.get_logger(__name__) class SchedulerType(Enum): LINEAR = 'linear' COSINE = 'cosine' COSINE_WITH_RESTARTS = 'cosine_with_restarts' POLYNOMIAL = 'polynomial' CONSTANT = 'constant' CONSTANT_WITH_WARMUP = 'constant_with_warmup' PIECEWISE_CONSTANT = 'piecewise_constant' def get_constant_schedule(optimizer: Optimizer, last_epoch: int=-1) -> LambdaLR: return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int=-1) -> LambdaLR: def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int=-1) -> LambdaLR: rules_dict = {} rule_list = step_rules.split(',') for rule_str in rule_list[:-1]: (value_str, steps_str) = rule_str.split(':') steps = int(steps_str) value = float(value_str) rules_dict[steps] = value last_lr_multiple = float(rule_list[-1]) def create_rules_function(rules_dict, last_lr_multiple): def rule_func(steps: int) -> float: sorted_steps = sorted(rules_dict.keys()) for (i, sorted_step) in enumerate(sorted_steps): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func rules_func = create_rules_function(rules_dict, last_lr_multiple) return LambdaLR(optimizer, rules_func, last_epoch=last_epoch) def get_linear_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int=-1) -> LambdaLR: def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float=0.5, last_epoch: int=-1) -> LambdaLR: def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int=1, last_epoch: int=-1) -> LambdaLR: def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * (float(num_cycles) * progress % 1.0)))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_polynomial_decay_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, lr_end: float=1e-07, power: float=1.0, last_epoch: int=-1) -> LambdaLR: 
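# Editorial comment (not in the original source): the LambdaLR built below multiplies the
# optimizer's base learning rate by decay(step) / lr_init, so with base lr == lr_init the
# effective schedule is a linear warmup to lr_init over `num_warmup_steps`, followed by
#     lr(step) = lr_end + (lr_init - lr_end) * (1 - progress) ** power,
# where progress = (step - num_warmup_steps) / (num_training_steps - num_warmup_steps),
# and a constant lr_end once step exceeds num_training_steps.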
lr_init = optimizer.defaults['lr'] if not lr_init > lr_end: raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})') def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) elif current_step > num_training_steps: return lr_end / lr_init else: lr_range = lr_init - lr_end decay_steps = num_training_steps - num_warmup_steps pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps decay = lr_range * pct_remaining ** power + lr_end return decay / lr_init return LambdaLR(optimizer, lr_lambda, last_epoch) TYPE_TO_SCHEDULER_FUNCTION = {SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule} def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str]=None, num_warmup_steps: Optional[int]=None, num_training_steps: Optional[int]=None, num_cycles: int=1, power: float=1.0, last_epoch: int=-1) -> LambdaLR: name = SchedulerType(name) schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(optimizer, last_epoch=last_epoch) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch) if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.') if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch) if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.') if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch) if name == SchedulerType.POLYNOMIAL: return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch) return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch) # File: diffusers-main/src/diffusers/pipelines/__init__.py from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_k_diffusion_available, is_librosa_available, is_note_seq_available, is_onnx_available, is_sentencepiece_available, is_torch_available, is_torch_npu_available, is_transformers_available _dummy_objects = {} _import_structure = {'controlnet': [], 'controlnet_hunyuandit': [], 'controlnet_sd3': [], 'controlnet_xs': [], 'deprecated': [], 'latent_diffusion': [], 'ledits_pp': [], 'marigold': [], 'pag': [], 'stable_diffusion': [], 'stable_diffusion_xl': []} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_pt_objects _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: _import_structure['auto_pipeline'] = ['AutoPipelineForImage2Image', 'AutoPipelineForInpainting', 'AutoPipelineForText2Image'] 
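# --- Editor's note: minimal usage sketch for the diffusers.optimization helpers defined above. ---
# Not part of the library source. The toy model, learning rate, and step counts are illustrative
# assumptions; only get_scheduler (and the schedule names it dispatches on) come from the code above.
import torch
from diffusers.optimization import get_scheduler

model = torch.nn.Linear(4, 4)                                  # stand-in for a real denoiser
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

# 'cosine' dispatches through TYPE_TO_SCHEDULER_FUNCTION to get_cosine_schedule_with_warmup.
lr_scheduler = get_scheduler(
    "cosine",
    optimizer=optimizer,
    num_warmup_steps=10,      # linear ramp from 0 up to the base lr over the first 10 steps
    num_training_steps=100,   # cosine decay from the base lr down to 0 afterwards
)

for _ in range(100):
    optimizer.step()          # forward/backward omitted in this sketch
    lr_scheduler.step()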
_import_structure['consistency_models'] = ['ConsistencyModelPipeline'] _import_structure['dance_diffusion'] = ['DanceDiffusionPipeline'] _import_structure['ddim'] = ['DDIMPipeline'] _import_structure['ddpm'] = ['DDPMPipeline'] _import_structure['dit'] = ['DiTPipeline'] _import_structure['latent_diffusion'].extend(['LDMSuperResolutionPipeline']) _import_structure['pipeline_utils'] = ['AudioPipelineOutput', 'DiffusionPipeline', 'StableDiffusionMixin', 'ImagePipelineOutput'] _import_structure['deprecated'].extend(['PNDMPipeline', 'LDMPipeline', 'RePaintPipeline', 'ScoreSdeVePipeline', 'KarrasVePipeline']) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_torch_and_librosa_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) else: _import_structure['deprecated'].extend(['AudioDiffusionPipeline', 'Mel']) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_transformers_and_torch_and_note_seq_objects _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) else: _import_structure['deprecated'].extend(['MidiProcessor', 'SpectrogramDiffusionPipeline']) try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['deprecated'].extend(['VQDiffusionPipeline', 'AltDiffusionPipeline', 'AltDiffusionImg2ImgPipeline', 'CycleDiffusionPipeline', 'StableDiffusionInpaintPipelineLegacy', 'StableDiffusionPix2PixZeroPipeline', 'StableDiffusionParadigmsPipeline', 'StableDiffusionModelEditingPipeline', 'VersatileDiffusionDualGuidedPipeline', 'VersatileDiffusionImageVariationPipeline', 'VersatileDiffusionPipeline', 'VersatileDiffusionTextToImagePipeline']) _import_structure['amused'] = ['AmusedImg2ImgPipeline', 'AmusedInpaintPipeline', 'AmusedPipeline'] _import_structure['animatediff'] = ['AnimateDiffPipeline', 'AnimateDiffControlNetPipeline', 'AnimateDiffSDXLPipeline', 'AnimateDiffSparseControlNetPipeline', 'AnimateDiffVideoToVideoPipeline', 'AnimateDiffVideoToVideoControlNetPipeline'] _import_structure['flux'] = ['FluxControlNetPipeline', 'FluxImg2ImgPipeline', 'FluxInpaintPipeline', 'FluxPipeline'] _import_structure['audioldm'] = ['AudioLDMPipeline'] _import_structure['audioldm2'] = ['AudioLDM2Pipeline', 'AudioLDM2ProjectionModel', 'AudioLDM2UNet2DConditionModel'] _import_structure['blip_diffusion'] = ['BlipDiffusionPipeline'] _import_structure['cogvideo'] = ['CogVideoXPipeline', 'CogVideoXVideoToVideoPipeline'] _import_structure['controlnet'].extend(['BlipDiffusionControlNetPipeline', 'StableDiffusionControlNetImg2ImgPipeline', 'StableDiffusionControlNetInpaintPipeline', 'StableDiffusionControlNetPipeline', 'StableDiffusionXLControlNetImg2ImgPipeline', 'StableDiffusionXLControlNetInpaintPipeline', 'StableDiffusionXLControlNetPipeline']) _import_structure['pag'].extend(['AnimateDiffPAGPipeline', 'KolorsPAGPipeline', 'HunyuanDiTPAGPipeline', 'StableDiffusion3PAGPipeline', 'StableDiffusionPAGPipeline', 'StableDiffusionControlNetPAGPipeline', 'StableDiffusionXLPAGPipeline', 'StableDiffusionXLPAGInpaintPipeline', 
'StableDiffusionXLControlNetPAGImg2ImgPipeline', 'StableDiffusionXLControlNetPAGPipeline', 'StableDiffusionXLPAGImg2ImgPipeline', 'PixArtSigmaPAGPipeline']) _import_structure['controlnet_xs'].extend(['StableDiffusionControlNetXSPipeline', 'StableDiffusionXLControlNetXSPipeline']) _import_structure['controlnet_hunyuandit'].extend(['HunyuanDiTControlNetPipeline']) _import_structure['controlnet_sd3'].extend(['StableDiffusion3ControlNetPipeline', 'StableDiffusion3ControlNetInpaintingPipeline']) _import_structure['deepfloyd_if'] = ['IFImg2ImgPipeline', 'IFImg2ImgSuperResolutionPipeline', 'IFInpaintingPipeline', 'IFInpaintingSuperResolutionPipeline', 'IFPipeline', 'IFSuperResolutionPipeline'] _import_structure['hunyuandit'] = ['HunyuanDiTPipeline'] _import_structure['kandinsky'] = ['KandinskyCombinedPipeline', 'KandinskyImg2ImgCombinedPipeline', 'KandinskyImg2ImgPipeline', 'KandinskyInpaintCombinedPipeline', 'KandinskyInpaintPipeline', 'KandinskyPipeline', 'KandinskyPriorPipeline'] _import_structure['kandinsky2_2'] = ['KandinskyV22CombinedPipeline', 'KandinskyV22ControlnetImg2ImgPipeline', 'KandinskyV22ControlnetPipeline', 'KandinskyV22Img2ImgCombinedPipeline', 'KandinskyV22Img2ImgPipeline', 'KandinskyV22InpaintCombinedPipeline', 'KandinskyV22InpaintPipeline', 'KandinskyV22Pipeline', 'KandinskyV22PriorEmb2EmbPipeline', 'KandinskyV22PriorPipeline'] _import_structure['kandinsky3'] = ['Kandinsky3Img2ImgPipeline', 'Kandinsky3Pipeline'] _import_structure['latent_consistency_models'] = ['LatentConsistencyModelImg2ImgPipeline', 'LatentConsistencyModelPipeline'] _import_structure['latent_diffusion'].extend(['LDMTextToImagePipeline']) _import_structure['ledits_pp'].extend(['LEditsPPPipelineStableDiffusion', 'LEditsPPPipelineStableDiffusionXL']) _import_structure['latte'] = ['LattePipeline'] _import_structure['lumina'] = ['LuminaText2ImgPipeline'] _import_structure['marigold'].extend(['MarigoldDepthPipeline', 'MarigoldNormalsPipeline']) _import_structure['musicldm'] = ['MusicLDMPipeline'] _import_structure['paint_by_example'] = ['PaintByExamplePipeline'] _import_structure['pia'] = ['PIAPipeline'] _import_structure['pixart_alpha'] = ['PixArtAlphaPipeline', 'PixArtSigmaPipeline'] _import_structure['semantic_stable_diffusion'] = ['SemanticStableDiffusionPipeline'] _import_structure['shap_e'] = ['ShapEImg2ImgPipeline', 'ShapEPipeline'] _import_structure['stable_audio'] = ['StableAudioProjectionModel', 'StableAudioPipeline'] _import_structure['stable_cascade'] = ['StableCascadeCombinedPipeline', 'StableCascadeDecoderPipeline', 'StableCascadePriorPipeline'] _import_structure['stable_diffusion'].extend(['CLIPImageProjection', 'StableDiffusionDepth2ImgPipeline', 'StableDiffusionImageVariationPipeline', 'StableDiffusionImg2ImgPipeline', 'StableDiffusionInpaintPipeline', 'StableDiffusionInstructPix2PixPipeline', 'StableDiffusionLatentUpscalePipeline', 'StableDiffusionPipeline', 'StableDiffusionUpscalePipeline', 'StableUnCLIPImg2ImgPipeline', 'StableUnCLIPPipeline', 'StableDiffusionLDM3DPipeline']) _import_structure['aura_flow'] = ['AuraFlowPipeline'] _import_structure['stable_diffusion_3'] = ['StableDiffusion3Pipeline', 'StableDiffusion3Img2ImgPipeline', 'StableDiffusion3InpaintPipeline'] _import_structure['stable_diffusion_attend_and_excite'] = ['StableDiffusionAttendAndExcitePipeline'] _import_structure['stable_diffusion_safe'] = ['StableDiffusionPipelineSafe'] _import_structure['stable_diffusion_sag'] = ['StableDiffusionSAGPipeline'] _import_structure['stable_diffusion_gligen'] = 
['StableDiffusionGLIGENPipeline', 'StableDiffusionGLIGENTextImagePipeline'] _import_structure['stable_video_diffusion'] = ['StableVideoDiffusionPipeline'] _import_structure['stable_diffusion_xl'].extend(['StableDiffusionXLImg2ImgPipeline', 'StableDiffusionXLInpaintPipeline', 'StableDiffusionXLInstructPix2PixPipeline', 'StableDiffusionXLPipeline']) _import_structure['stable_diffusion_diffedit'] = ['StableDiffusionDiffEditPipeline'] _import_structure['stable_diffusion_ldm3d'] = ['StableDiffusionLDM3DPipeline'] _import_structure['stable_diffusion_panorama'] = ['StableDiffusionPanoramaPipeline'] _import_structure['t2i_adapter'] = ['StableDiffusionAdapterPipeline', 'StableDiffusionXLAdapterPipeline'] _import_structure['text_to_video_synthesis'] = ['TextToVideoSDPipeline', 'TextToVideoZeroPipeline', 'TextToVideoZeroSDXLPipeline', 'VideoToVideoSDPipeline'] _import_structure['i2vgen_xl'] = ['I2VGenXLPipeline'] _import_structure['unclip'] = ['UnCLIPImageVariationPipeline', 'UnCLIPPipeline'] _import_structure['unidiffuser'] = ['ImageTextPipelineOutput', 'UniDiffuserModel', 'UniDiffuserPipeline', 'UniDiffuserTextDecoder'] _import_structure['wuerstchen'] = ['WuerstchenCombinedPipeline', 'WuerstchenDecoderPipeline', 'WuerstchenPriorPipeline'] try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_onnx_objects _dummy_objects.update(get_objects_from_module(dummy_onnx_objects)) else: _import_structure['onnx_utils'] = ['OnnxRuntimeModel'] try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_torch_and_transformers_and_onnx_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_onnx_objects)) else: _import_structure['stable_diffusion'].extend(['OnnxStableDiffusionImg2ImgPipeline', 'OnnxStableDiffusionInpaintPipeline', 'OnnxStableDiffusionPipeline', 'OnnxStableDiffusionUpscalePipeline', 'StableDiffusionOnnxPipeline']) try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_torch_and_transformers_and_k_diffusion_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) else: _import_structure['stable_diffusion_k_diffusion'] = ['StableDiffusionKDiffusionPipeline', 'StableDiffusionXLKDiffusionPipeline'] try: if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_torch_and_transformers_and_sentencepiece_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) else: _import_structure['kolors'] = ['KolorsPipeline', 'KolorsImg2ImgPipeline'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_flax_objects _dummy_objects.update(get_objects_from_module(dummy_flax_objects)) else: _import_structure['pipeline_flax_utils'] = ['FlaxDiffusionPipeline'] try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_flax_and_transformers_objects 
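# --- Editor's note: the optional-dependency guard used throughout this __init__, shown in isolation
# as a self-contained sketch. The backend name, probe, and exception class below are hypothetical
# stand-ins; in diffusers the probe is e.g. is_torch_available() and the fallback objects come from
# utils.dummy_*_objects. ---
import importlib.util

_example_import_structure = {"pipelines": []}
_example_dummy_objects = {}

class _ExampleOptionalDependencyNotAvailable(Exception):
    """Stand-in for diffusers.utils.OptionalDependencyNotAvailable."""

def _example_backend_available() -> bool:
    # Hypothetical probe: the dependency counts as available if it is importable.
    return importlib.util.find_spec("example_backend") is not None

try:
    if not _example_backend_available():
        raise _ExampleOptionalDependencyNotAvailable()
except _ExampleOptionalDependencyNotAvailable:
    # Register a placeholder so `from package import ExamplePipeline` still succeeds and can
    # raise a helpful "please install example_backend" error when the object is actually used.
    _example_dummy_objects["ExamplePipeline"] = object
else:
    # Only the dotted path is recorded; _LazyModule performs the real import on first access.
    _example_import_structure["pipelines"].append("ExamplePipeline")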
_dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: _import_structure['controlnet'].extend(['FlaxStableDiffusionControlNetPipeline']) _import_structure['stable_diffusion'].extend(['FlaxStableDiffusionImg2ImgPipeline', 'FlaxStableDiffusionInpaintPipeline', 'FlaxStableDiffusionPipeline']) _import_structure['stable_diffusion_xl'].extend(['FlaxStableDiffusionXLPipeline']) if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * else: from .auto_pipeline import AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image from .consistency_models import ConsistencyModelPipeline from .dance_diffusion import DanceDiffusionPipeline from .ddim import DDIMPipeline from .ddpm import DDPMPipeline from .deprecated import KarrasVePipeline, LDMPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline from .dit import DiTPipeline from .latent_diffusion import LDMSuperResolutionPipeline from .pipeline_utils import AudioPipelineOutput, DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_librosa_objects import * else: from .deprecated import AudioDiffusionPipeline, Mel try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_transformers_objects import * else: from .amused import AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline from .animatediff import AnimateDiffControlNetPipeline, AnimateDiffPipeline, AnimateDiffSDXLPipeline, AnimateDiffSparseControlNetPipeline, AnimateDiffVideoToVideoControlNetPipeline, AnimateDiffVideoToVideoPipeline from .audioldm import AudioLDMPipeline from .audioldm2 import AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel from .aura_flow import AuraFlowPipeline from .blip_diffusion import BlipDiffusionPipeline from .cogvideo import CogVideoXPipeline, CogVideoXVideoToVideoPipeline from .controlnet import BlipDiffusionControlNetPipeline, StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetInpaintPipeline, StableDiffusionXLControlNetPipeline from .controlnet_hunyuandit import HunyuanDiTControlNetPipeline from .controlnet_sd3 import StableDiffusion3ControlNetInpaintingPipeline, StableDiffusion3ControlNetPipeline from .controlnet_xs import StableDiffusionControlNetXSPipeline, StableDiffusionXLControlNetXSPipeline from .deepfloyd_if import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline from .deprecated import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, CycleDiffusionPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionModelEditingPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPix2PixZeroPipeline, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VQDiffusionPipeline from .flux import FluxControlNetPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxPipeline from .hunyuandit import HunyuanDiTPipeline from 
.i2vgen_xl import I2VGenXLPipeline from .kandinsky import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyImg2ImgPipeline, KandinskyInpaintCombinedPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline from .kandinsky2_2 import KandinskyV22CombinedPipeline, KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22ControlnetPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22Img2ImgPipeline, KandinskyV22InpaintCombinedPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorPipeline from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline from .latent_diffusion import LDMTextToImagePipeline from .latte import LattePipeline from .ledits_pp import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput, LEditsPPPipelineStableDiffusion, LEditsPPPipelineStableDiffusionXL from .lumina import LuminaText2ImgPipeline from .marigold import MarigoldDepthPipeline, MarigoldNormalsPipeline from .musicldm import MusicLDMPipeline from .pag import AnimateDiffPAGPipeline, HunyuanDiTPAGPipeline, KolorsPAGPipeline, PixArtSigmaPAGPipeline, StableDiffusion3PAGPipeline, StableDiffusionControlNetPAGPipeline, StableDiffusionPAGPipeline, StableDiffusionXLControlNetPAGImg2ImgPipeline, StableDiffusionXLControlNetPAGPipeline, StableDiffusionXLPAGImg2ImgPipeline, StableDiffusionXLPAGInpaintPipeline, StableDiffusionXLPAGPipeline from .paint_by_example import PaintByExamplePipeline from .pia import PIAPipeline from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline from .semantic_stable_diffusion import SemanticStableDiffusionPipeline from .shap_e import ShapEImg2ImgPipeline, ShapEPipeline from .stable_audio import StableAudioPipeline, StableAudioProjectionModel from .stable_cascade import StableCascadeCombinedPipeline, StableCascadeDecoderPipeline, StableCascadePriorPipeline from .stable_diffusion import CLIPImageProjection, StableDiffusionDepth2ImgPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImg2ImgPipeline, StableUnCLIPPipeline from .stable_diffusion_3 import StableDiffusion3Img2ImgPipeline, StableDiffusion3InpaintPipeline, StableDiffusion3Pipeline from .stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .stable_diffusion_gligen import StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline from .stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline from .stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .stable_diffusion_safe import StableDiffusionPipelineSafe from .stable_diffusion_sag import StableDiffusionSAGPipeline from .stable_diffusion_xl import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLInstructPix2PixPipeline, StableDiffusionXLPipeline from .stable_video_diffusion import StableVideoDiffusionPipeline from .t2i_adapter import StableDiffusionAdapterPipeline, StableDiffusionXLAdapterPipeline from .text_to_video_synthesis import TextToVideoSDPipeline, TextToVideoZeroPipeline, TextToVideoZeroSDXLPipeline, VideoToVideoSDPipeline from .unclip import UnCLIPImageVariationPipeline, 
UnCLIPPipeline from .unidiffuser import ImageTextPipelineOutput, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, WuerstchenPriorPipeline try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_onnx_objects import * else: from .onnx_utils import OnnxRuntimeModel try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_transformers_and_onnx_objects import * else: from .stable_diffusion import OnnxStableDiffusionImg2ImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_transformers_and_k_diffusion_objects import * else: from .stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_transformers_and_sentencepiece_objects import * else: from .kolors import KolorsImg2ImgPipeline, KolorsPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * else: from .pipeline_flax_utils import FlaxDiffusionPipeline try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_and_transformers_objects import * else: from .controlnet import FlaxStableDiffusionControlNetPipeline from .stable_diffusion import FlaxStableDiffusionImg2ImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline from .stable_diffusion_xl import FlaxStableDiffusionXLPipeline try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_transformers_and_torch_and_note_seq_objects import * else: from .deprecated import MidiProcessor, SpectrogramDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/amused/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline _dummy_objects.update({'AmusedPipeline': AmusedPipeline, 'AmusedImg2ImgPipeline': AmusedImg2ImgPipeline, 'AmusedInpaintPipeline': AmusedInpaintPipeline}) else: _import_structure['pipeline_amused'] = ['AmusedPipeline'] 
_import_structure['pipeline_amused_img2img'] = ['AmusedImg2ImgPipeline'] _import_structure['pipeline_amused_inpaint'] = ['AmusedInpaintPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import AmusedPipeline else: from .pipeline_amused import AmusedPipeline from .pipeline_amused_img2img import AmusedImg2ImgPipeline from .pipeline_amused_inpaint import AmusedInpaintPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/amused/pipeline_amused.py from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...models import UVit2DModel, VQModel from ...schedulers import AmusedScheduler from ...utils import replace_example_docstring from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AmusedPipeline\n\n >>> pipe = AmusedPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt).images[0]\n ```\n' class AmusedPipeline(DiffusionPipeline): image_processor: VaeImageProcessor vqvae: VQModel tokenizer: CLIPTokenizer text_encoder: CLIPTextModelWithProjection transformer: UVit2DModel scheduler: AmusedScheduler model_cpu_offload_seq = 'text_encoder->transformer->vqvae' def __init__(self, vqvae: VQModel, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, transformer: UVit2DModel, scheduler: AmusedScheduler): super().__init__() self.register_modules(vqvae=vqvae, tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[List[str], str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=12, guidance_scale: float=10.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[torch.Generator]=None, latents: Optional[torch.IntTensor]=None, prompt_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_encoder_hidden_states: Optional[torch.Tensor]=None, output_type='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, micro_conditioning_aesthetic_score: int=6, micro_conditioning_crop_coord: Tuple[int, int]=(0, 0), temperature: Union[int, Tuple[int, int], List[int]]=(2, 0)): if prompt_embeds is not None and encoder_hidden_states is None or (prompt_embeds is None and encoder_hidden_states is not None): raise ValueError('pass either both `prompt_embeds` and 
`encoder_hidden_states` or neither') if negative_prompt_embeds is not None and negative_encoder_hidden_states is None or (negative_prompt_embeds is None and negative_encoder_hidden_states is not None): raise ValueError('pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither') if prompt is None and prompt_embeds is None or (prompt is not None and prompt_embeds is not None): raise ValueError('pass only one of `prompt` or `prompt_embeds`') if isinstance(prompt, str): prompt = [prompt] if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt if height is None: height = self.transformer.config.sample_size * self.vae_scale_factor if width is None: width = self.transformer.config.sample_size * self.vae_scale_factor if prompt_embeds is None: input_ids = self.tokenizer(prompt, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids.to(self._execution_device) outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) prompt_embeds = outputs.text_embeds encoder_hidden_states = outputs.hidden_states[-2] prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) if guidance_scale > 1.0: if negative_prompt_embeds is None: if negative_prompt is None: negative_prompt = [''] * len(prompt) if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] input_ids = self.tokenizer(negative_prompt, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids.to(self._execution_device) outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) negative_prompt_embeds = outputs.text_embeds negative_encoder_hidden_states = outputs.hidden_states[-2] negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) micro_conds = torch.tensor([width, height, micro_conditioning_crop_coord[0], micro_conditioning_crop_coord[1], micro_conditioning_aesthetic_score], device=self._execution_device, dtype=encoder_hidden_states.dtype) micro_conds = micro_conds.unsqueeze(0) micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) shape = (batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = torch.full(shape, self.scheduler.config.mask_token_id, dtype=torch.long, device=self._execution_device) self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) num_warmup_steps = len(self.scheduler.timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, timestep) in enumerate(self.scheduler.timesteps): if guidance_scale > 1.0: model_input = torch.cat([latents] * 2) else: model_input = latents model_output = self.transformer(model_input, micro_conds=micro_conds, pooled_text_emb=prompt_embeds, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs) if guidance_scale > 1.0: (uncond_logits, cond_logits) = model_output.chunk(2) model_output = uncond_logits + guidance_scale * 
(cond_logits - uncond_logits) latents = self.scheduler.step(model_output=model_output, timestep=timestep, sample=latents, generator=generator).prev_sample if i == len(self.scheduler.timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, timestep, latents) if output_type == 'latent': output = latents else: needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast if needs_upcasting: self.vqvae.float() output = self.vqvae.decode(latents, force_not_quantize=True, shape=(batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor, self.vqvae.config.latent_channels)).sample.clip(0, 1) output = self.image_processor.postprocess(output, output_type) if needs_upcasting: self.vqvae.half() self.maybe_free_model_hooks() if not return_dict: return (output,) return ImagePipelineOutput(output) # File: diffusers-main/src/diffusers/pipelines/amused/pipeline_amused_img2img.py from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...models import UVit2DModel, VQModel from ...schedulers import AmusedScheduler from ...utils import replace_example_docstring from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AmusedImg2ImgPipeline\n >>> from diffusers.utils import load_image\n\n >>> pipe = AmusedImg2ImgPipeline.from_pretrained(\n ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "winter mountains"\n >>> input_image = (\n ... load_image(\n ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg"\n ... )\n ... .resize((512, 512))\n ... .convert("RGB")\n ... 
)\n >>> image = pipe(prompt, input_image).images[0]\n ```\n' class AmusedImg2ImgPipeline(DiffusionPipeline): image_processor: VaeImageProcessor vqvae: VQModel tokenizer: CLIPTokenizer text_encoder: CLIPTextModelWithProjection transformer: UVit2DModel scheduler: AmusedScheduler model_cpu_offload_seq = 'text_encoder->transformer->vqvae' _exclude_from_cpu_offload = ['vqvae'] def __init__(self, vqvae: VQModel, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, transformer: UVit2DModel, scheduler: AmusedScheduler): super().__init__() self.register_modules(vqvae=vqvae, tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[List[str], str]]=None, image: PipelineImageInput=None, strength: float=0.5, num_inference_steps: int=12, guidance_scale: float=10.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[torch.Generator]=None, prompt_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_encoder_hidden_states: Optional[torch.Tensor]=None, output_type='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, micro_conditioning_aesthetic_score: int=6, micro_conditioning_crop_coord: Tuple[int, int]=(0, 0), temperature: Union[int, Tuple[int, int], List[int]]=(2, 0)): if prompt_embeds is not None and encoder_hidden_states is None or (prompt_embeds is None and encoder_hidden_states is not None): raise ValueError('pass either both `prompt_embeds` and `encoder_hidden_states` or neither') if negative_prompt_embeds is not None and negative_encoder_hidden_states is None or (negative_prompt_embeds is None and negative_encoder_hidden_states is not None): raise ValueError('pass either both `negative_prompt_embeds` and `negative_encoder_hidden_states` or neither') if prompt is None and prompt_embeds is None or (prompt is not None and prompt_embeds is not None): raise ValueError('pass only one of `prompt` or `prompt_embeds`') if isinstance(prompt, str): prompt = [prompt] if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt if prompt_embeds is None: input_ids = self.tokenizer(prompt, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids.to(self._execution_device) outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) prompt_embeds = outputs.text_embeds encoder_hidden_states = outputs.hidden_states[-2] prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) if guidance_scale > 1.0: if negative_prompt_embeds is None: if negative_prompt is None: negative_prompt = [''] * len(prompt) if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] input_ids = self.tokenizer(negative_prompt, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids.to(self._execution_device) outputs = 
self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) negative_prompt_embeds = outputs.text_embeds negative_encoder_hidden_states = outputs.hidden_states[-2] negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) image = self.image_processor.preprocess(image) (height, width) = image.shape[-2:] micro_conds = torch.tensor([width, height, micro_conditioning_crop_coord[0], micro_conditioning_crop_coord[1], micro_conditioning_aesthetic_score], device=self._execution_device, dtype=encoder_hidden_states.dtype) micro_conds = micro_conds.unsqueeze(0) micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) num_inference_steps = int(len(self.scheduler.timesteps) * strength) start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast if needs_upcasting: self.vqvae.float() latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents (latents_bsz, channels, latents_height, latents_width) = latents.shape latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width) latents = self.scheduler.add_noise(latents, self.scheduler.timesteps[start_timestep_idx - 1], generator=generator) latents = latents.repeat(num_images_per_prompt, 1, 1) with self.progress_bar(total=num_inference_steps) as progress_bar: for i in range(start_timestep_idx, len(self.scheduler.timesteps)): timestep = self.scheduler.timesteps[i] if guidance_scale > 1.0: model_input = torch.cat([latents] * 2) else: model_input = latents model_output = self.transformer(model_input, micro_conds=micro_conds, pooled_text_emb=prompt_embeds, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs) if guidance_scale > 1.0: (uncond_logits, cond_logits) = model_output.chunk(2) model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) latents = self.scheduler.step(model_output=model_output, timestep=timestep, sample=latents, generator=generator).prev_sample if i == len(self.scheduler.timesteps) - 1 or (i + 1) % self.scheduler.order == 0: progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, timestep, latents) if output_type == 'latent': output = latents else: output = self.vqvae.decode(latents, force_not_quantize=True, shape=(batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor, self.vqvae.config.latent_channels)).sample.clip(0, 1) output = self.image_processor.postprocess(output, output_type) if needs_upcasting: self.vqvae.half() self.maybe_free_model_hooks() if not return_dict: return (output,) return ImagePipelineOutput(output) # File: diffusers-main/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...models import UVit2DModel, VQModel from ...schedulers import 
AmusedScheduler from ...utils import replace_example_docstring from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AmusedInpaintPipeline\n >>> from diffusers.utils import load_image\n\n >>> pipe = AmusedInpaintPipeline.from_pretrained(\n ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "fall mountains"\n >>> input_image = (\n ... load_image(\n ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg"\n ... )\n ... .resize((512, 512))\n ... .convert("RGB")\n ... )\n >>> mask = (\n ... load_image(\n ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png"\n ... )\n ... .resize((512, 512))\n ... .convert("L")\n ... )\n >>> pipe(prompt, input_image, mask).images[0].save("out.png")\n ```\n' class AmusedInpaintPipeline(DiffusionPipeline): image_processor: VaeImageProcessor vqvae: VQModel tokenizer: CLIPTokenizer text_encoder: CLIPTextModelWithProjection transformer: UVit2DModel scheduler: AmusedScheduler model_cpu_offload_seq = 'text_encoder->transformer->vqvae' _exclude_from_cpu_offload = ['vqvae'] def __init__(self, vqvae: VQModel, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, transformer: UVit2DModel, scheduler: AmusedScheduler): super().__init__() self.register_modules(vqvae=vqvae, tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True, do_resize=True) self.scheduler.register_to_config(masking_schedule='linear') @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[List[str], str]]=None, image: PipelineImageInput=None, mask_image: PipelineImageInput=None, strength: float=1.0, num_inference_steps: int=12, guidance_scale: float=10.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[torch.Generator]=None, prompt_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_encoder_hidden_states: Optional[torch.Tensor]=None, output_type='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, micro_conditioning_aesthetic_score: int=6, micro_conditioning_crop_coord: Tuple[int, int]=(0, 0), temperature: Union[int, Tuple[int, int], List[int]]=(2, 0)): if prompt_embeds is not None and encoder_hidden_states is None or (prompt_embeds is None and encoder_hidden_states is not None): raise ValueError('pass either both `prompt_embeds` and `encoder_hidden_states` or neither') if negative_prompt_embeds is not None and negative_encoder_hidden_states is None or (negative_prompt_embeds is None and negative_encoder_hidden_states is not None): raise ValueError('pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither') if prompt is None and prompt_embeds is None or (prompt is not None and prompt_embeds is not None): raise 
ValueError('pass only one of `prompt` or `prompt_embeds`') if isinstance(prompt, str): prompt = [prompt] if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt if prompt_embeds is None: input_ids = self.tokenizer(prompt, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids.to(self._execution_device) outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) prompt_embeds = outputs.text_embeds encoder_hidden_states = outputs.hidden_states[-2] prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) if guidance_scale > 1.0: if negative_prompt_embeds is None: if negative_prompt is None: negative_prompt = [''] * len(prompt) if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] input_ids = self.tokenizer(negative_prompt, return_tensors='pt', padding='max_length', truncation=True, max_length=self.tokenizer.model_max_length).input_ids.to(self._execution_device) outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) negative_prompt_embeds = outputs.text_embeds negative_encoder_hidden_states = outputs.hidden_states[-2] negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) image = self.image_processor.preprocess(image) (height, width) = image.shape[-2:] micro_conds = torch.tensor([width, height, micro_conditioning_crop_coord[0], micro_conditioning_crop_coord[1], micro_conditioning_aesthetic_score], device=self._execution_device, dtype=encoder_hidden_states.dtype) micro_conds = micro_conds.unsqueeze(0) micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) num_inference_steps = int(len(self.scheduler.timesteps) * strength) start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast if needs_upcasting: self.vqvae.float() latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents (latents_bsz, channels, latents_height, latents_width) = latents.shape latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width) mask = self.mask_processor.preprocess(mask_image, height // self.vae_scale_factor, width // self.vae_scale_factor) mask = mask.reshape(mask.shape[0], latents_height, latents_width).bool().to(latents.device) latents[mask] = self.scheduler.config.mask_token_id starting_mask_ratio = mask.sum() / latents.numel() latents = latents.repeat(num_images_per_prompt, 1, 1) with self.progress_bar(total=num_inference_steps) as progress_bar: for i in range(start_timestep_idx, len(self.scheduler.timesteps)): timestep = self.scheduler.timesteps[i] if guidance_scale > 1.0: model_input = torch.cat([latents] * 2) else: model_input = latents model_output = self.transformer(model_input, micro_conds=micro_conds, pooled_text_emb=prompt_embeds, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs) if 
guidance_scale > 1.0: (uncond_logits, cond_logits) = model_output.chunk(2) model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) latents = self.scheduler.step(model_output=model_output, timestep=timestep, sample=latents, generator=generator, starting_mask_ratio=starting_mask_ratio).prev_sample if i == len(self.scheduler.timesteps) - 1 or (i + 1) % self.scheduler.order == 0: progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, timestep, latents) if output_type == 'latent': output = latents else: output = self.vqvae.decode(latents, force_not_quantize=True, shape=(batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor, self.vqvae.config.latent_channels)).sample.clip(0, 1) output = self.image_processor.postprocess(output, output_type) if needs_upcasting: self.vqvae.half() self.maybe_free_model_hooks() if not return_dict: return (output,) return ImagePipelineOutput(output) # File: diffusers-main/src/diffusers/pipelines/animatediff/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {'pipeline_output': ['AnimateDiffPipelineOutput']} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_animatediff'] = ['AnimateDiffPipeline'] _import_structure['pipeline_animatediff_controlnet'] = ['AnimateDiffControlNetPipeline'] _import_structure['pipeline_animatediff_sdxl'] = ['AnimateDiffSDXLPipeline'] _import_structure['pipeline_animatediff_sparsectrl'] = ['AnimateDiffSparseControlNetPipeline'] _import_structure['pipeline_animatediff_video2video'] = ['AnimateDiffVideoToVideoPipeline'] _import_structure['pipeline_animatediff_video2video_controlnet'] = ['AnimateDiffVideoToVideoControlNetPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_animatediff import AnimateDiffPipeline from .pipeline_animatediff_controlnet import AnimateDiffControlNetPipeline from .pipeline_animatediff_sdxl import AnimateDiffSDXLPipeline from .pipeline_animatediff_sparsectrl import AnimateDiffSparseControlNetPipeline from .pipeline_animatediff_video2video import AnimateDiffVideoToVideoPipeline from .pipeline_animatediff_video2video_controlnet import AnimateDiffVideoToVideoControlNetPipeline from .pipeline_output import AnimateDiffPipelineOutput else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/animatediff/pipeline_animatediff.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput from ...loaders import IPAdapterMixin, 
StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel from ...models.lora import adjust_lora_scale_text_encoder from ...models.unets.unet_motion_model import MotionAdapter from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..free_init_utils import FreeInitMixin from ..free_noise_utils import AnimateDiffFreeNoiseMixin from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import AnimateDiffPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler\n >>> from diffusers.utils import export_to_gif\n\n >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")\n >>> pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter)\n >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False)\n >>> output = pipe(prompt="A corgi walking in the park")\n >>> frames = output.frames[0]\n >>> export_to_gif(frames, "animation.gif")\n ```\n' class AnimateDiffPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FreeInitMixin, AnimateDiffFreeNoiseMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['feature_extractor', 'image_encoder', 'motion_adapter'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: Union[UNet2DConditionModel, UNetMotionModel], motion_adapter: MotionAdapter, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: 
batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def decode_latents(self, latents, decode_chunk_size: int=16): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) video = [] for i in range(0, latents.shape[0], decode_chunk_size): batch_latents = latents[i:i + decode_chunk_size] batch_latents = self.vae.decode(batch_latents).sample video.append(batch_latents) video = torch.cat(video) video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, (str, list, dict))): raise ValueError(f'`prompt` has to be of type `str`, `list` or `dict` but is type(prompt)={type(prompt)!r}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): if self.free_noise_enabled: latents = self._prepare_latents_free_noise(batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, num_frames: Optional[int]=16, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], decode_chunk_size: int=16, **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, (str, dict)): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None if self.free_noise_enabled: (prompt_embeds, negative_prompt_embeds) = self._encode_prompt_free_noise(prompt=prompt, num_frames=num_frames, device=device, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) else: (prompt_embeds, 
negative_prompt_embeds) = self.encode_prompt(prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: (latents, timesteps) = self._apply_free_init(latents, free_init_iter, num_inference_steps, device, latents.dtype, generator) self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs).sample if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents, decode_chunk_size) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return AnimateDiffPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from transformers import 
CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel from ...models.lora import adjust_lora_scale_text_encoder from ...models.unets.unet_motion_model import MotionAdapter from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, randn_tensor from ...video_processor import VideoProcessor from ..controlnet.multicontrolnet import MultiControlNetModel from ..free_init_utils import FreeInitMixin from ..free_noise_utils import AnimateDiffFreeNoiseMixin from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import AnimateDiffPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import (\n ... AnimateDiffControlNetPipeline,\n ... AutoencoderKL,\n ... ControlNetModel,\n ... MotionAdapter,\n ... LCMScheduler,\n ... )\n >>> from diffusers.utils import export_to_gif, load_video\n\n >>> # Additionally, you will need to preprocess videos before they can be used with the ControlNet\n >>> # HF maintains just the right package for it: `pip install controlnet_aux`\n >>> from controlnet_aux.processor import ZoeDetector\n\n >>> # Download controlnets from https://huggingface.co/lllyasviel/ControlNet-v1-1 to use .from_single_file\n >>> # Download Diffusers-format controlnets, such as https://huggingface.co/lllyasviel/sd-controlnet-depth, to use .from_pretrained()\n >>> controlnet = ControlNetModel.from_single_file("control_v11f1p_sd15_depth.pth", torch_dtype=torch.float16)\n\n >>> # We use AnimateLCM for this example but one can use the original motion adapters as well (for example, https://huggingface.co/guoyww/animatediff-motion-adapter-v1-5-3)\n >>> motion_adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM")\n\n >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)\n >>> pipe: AnimateDiffControlNetPipeline = AnimateDiffControlNetPipeline.from_pretrained(\n ... "SG161222/Realistic_Vision_V5.1_noVAE",\n ... motion_adapter=motion_adapter,\n ... controlnet=controlnet,\n ... vae=vae,\n ... ).to(device="cuda", dtype=torch.float16)\n >>> pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")\n >>> pipe.load_lora_weights(\n ... "wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora"\n ... )\n >>> pipe.set_adapters(["lcm-lora"], [0.8])\n\n >>> depth_detector = ZoeDetector.from_pretrained("lllyasviel/Annotators").to("cuda")\n >>> video = load_video(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif"\n ... )\n >>> conditioning_frames = []\n\n >>> with pipe.progress_bar(total=len(video)) as progress_bar:\n ... for frame in video:\n ... conditioning_frames.append(depth_detector(frame))\n ... progress_bar.update()\n\n >>> prompt = "a panda, playing a guitar, sitting in a pink boat, in the ocean, mountains in background, realistic, high quality"\n >>> negative_prompt = "bad quality, worst quality"\n\n >>> video = pipe(\n ... prompt=prompt,\n ... negative_prompt=negative_prompt,\n ... 
num_frames=len(video),\n ... num_inference_steps=10,\n ... guidance_scale=2.0,\n ... conditioning_frames=conditioning_frames,\n ... generator=torch.Generator().manual_seed(42),\n ... ).frames[0]\n\n >>> export_to_gif(video, "animatediff_controlnet.gif", fps=8)\n ```\n' class AnimateDiffControlNetPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FreeInitMixin, AnimateDiffFreeNoiseMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['feature_extractor', 'image_encoder'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: Union[UNet2DConditionModel, UNetMotionModel], motion_adapter: MotionAdapter, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, feature_extractor: Optional[CLIPImageProcessor]=None, image_encoder: Optional[CLIPVisionModelWithProjection]=None): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) self.control_video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = 
self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, 
uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def decode_latents(self, latents, decode_chunk_size: int=16): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) video = [] for i in range(0, latents.shape[0], decode_chunk_size): batch_latents = latents[i:i + decode_chunk_size] batch_latents = self.vae.decode(batch_latents).sample video.append(batch_latents) video = torch.cat(video) video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, num_frames, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, video=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` 
and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, (str, list, dict))): raise ValueError(f'`prompt` has to be of type `str`, `list` or `dict` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(video, list): raise TypeError(f'For single controlnet: `image` must be of type `list` but got {type(video)}') if len(video) != num_frames: raise ValueError(f'Expected image to have length {num_frames} but got len(video)={len(video)!r}') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(video, list) or not isinstance(video[0], list): raise TypeError(f'For multiple controlnets: `image` must be a list of lists but got type(video)={type(video)!r}') if len(video[0]) != num_frames: raise ValueError(f'Expected image sublist to have length {num_frames} but got len(video[0])={len(video[0])!r}') if any((len(img) != len(video[0]) for img in video)): raise ValueError('All conditioning frame batches for multicontrolnet must be the same size') else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings is supported at the 
moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): if self.free_noise_enabled: latents = self._prepare_latents_free_noise(batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_video(self, video, width, height, batch_size, num_videos_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): video = self.control_video_processor.preprocess_video(video, height=height, width=width).to(dtype=torch.float32) video = video.permute(0, 2, 1, 3, 4).flatten(0, 1) video_batch_size = video.shape[0] if video_batch_size == 1: repeat_by = batch_size else: repeat_by = num_videos_per_prompt video = video.repeat_interleave(repeat_by, dim=0) video = video.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): video = torch.cat([video] * 2) return video @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, num_frames: Optional[int]=16, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[PipelineImageInput]=None, conditioning_frames: Optional[List[PipelineImageInput]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], decode_chunk_size: int=16): controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 self.check_inputs(prompt=prompt, 
height=height, width=width, num_frames=num_frames, negative_prompt=negative_prompt, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, video=conditioning_frames, controlnet_conditioning_scale=controlnet_conditioning_scale, control_guidance_start=control_guidance_start, control_guidance_end=control_guidance_end) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, (str, dict)): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None if self.free_noise_enabled: (prompt_embeds, negative_prompt_embeds) = self._encode_prompt_free_noise(prompt=prompt, num_frames=num_frames, device=device, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) else: (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance) if isinstance(controlnet, ControlNetModel): conditioning_frames = self.prepare_video(video=conditioning_frames, width=width, height=height, batch_size=batch_size * num_videos_per_prompt * num_frames, num_videos_per_prompt=num_videos_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) elif isinstance(controlnet, MultiControlNetModel): cond_prepared_videos = [] for frame_ in conditioning_frames: prepared_video = self.prepare_video(video=frame_, width=width, height=height, batch_size=batch_size * num_videos_per_prompt * num_frames, num_videos_per_prompt=num_videos_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) cond_prepared_videos.append(prepared_video) conditioning_frames = cond_prepared_videos else: assert False self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * 
num_videos_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: (latents, timesteps) = self._apply_free_init(latents, free_init_iter, num_inference_steps, device, latents.dtype, generator) self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] control_model_input = torch.transpose(control_model_input, 1, 2) control_model_input = control_model_input.reshape((-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])) (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=conditioning_frames, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample).sample if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == 'latent': video = latents else: 
video_tensor = self.decode_latents(latents, decode_chunk_size) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return AnimateDiffPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, MotionAdapter, UNet2DConditionModel, UNetMotionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..free_init_utils import FreeInitMixin from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import AnimateDiffPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers.models import MotionAdapter\n >>> from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler\n >>> from diffusers.utils import export_to_gif\n\n >>> adapter = MotionAdapter.from_pretrained(\n ... "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16\n ... )\n\n >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0"\n >>> scheduler = DDIMScheduler.from_pretrained(\n ... model_id,\n ... subfolder="scheduler",\n ... clip_sample=False,\n ... timestep_spacing="linspace",\n ... beta_schedule="linear",\n ... steps_offset=1,\n ... )\n >>> pipe = AnimateDiffSDXLPipeline.from_pretrained(\n ... model_id,\n ... motion_adapter=adapter,\n ... scheduler=scheduler,\n ... torch_dtype=torch.float16,\n ... variant="fp16",\n ... ).to("cuda")\n\n >>> # enable memory savings\n >>> pipe.enable_vae_slicing()\n >>> pipe.enable_vae_tiling()\n\n >>> output = pipe(\n ... prompt="a panda surfing in the ocean, realistic, high quality",\n ... negative_prompt="low quality, worst quality",\n ... num_inference_steps=20,\n ... guidance_scale=8,\n ... width=1024,\n ... height=1024,\n ... num_frames=16,\n ... 
)\n\n >>> frames = output.frames[0]\n >>> export_to_gif(frames, "animation.gif")\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class AnimateDiffSDXLPipeline(DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin, FreeInitMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: Union[UNet2DConditionModel, UNetMotionModel], motion_adapter: MotionAdapter, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler], image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, force_zeros_for_empty_prompt: bool=True): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, motion_adapter=motion_adapter, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** 
(len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_videos_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * 
[negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_videos_per_prompt).view(bs_embed * num_videos_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_videos_per_prompt).view(bs_embed * num_videos_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = 
self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, height, width, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise 
ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, num_frames: int=16, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: 
Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor num_videos_per_prompt = 1 original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, prompt_2, height, width, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, 
add_time_ids], dim=0) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_videos_per_prompt, 1) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance) if self.denoising_end is not None and isinstance(self.denoising_end, float) and (self.denoising_end > 0) and (self.denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_videos_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: (latents, timesteps) = self._apply_free_init(latents, free_init_iter, num_inference_steps, device, latents.dtype, generator) self._num_timesteps = len(timesteps) with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) negative_add_time_ids = callback_outputs.pop('negative_add_time_ids', negative_add_time_ids) 
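# NOTE (editorial sketch, not part of the upstream source): the block above implements the
# `callback_on_step_end` hook. After each scheduler step, every tensor named in
# `callback_on_step_end_tensor_inputs` is collected into `callback_kwargs`, handed to the
# callback, and any values it returns are written back via `callback_outputs.pop(...)`.
# Assuming the default tensor-input list (which contains 'latents'), a minimal, hypothetical
# callback that just records intermediate latents could look like:
#     intermediate_latents = []
#     def save_latents(pipe, step_index, timestep, callback_kwargs):
#         # 'latents' is present because it is in the default callback tensor inputs
#         intermediate_latents.append(callback_kwargs["latents"].detach().cpu())
#         return callback_kwargs  # must return a dict; returned keys override pipeline tensors
#     # usage sketch: pipe(..., callback_on_step_end=save_latents)
# The names `save_latents` and `intermediate_latents` are illustrative assumptions only.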
progress_bar.update() needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) if needs_upcasting: self.vae.to(dtype=torch.float16) self.maybe_free_model_hooks() if not return_dict: return (video,) return AnimateDiffPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel from ...models.controlnet_sparsectrl import SparseControlNetModel from ...models.lora import adjust_lora_scale_text_encoder from ...models.unets.unet_motion_model import MotionAdapter from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, randn_tensor from ...video_processor import VideoProcessor from ..free_init_utils import FreeInitMixin from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import AnimateDiffPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```python\n >>> import torch\n >>> from diffusers import AnimateDiffSparseControlNetPipeline\n >>> from diffusers.models import AutoencoderKL, MotionAdapter, SparseControlNetModel\n >>> from diffusers.schedulers import DPMSolverMultistepScheduler\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"\n >>> motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-3"\n >>> controlnet_id = "guoyww/animatediff-sparsectrl-scribble"\n >>> lora_adapter_id = "guoyww/animatediff-motion-lora-v1-5-3"\n >>> vae_id = "stabilityai/sd-vae-ft-mse"\n >>> device = "cuda"\n\n >>> motion_adapter = MotionAdapter.from_pretrained(motion_adapter_id, torch_dtype=torch.float16).to(device)\n >>> controlnet = SparseControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float16).to(device)\n >>> vae = AutoencoderKL.from_pretrained(vae_id, torch_dtype=torch.float16).to(device)\n >>> scheduler = DPMSolverMultistepScheduler.from_pretrained(\n ... model_id,\n ... subfolder="scheduler",\n ... beta_schedule="linear",\n ... algorithm_type="dpmsolver++",\n ... use_karras_sigmas=True,\n ... )\n >>> pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(\n ... model_id,\n ... motion_adapter=motion_adapter,\n ... controlnet=controlnet,\n ... vae=vae,\n ... scheduler=scheduler,\n ... torch_dtype=torch.float16,\n ... 
).to(device)\n >>> pipe.load_lora_weights(lora_adapter_id, adapter_name="motion_lora")\n >>> pipe.fuse_lora(lora_scale=1.0)\n\n >>> prompt = "an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality"\n >>> negative_prompt = "low quality, worst quality, letterboxed"\n\n >>> image_files = [\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-1.png",\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-2.png",\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-3.png",\n ... ]\n >>> condition_frame_indices = [0, 8, 15]\n >>> conditioning_frames = [load_image(img_file) for img_file in image_files]\n\n >>> video = pipe(\n ... prompt=prompt,\n ... negative_prompt=negative_prompt,\n ... num_inference_steps=25,\n ... conditioning_frames=conditioning_frames,\n ... controlnet_conditioning_scale=1.0,\n ... controlnet_frame_indices=condition_frame_indices,\n ... generator=torch.Generator().manual_seed(1337),\n ... ).frames[0]\n >>> export_to_gif(video, "output.gif")\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') class AnimateDiffSparseControlNetPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FreeInitMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['feature_extractor', 'image_encoder', 'motion_adapter'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: Union[UNet2DConditionModel, UNetMotionModel], motion_adapter: MotionAdapter, controlnet: SparseControlNetModel, scheduler: KarrasDiffusionSchedulers, feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: 
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, image=None, controlnet_conditioning_scale: float=1.0): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, SparseControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, SparseControlNetModel)): if isinstance(image, list): for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: self.check_image(image, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, SparseControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, SparseControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') else: assert False def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image(self, image, width, height, device, dtype): image = self.control_image_processor.preprocess(image, height=height, width=width) controlnet_images = image.unsqueeze(0).to(device, dtype) (batch_size, num_frames, channels, height, width) = controlnet_images.shape assert controlnet_images.min() >= 0 and controlnet_images.max() <= 1 if self.controlnet.use_simplified_condition_embedding: controlnet_images = controlnet_images.reshape(batch_size * num_frames, channels, height, width) controlnet_images = 2 * controlnet_images - 1 conditioning_frames = retrieve_latents(self.vae.encode(controlnet_images)) * self.vae.config.scaling_factor conditioning_frames = conditioning_frames.reshape(batch_size, num_frames, 4, height // self.vae_scale_factor, width // self.vae_scale_factor) else: conditioning_frames = controlnet_images conditioning_frames = conditioning_frames.permute(0, 2, 1, 3, 4) return conditioning_frames def prepare_sparse_control_conditioning(self, conditioning_frames: torch.Tensor, num_frames: int, controlnet_frame_indices: int, device: torch.device, dtype: torch.dtype) -> Tuple[torch.Tensor, torch.Tensor]: assert conditioning_frames.shape[2] >= len(controlnet_frame_indices) (batch_size, channels, _, height, width) = conditioning_frames.shape controlnet_cond = torch.zeros((batch_size, channels, num_frames, height, width), dtype=dtype, device=device) controlnet_cond_mask = torch.zeros((batch_size, 1, num_frames, height, width), dtype=dtype, device=device) controlnet_cond[:, :, controlnet_frame_indices] = conditioning_frames[:, :, :len(controlnet_frame_indices)] controlnet_cond_mask[:, :, controlnet_frame_indices] = 1 return (controlnet_cond, controlnet_cond_mask) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_frames: int=16, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: int=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, conditioning_frames: 
Optional[List[PipelineImageInput]]=None, output_type: str='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, controlnet_frame_indices: List[int]=[0], guess_mode: bool=False, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 self.check_inputs(prompt=prompt, height=height, width=width, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ip_adapter_image=ip_adapter_image, ip_adapter_image_embeds=ip_adapter_image_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, image=conditioning_frames, controlnet_conditioning_scale=controlnet_conditioning_scale) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, SparseControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance) conditioning_frames = self.prepare_image(conditioning_frames, width, height, device, controlnet.dtype) (controlnet_cond, controlnet_cond_mask) = self.prepare_sparse_control_conditioning(conditioning_frames, num_frames, controlnet_frame_indices, device, controlnet.dtype) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: (latents, timesteps) = self._apply_free_init(latents, free_init_iter, num_inference_steps, 
device, latents.dtype, generator) self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=controlnet_cond, conditioning_mask=controlnet_cond_mask, conditioning_scale=controlnet_conditioning_scale, guess_mode=guess_mode, return_dict=False) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample).sample if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return AnimateDiffPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel from ...models.lora import adjust_lora_scale_text_encoder from ...models.unets.unet_motion_model import MotionAdapter from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..free_init_utils import FreeInitMixin from ..free_noise_utils import AnimateDiffFreeNoiseMixin from ..pipeline_utils import 
DiffusionPipeline, StableDiffusionMixin from .pipeline_output import AnimateDiffPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import imageio\n >>> import requests\n >>> import torch\n >>> from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter\n >>> from diffusers.utils import export_to_gif\n >>> from io import BytesIO\n >>> from PIL import Image\n\n >>> adapter = MotionAdapter.from_pretrained(\n ... "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16\n ... )\n >>> pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(\n ... "SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter\n ... ).to("cuda")\n >>> pipe.scheduler = DDIMScheduler(\n ... beta_schedule="linear", steps_offset=1, clip_sample=False, timestep_spacing="linspace"\n ... )\n\n\n >>> def load_video(file_path: str):\n ... images = []\n\n ... if file_path.startswith(("http://", "https://")):\n ... # If the file_path is a URL\n ... response = requests.get(file_path)\n ... response.raise_for_status()\n ... content = BytesIO(response.content)\n ... vid = imageio.get_reader(content)\n ... else:\n ... # Assuming it\'s a local file path\n ... vid = imageio.get_reader(file_path)\n\n ... for frame in vid:\n ... pil_image = Image.fromarray(frame)\n ... images.append(pil_image)\n\n ... return images\n\n\n >>> video = load_video(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif"\n ... )\n >>> output = pipe(\n ... video=video, prompt="panda playing a guitar, on a boat, in the ocean, high quality", strength=0.5\n ... )\n >>> frames = output.frames[0]\n >>> export_to_gif(frames, "animation.gif")\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class AnimateDiffVideoToVideoPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FreeInitMixin, AnimateDiffFreeNoiseMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['feature_extractor', 'image_encoder', 'motion_adapter'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, motion_adapter: MotionAdapter, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, (str, dict)): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = 
self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = 
image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def encode_video(self, video, generator, decode_chunk_size: int=16) -> torch.Tensor: latents = [] for i in range(0, len(video), decode_chunk_size): batch_video = video[i:i + decode_chunk_size] batch_video = retrieve_latents(self.vae.encode(batch_video), generator=generator) latents.append(batch_video) return torch.cat(latents) def decode_latents(self, latents, decode_chunk_size: int=16): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) video = [] for i in range(0, latents.shape[0], decode_chunk_size): batch_latents = latents[i:i + decode_chunk_size] batch_latents = self.vae.decode(batch_latents).sample video.append(batch_latents) video = torch.cat(video) video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, strength, height, width, video=None, latents=None, negative_prompt=None, 
prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, (str, list, dict))): raise ValueError(f'`prompt` has to be of type `str`, `list` or `dict` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if video is not None and latents is not None: raise ValueError('Only one of `video` or `latents` should be provided') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def get_timesteps(self, num_inference_steps, timesteps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = timesteps[t_start * self.scheduler.order:] return (timesteps, num_inference_steps - t_start) def prepare_latents(self, video: Optional[torch.Tensor]=None, height: int=64, width: int=64, num_channels_latents: int=4, batch_size: int=1, timestep: Optional[int]=None, dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, decode_chunk_size: int=16, add_noise: bool=False) -> torch.Tensor: num_frames = video.shape[1] if latents is None else latents.shape[2] shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: if self.vae.config.force_upcast: video = video.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): if len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') init_latents = [self.encode_video(video[i], generator[i], decode_chunk_size).unsqueeze(0) for i in range(batch_size)] else: init_latents = [self.encode_video(vid, generator, decode_chunk_size).unsqueeze(0) for vid in video] init_latents = torch.cat(init_latents, dim=0) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: error_message = f'You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). 
Please make sure to update your script to pass as many initial images as text prompts' raise ValueError(error_message) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.add_noise(init_latents, noise, timestep).permute(0, 2, 1, 3, 4) else: if shape != latents.shape: raise ValueError(f'`latents` expected to have shape={shape!r}, but found latents.shape={latents.shape!r}') latents = latents.to(device, dtype=dtype) if add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.add_noise(latents, noise, timestep) return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() def __call__(self, video: List[List[PipelineImageInput]]=None, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, enforce_inference_steps: bool=False, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, guidance_scale: float=7.5, strength: float=0.8, negative_prompt: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], decode_chunk_size: int=16): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 self.check_inputs(prompt=prompt, strength=strength, height=height, width=width, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, video=video, latents=latents, ip_adapter_image=ip_adapter_image, ip_adapter_image_embeds=ip_adapter_image_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, (str, dict)): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device dtype = self.dtype if not enforce_inference_steps: (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, 
timesteps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) else: denoising_inference_steps = int(num_inference_steps / strength) (timesteps, denoising_inference_steps) = retrieve_timesteps(self.scheduler, denoising_inference_steps, device, timesteps, sigmas) timesteps = timesteps[-num_inference_steps:] latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) if latents is None: video = self.video_processor.preprocess_video(video, height=height, width=width) video = video.permute(0, 2, 1, 3, 4) video = video.to(device=device, dtype=dtype) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(video=video, height=height, width=width, num_channels_latents=num_channels_latents, batch_size=batch_size * num_videos_per_prompt, timestep=latent_timestep, dtype=dtype, device=device, generator=generator, latents=latents, decode_chunk_size=decode_chunk_size, add_noise=enforce_inference_steps) text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None num_frames = latents.shape[2] if self.free_noise_enabled: (prompt_embeds, negative_prompt_embeds) = self._encode_prompt_free_noise(prompt=prompt, num_frames=num_frames, device=device, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) else: (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: (latents, timesteps) = self._apply_free_init(latents, free_init_iter, num_inference_steps, device, latents.dtype, generator) num_inference_steps = len(timesteps) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, timesteps, strength, device) self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs).sample if self.do_classifier_free_guidance: 
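# Classifier-free guidance: because the latent batch was doubled above (unconditional half first, text-conditioned half second, matching torch.cat([negative_prompt_embeds, prompt_embeds])), the UNet output is split with chunk(2) and recombined as
#     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# Worked example with illustrative numbers only: for guidance_scale=7.5, an unconditional value of 0.1 and a text-conditioned value of 0.3 combine to 0.1 + 7.5 * (0.3 - 0.1) = 1.6, pushing the prediction further in the direction suggested by the prompt.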
(noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents, decode_chunk_size) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return AnimateDiffPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel from ...models.lora import adjust_lora_scale_text_encoder from ...models.unets.unet_motion_model import MotionAdapter from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, randn_tensor from ...video_processor import VideoProcessor from ..controlnet.multicontrolnet import MultiControlNetModel from ..free_init_utils import FreeInitMixin from ..free_noise_utils import AnimateDiffFreeNoiseMixin from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import AnimateDiffPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from PIL import Image\n >>> from tqdm.auto import tqdm\n\n >>> from diffusers import AnimateDiffVideoToVideoControlNetPipeline\n >>> from diffusers.utils import export_to_gif, load_video\n >>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter, LCMScheduler\n\n >>> controlnet = ControlNetModel.from_pretrained(\n ... "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16\n ... )\n >>> motion_adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM")\n >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)\n\n >>> pipe = AnimateDiffVideoToVideoControlNetPipeline.from_pretrained(\n ... "SG161222/Realistic_Vision_V5.1_noVAE",\n ... motion_adapter=motion_adapter,\n ... controlnet=controlnet,\n ... vae=vae,\n ... ).to(device="cuda", dtype=torch.float16)\n\n >>> pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")\n >>> pipe.load_lora_weights(\n ... 
"wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora"\n ... )\n >>> pipe.set_adapters(["lcm-lora"], [0.8])\n\n >>> video = load_video(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/dance.gif"\n ... )\n >>> video = [frame.convert("RGB") for frame in video]\n\n >>> from controlnet_aux.processor import OpenposeDetector\n\n >>> open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators").to("cuda")\n >>> for frame in tqdm(video):\n ... conditioning_frames.append(open_pose(frame))\n\n >>> prompt = "astronaut in space, dancing"\n >>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"\n\n >>> strength = 0.8\n >>> with torch.inference_mode():\n ... video = pipe(\n ... video=video,\n ... prompt=prompt,\n ... negative_prompt=negative_prompt,\n ... num_inference_steps=10,\n ... guidance_scale=2.0,\n ... controlnet_conditioning_scale=0.75,\n ... conditioning_frames=conditioning_frames,\n ... strength=strength,\n ... generator=torch.Generator().manual_seed(42),\n ... ).frames[0]\n\n >>> video = [frame.resize(conditioning_frames[0].size) for frame in video]\n >>> export_to_gif(video, f"animatediff_vid2vid_controlnet.gif", fps=8)\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class AnimateDiffVideoToVideoControlNetPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FreeInitMixin, AnimateDiffFreeNoiseMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['feature_extractor', 'image_encoder', 'motion_adapter'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, motion_adapter: MotionAdapter, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) self.control_video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, (str, dict)): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 
'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), 
output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def encode_video(self, video, generator, decode_chunk_size: int=16) -> torch.Tensor: latents = [] for i in range(0, len(video), decode_chunk_size): batch_video = video[i:i + decode_chunk_size] batch_video = retrieve_latents(self.vae.encode(batch_video), generator=generator) latents.append(batch_video) return torch.cat(latents) def decode_latents(self, latents, decode_chunk_size: int=16): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) video = [] for i in range(0, latents.shape[0], decode_chunk_size): batch_latents = latents[i:i + decode_chunk_size] batch_latents = self.vae.decode(batch_latents).sample video.append(batch_latents) video = torch.cat(video) video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta 
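# `eta` is the DDIM variance parameter; it is added to the kwargs only when the scheduler's step() signature actually accepts it, and the same inspection is repeated below for `generator`, so schedulers whose step() takes neither argument are simply called without these extras.
# Equivalent standalone check (illustrative sketch, assuming a `scheduler` object is in scope):
#     accepts_eta = 'eta' in inspect.signature(scheduler.step).parameters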
accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, strength, height, width, video=None, conditioning_frames=None, latents=None, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, (str, list, dict))): raise ValueError(f'`prompt` has to be of type `str`, `list` or `dict` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if video is not None and latents is not None: raise ValueError('Only one of `video` or `latents` should be provided') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. 
The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) num_frames = len(video) if latents is None else latents.shape[2] if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(conditioning_frames, list): raise TypeError(f'For single controlnet, `image` must be of type `list` but got {type(conditioning_frames)}') if len(conditioning_frames) != num_frames: raise ValueError(f'Expected image to have length {num_frames} but got len(conditioning_frames)={len(conditioning_frames)!r}') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(conditioning_frames, list) or not isinstance(conditioning_frames[0], list): raise TypeError(f'For multiple controlnets: `image` must be a list of lists but got type(conditioning_frames)={type(conditioning_frames)!r}') if len(conditioning_frames[0]) != num_frames: raise ValueError(f'Expected length of image sublist to be {num_frames} but got len(conditioning_frames)={len(conditioning_frames)!r}') if any((len(img) != len(conditioning_frames[0]) for img in conditioning_frames)): raise ValueError('All conditioning frame batches for multicontrolnet must be the same size') else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be of type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings is supported at the moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. 
Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") def get_timesteps(self, num_inference_steps, timesteps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = timesteps[t_start * self.scheduler.order:] return (timesteps, num_inference_steps - t_start) def prepare_latents(self, video: Optional[torch.Tensor]=None, height: int=64, width: int=64, num_channels_latents: int=4, batch_size: int=1, timestep: Optional[int]=None, dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, decode_chunk_size: int=16, add_noise: bool=False) -> torch.Tensor: num_frames = video.shape[1] if latents is None else latents.shape[2] shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: if self.vae.config.force_upcast: video = video.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): if len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') init_latents = [self.encode_video(video[i], generator[i], decode_chunk_size).unsqueeze(0) for i in range(batch_size)] else: init_latents = [self.encode_video(vid, generator, decode_chunk_size).unsqueeze(0) for vid in video] init_latents = torch.cat(init_latents, dim=0) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: error_message = f'You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). 
Please make sure to update your script to pass as many initial images as text prompts' raise ValueError(error_message) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.add_noise(init_latents, noise, timestep).permute(0, 2, 1, 3, 4) else: if shape != latents.shape: raise ValueError(f'`latents` expected to have shape={shape!r}, but found latents.shape={latents.shape!r}') latents = latents.to(device, dtype=dtype) if add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.add_noise(latents, noise, timestep) return latents def prepare_conditioning_frames(self, video, width, height, batch_size, num_videos_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): video = self.control_video_processor.preprocess_video(video, height=height, width=width).to(dtype=torch.float32) video = video.permute(0, 2, 1, 3, 4).flatten(0, 1) video_batch_size = video.shape[0] if video_batch_size == 1: repeat_by = batch_size else: repeat_by = num_videos_per_prompt video = video.repeat_interleave(repeat_by, dim=0) video = video.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): video = torch.cat([video] * 2) return video @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() def __call__(self, video: List[List[PipelineImageInput]]=None, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, enforce_inference_steps: bool=False, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, guidance_scale: float=7.5, strength: float=0.8, negative_prompt: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, conditioning_frames: Optional[List[PipelineImageInput]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], decode_chunk_size: int=16): controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * 
[control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 self.check_inputs(prompt=prompt, strength=strength, height=height, width=width, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, video=video, conditioning_frames=conditioning_frames, latents=latents, ip_adapter_image=ip_adapter_image, ip_adapter_image_embeds=ip_adapter_image_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, controlnet_conditioning_scale=controlnet_conditioning_scale, control_guidance_start=control_guidance_start, control_guidance_end=control_guidance_end) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, (str, dict)): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device dtype = self.dtype if not enforce_inference_steps: (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, timesteps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) else: denoising_inference_steps = int(num_inference_steps / strength) (timesteps, denoising_inference_steps) = retrieve_timesteps(self.scheduler, denoising_inference_steps, device, timesteps, sigmas) timesteps = timesteps[-num_inference_steps:] latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) if latents is None: video = self.video_processor.preprocess_video(video, height=height, width=width) video = video.permute(0, 2, 1, 3, 4) video = video.to(device=device, dtype=dtype) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(video=video, height=height, width=width, num_channels_latents=num_channels_latents, batch_size=batch_size * num_videos_per_prompt, timestep=latent_timestep, dtype=dtype, device=device, generator=generator, latents=latents, decode_chunk_size=decode_chunk_size, add_noise=enforce_inference_steps) text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None num_frames = latents.shape[2] if self.free_noise_enabled: (prompt_embeds, negative_prompt_embeds) = self._encode_prompt_free_noise(prompt=prompt, num_frames=num_frames, device=device, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) else: (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, 
negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance) if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) if isinstance(controlnet, ControlNetModel): conditioning_frames = self.prepare_conditioning_frames(video=conditioning_frames, width=width, height=height, batch_size=batch_size * num_videos_per_prompt * num_frames, num_videos_per_prompt=num_videos_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) elif isinstance(controlnet, MultiControlNetModel): cond_prepared_videos = [] for frame_ in conditioning_frames: prepared_video = self.prepare_conditioning_frames(video=frame_, width=width, height=height, batch_size=batch_size * num_videos_per_prompt * num_frames, num_videos_per_prompt=num_videos_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) cond_prepared_videos.append(prepared_video) conditioning_frames = cond_prepared_videos else: assert False extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: (latents, timesteps) = self._apply_free_init(latents, free_init_iter, num_inference_steps, device, latents.dtype, generator) num_inference_steps = len(timesteps) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, timesteps, strength, device) self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if 
isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] control_model_input = torch.transpose(control_model_input, 1, 2) control_model_input = control_model_input.reshape((-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])) (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=conditioning_frames, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample).sample if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents, decode_chunk_size) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return AnimateDiffPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/animatediff/pipeline_output.py from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image import torch from ...utils import BaseOutput @dataclass class AnimateDiffPipelineOutput(BaseOutput): frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] # File: diffusers-main/src/diffusers/pipelines/audioldm/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.27.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import AudioLDMPipeline _dummy_objects.update({'AudioLDMPipeline': AudioLDMPipeline}) else: _import_structure['pipeline_audioldm'] = ['AudioLDMPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.27.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import 
AudioLDMPipeline else: from .pipeline_audioldm import AudioLDMPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/audioldm/pipeline_audioldm.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch import torch.nn.functional as F from transformers import ClapTextModelWithProjection, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import AudioLDMPipeline\n >>> import torch\n >>> import scipy\n\n >>> repo_id = "cvssp/audioldm-s-full-v2"\n >>> pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs"\n >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0]\n\n >>> # save the audio sample as a .wav file\n >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio)\n ```\n' class AudioLDMPipeline(DiffusionPipeline, StableDiffusionMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' def __init__(self, vae: AutoencoderKL, text_encoder: ClapTextModelWithProjection, tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, vocoder: SpeechT5HifiGan): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, vocoder=vocoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def _encode_prompt(self, prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLAP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device)) prompt_embeds = prompt_embeds.text_embeds prompt_embeds = F.normalize(prompt_embeds, dim=-1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) (bs_embed, seq_len) = prompt_embeds.shape 
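# Unlike the CLIP text encoders in the pipelines above, CLAP returns a pooled, L2-normalized sentence embedding, so `prompt_embeds` is 2-D here and the name `seq_len` actually holds the projection dimension rather than a token count.
# The repeat/view below duplicates each prompt embedding once per requested waveform; with illustrative shapes only, for a batch of 2 prompts, a 512-dim projection and n waveforms per prompt:
#     (2, 512) -> (2, 512 * n) -> (2 * n, 512)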
prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') uncond_input_ids = uncond_input.input_ids.to(device) attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input_ids, attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds.text_embeds negative_prompt_embeds = F.normalize(negative_prompt_embeds, dim=-1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents mel_spectrogram = self.vae.decode(latents).sample return mel_spectrogram def mel_spectrogram_to_waveform(self, mel_spectrogram): if mel_spectrogram.dim() == 4: mel_spectrogram = mel_spectrogram.squeeze(1) waveform = self.vocoder(mel_spectrogram) waveform = waveform.cpu().float() return waveform def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor if audio_length_in_s < min_audio_length_in_s: raise ValueError(f'`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but is {audio_length_in_s}.') if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: raise ValueError(f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of {self.vae_scale_factor}.") if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt
is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(self.vocoder.config.model_in_dim) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, audio_length_in_s: Optional[float]=None, num_inference_steps: int=10, guidance_scale: float=2.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_waveforms_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, output_type: Optional[str]='np'): vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor height = int(audio_length_in_s / vocoder_upsample_factor) original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) if height % self.vae_scale_factor != 0: height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor logger.info(f'Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} so that it can be handled by the model. 
It will be cut to {audio_length_in_s} after the denoising process.') self.check_inputs(prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_waveforms_per_prompt, num_channels_latents, height, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=None, class_labels=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) mel_spectrogram = self.decode_latents(latents) audio = self.mel_spectrogram_to_waveform(mel_spectrogram) audio = audio[:, :original_waveform_length] if output_type == 'np': audio = audio.numpy() if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio) # File: diffusers-main/src/diffusers/pipelines/audioldm2/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, is_transformers_version _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.27.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['modeling_audioldm2'] = ['AudioLDM2ProjectionModel', 'AudioLDM2UNet2DConditionModel'] _import_structure['pipeline_audioldm2'] = ['AudioLDM2Pipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.27.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .modeling_audioldm2 import AudioLDM2ProjectionModel, 
AudioLDM2UNet2DConditionModel from .pipeline_audioldm2 import AudioLDM2Pipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/audioldm2/modeling_audioldm2.py from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import UNet2DConditionLoadersMixin from ...models.activations import get_activation from ...models.attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor from ...models.embeddings import TimestepEmbedding, Timesteps from ...models.modeling_utils import ModelMixin from ...models.resnet import Downsample2D, ResnetBlock2D, Upsample2D from ...models.transformers.transformer_2d import Transformer2DModel from ...models.unets.unet_2d_blocks import DownBlock2D, UpBlock2D from ...models.unets.unet_2d_condition import UNet2DConditionOutput from ...utils import BaseOutput, is_torch_version, logging logger = logging.get_logger(__name__) def add_special_tokens(hidden_states, attention_mask, sos_token, eos_token): batch_size = hidden_states.shape[0] if attention_mask is not None: new_attn_mask_step = attention_mask.new_ones((batch_size, 1)) attention_mask = torch.concat([new_attn_mask_step, attention_mask, new_attn_mask_step], dim=-1) sos_token = sos_token.expand(batch_size, 1, -1) eos_token = eos_token.expand(batch_size, 1, -1) hidden_states = torch.concat([sos_token, hidden_states, eos_token], dim=1) return (hidden_states, attention_mask) @dataclass class AudioLDM2ProjectionModelOutput(BaseOutput): hidden_states: torch.Tensor attention_mask: Optional[torch.LongTensor] = None class AudioLDM2ProjectionModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, text_encoder_dim, text_encoder_1_dim, langauge_model_dim, use_learned_position_embedding=None, max_seq_length=None): super().__init__() self.projection = nn.Linear(text_encoder_dim, langauge_model_dim) self.projection_1 = nn.Linear(text_encoder_1_dim, langauge_model_dim) self.sos_embed = nn.Parameter(torch.ones(langauge_model_dim)) self.eos_embed = nn.Parameter(torch.ones(langauge_model_dim)) self.sos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim)) self.eos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim)) self.use_learned_position_embedding = use_learned_position_embedding if self.use_learned_position_embedding is not None: self.learnable_positional_embedding = torch.nn.Parameter(torch.zeros((1, text_encoder_1_dim, max_seq_length))) def forward(self, hidden_states: Optional[torch.Tensor]=None, hidden_states_1: Optional[torch.Tensor]=None, attention_mask: Optional[torch.LongTensor]=None, attention_mask_1: Optional[torch.LongTensor]=None): hidden_states = self.projection(hidden_states) (hidden_states, attention_mask) = add_special_tokens(hidden_states, attention_mask, sos_token=self.sos_embed, eos_token=self.eos_embed) if self.use_learned_position_embedding is not None: hidden_states_1 = (hidden_states_1.permute(0, 2, 1) + self.learnable_positional_embedding).permute(0, 2, 1) hidden_states_1 = self.projection_1(hidden_states_1) (hidden_states_1, attention_mask_1) = add_special_tokens(hidden_states_1, attention_mask_1, 
sos_token=self.sos_embed_1, eos_token=self.eos_embed_1) hidden_states = torch.cat([hidden_states, hidden_states_1], dim=1) if attention_mask is None and attention_mask_1 is not None: attention_mask = attention_mask_1.new_ones(hidden_states[:2]) elif attention_mask is not None and attention_mask_1 is None: attention_mask_1 = attention_mask.new_ones(hidden_states_1[:2]) if attention_mask is not None and attention_mask_1 is not None: attention_mask = torch.cat([attention_mask, attention_mask_1], dim=-1) else: attention_mask = None return AudioLDM2ProjectionModelOutput(hidden_states=hidden_states, attention_mask=attention_mask) class AudioLDM2UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): _supports_gradient_checkpointing = True @register_to_config def __init__(self, sample_size: Optional[int]=None, in_channels: int=4, out_channels: int=4, flip_sin_to_cos: bool=True, freq_shift: int=0, down_block_types: Tuple[str]=('CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D'), mid_block_type: Optional[str]='UNetMidBlock2DCrossAttn', up_block_types: Tuple[str]=('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D'), only_cross_attention: Union[bool, Tuple[bool]]=False, block_out_channels: Tuple[int]=(320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]]=2, downsample_padding: int=1, mid_block_scale_factor: float=1, act_fn: str='silu', norm_num_groups: Optional[int]=32, norm_eps: float=1e-05, cross_attention_dim: Union[int, Tuple[int]]=1280, transformer_layers_per_block: Union[int, Tuple[int]]=1, attention_head_dim: Union[int, Tuple[int]]=8, num_attention_heads: Optional[Union[int, Tuple[int]]]=None, use_linear_projection: bool=False, class_embed_type: Optional[str]=None, num_class_embeds: Optional[int]=None, upcast_attention: bool=False, resnet_time_scale_shift: str='default', time_embedding_type: str='positional', time_embedding_dim: Optional[int]=None, time_embedding_act_fn: Optional[str]=None, timestep_post_act: Optional[str]=None, time_cond_proj_dim: Optional[int]=None, conv_in_kernel: int=3, conv_out_kernel: int=3, projection_class_embeddings_input_dim: Optional[int]=None, class_embeddings_concat: bool=False): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError('At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.') num_attention_heads = num_attention_heads or attention_head_dim if len(down_block_types) != len(up_block_types): raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError(f'Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. 
`down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.') if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError(f'Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.') if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError(f'Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}.') if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError(f'Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}.') conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) if time_embedding_type == 'positional': time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError(f'{time_embedding_type} does not exist. Please make sure to use `positional`.') self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim) if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == 'timestep': self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == 'identity': self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == 'projection': if projection_class_embeddings_input_dim is None: raise ValueError("`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set") self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == 'simple_projection': if projection_class_embeddings_input_dim is None: raise ValueError("`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set") self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * 
len(down_block_types) if class_embeddings_concat: blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift) self.down_blocks.append(down_block) if mid_block_type == 'UNetMidBlock2DCrossAttn': self.mid_block = UNetMidBlock2DCrossAttn(transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention) else: raise ValueError(f'unknown mid_block_type : {mid_block_type}. Should be `UNetMidBlock2DCrossAttn` for AudioLDM2.') self.num_upsamplers = 0 reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block(up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift) self.up_blocks.append(up_block) prev_output_channel = output_channel if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding 
= (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def set_attention_slice(self, slice_size): sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, 'set_attention_slice'): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == 'auto': slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == 'max': slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError(f'You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.') for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f'size {size} has to be smaller or equal to {dim}.') def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, 'set_attention_slice'): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None, return_dict: bool=True, encoder_hidden_states_1: Optional[torch.Tensor]=None, encoder_attention_mask_1: Optional[torch.Tensor]=None) -> Union[UNet2DConditionOutput, Tuple]: default_overall_up_factor = 2 ** self.num_upsamplers forward_upsample_size = False upsample_size = None if any((s % default_overall_up_factor != 0 for s in sample.shape[-2:])): logger.info('Forward upsample size to force interpolation output size.') forward_upsample_size = True if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) if encoder_attention_mask_1 is not None: encoder_attention_mask_1 = (1 - encoder_attention_mask_1.to(sample.dtype)) * -10000.0 encoder_attention_mask_1 = encoder_attention_mask_1.unsqueeze(1) timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError('class_labels should be provided when num_class_embeds > 0') if self.config.class_embed_type == 'timestep': class_labels = self.time_proj(class_labels) class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) sample = self.conv_in(sample) down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, 
encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states_1=encoder_hidden_states_1, encoder_attention_mask_1=encoder_attention_mask_1) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb) down_block_res_samples += res_samples if self.mid_block is not None: sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states_1=encoder_hidden_states_1, encoder_attention_mask_1=encoder_attention_mask_1) for (i, upsample_block) in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets):] down_block_res_samples = down_block_res_samples[:-len(upsample_block.resnets)] if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, encoder_hidden_states_1=encoder_hidden_states_1, encoder_attention_mask_1=encoder_attention_mask_1) else: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size) if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if not return_dict: return (sample,) return UNet2DConditionOutput(sample=sample) def get_down_block(down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, transformer_layers_per_block=1, num_attention_heads=None, resnet_groups=None, cross_attention_dim=None, downsample_padding=None, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift='default'): down_block_type = down_block_type[7:] if down_block_type.startswith('UNetRes') else down_block_type if down_block_type == 'DownBlock2D': return DownBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift) elif down_block_type == 'CrossAttnDownBlock2D': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlock2D') return CrossAttnDownBlock2D(num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift) raise ValueError(f'{down_block_type} does not exist.') def 
get_up_block(up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, transformer_layers_per_block=1, num_attention_heads=None, resnet_groups=None, cross_attention_dim=None, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift='default'): up_block_type = up_block_type[7:] if up_block_type.startswith('UNetRes') else up_block_type if up_block_type == 'UpBlock2D': return UpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift) elif up_block_type == 'CrossAttnUpBlock2D': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnUpBlock2D') return CrossAttnUpBlock2D(num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift) raise ValueError(f'{up_block_type} does not exist.') class CrossAttnDownBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads=1, cross_attention_dim=1280, output_scale_factor=1.0, downsample_padding=1, add_downsample=True, use_linear_projection=False, only_cross_attention=False, upcast_attention=False): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: raise ValueError(f'Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}') self.cross_attention_dim = cross_attention_dim for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) for j in range(len(cross_attention_dim)): attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim[j], norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, double_self_attention=True if cross_attention_dim[j] is None else False)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states_1: Optional[torch.Tensor]=None, encoder_attention_mask_1: Optional[torch.Tensor]=None): output_states = () num_layers = len(self.resnets) num_attention_per_layer = len(self.attentions) // num_layers encoder_hidden_states_1 = encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states encoder_attention_mask_1 = encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask for i in range(num_layers): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.resnets[i]), hidden_states, temb, **ckpt_kwargs) for (idx, cross_attention_dim) in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), hidden_states, forward_encoder_hidden_states, None, None, cross_attention_kwargs, attention_mask, forward_encoder_attention_mask, **ckpt_kwargs)[0] else: hidden_states = self.resnets[i](hidden_states, temb) for (idx, cross_attention_dim) in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = 
encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = self.attentions[i * num_attention_per_layer + idx](hidden_states, attention_mask=attention_mask, encoder_hidden_states=forward_encoder_hidden_states, encoder_attention_mask=forward_encoder_attention_mask, return_dict=False)[0] output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class UNetMidBlock2DCrossAttn(nn.Module): def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads=1, output_scale_factor=1.0, cross_attention_dim=1280, use_linear_projection=False, upcast_attention=False): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: raise ValueError(f'Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention dims is less than or equal to 4. Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}') self.cross_attention_dim = cross_attention_dim resnets = [ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)] attentions = [] for i in range(num_layers): for j in range(len(cross_attention_dim)): attentions.append(Transformer2DModel(num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim[j], norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, double_self_attention=True if cross_attention_dim[j] is None else False)) resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states_1: Optional[torch.Tensor]=None, encoder_attention_mask_1: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = 
self.resnets[0](hidden_states, temb) num_attention_per_layer = len(self.attentions) // (len(self.resnets) - 1) encoder_hidden_states_1 = encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states encoder_attention_mask_1 = encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask for i in range(len(self.resnets[1:])): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} for (idx, cross_attention_dim) in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), hidden_states, forward_encoder_hidden_states, None, None, cross_attention_kwargs, attention_mask, forward_encoder_attention_mask, **ckpt_kwargs)[0] hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.resnets[i + 1]), hidden_states, temb, **ckpt_kwargs) else: for (idx, cross_attention_dim) in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = self.attentions[i * num_attention_per_layer + idx](hidden_states, attention_mask=attention_mask, encoder_hidden_states=forward_encoder_hidden_states, encoder_attention_mask=forward_encoder_attention_mask, return_dict=False)[0] hidden_states = self.resnets[i + 1](hidden_states, temb) return hidden_states class CrossAttnUpBlock2D(nn.Module): def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads=1, cross_attention_dim=1280, output_scale_factor=1.0, add_upsample=True, use_linear_projection=False, only_cross_attention=False, upcast_attention=False): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: raise ValueError(f'Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}') self.cross_attention_dim = cross_attention_dim for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) for j in range(len(cross_attention_dim)): attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim[j], norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, double_self_attention=True if cross_attention_dim[j] is None else False)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, upsample_size: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states_1: Optional[torch.Tensor]=None, encoder_attention_mask_1: Optional[torch.Tensor]=None): num_layers = len(self.resnets) num_attention_per_layer = len(self.attentions) // num_layers encoder_hidden_states_1 = encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states encoder_attention_mask_1 = encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask for i in range(num_layers): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.resnets[i]), hidden_states, temb, **ckpt_kwargs) for (idx, cross_attention_dim) in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), hidden_states, forward_encoder_hidden_states, None, None, 
cross_attention_kwargs, attention_mask, forward_encoder_attention_mask, **ckpt_kwargs)[0] else: hidden_states = self.resnets[i](hidden_states, temb) for (idx, cross_attention_dim) in enumerate(self.cross_attention_dim): if cross_attention_dim is not None and idx <= 1: forward_encoder_hidden_states = encoder_hidden_states forward_encoder_attention_mask = encoder_attention_mask elif cross_attention_dim is not None and idx > 1: forward_encoder_hidden_states = encoder_hidden_states_1 forward_encoder_attention_mask = encoder_attention_mask_1 else: forward_encoder_hidden_states = None forward_encoder_attention_mask = None hidden_states = self.attentions[i * num_attention_per_layer + idx](hidden_states, attention_mask=attention_mask, encoder_hidden_states=forward_encoder_hidden_states, encoder_attention_mask=forward_encoder_attention_mask, return_dict=False)[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states # File: diffusers-main/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import ClapFeatureExtractor, ClapModel, GPT2Model, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan, T5EncoderModel, T5Tokenizer, T5TokenizerFast, VitsModel, VitsTokenizer from ...models import AutoencoderKL from ...schedulers import KarrasDiffusionSchedulers from ...utils import is_accelerate_available, is_accelerate_version, is_librosa_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel if is_librosa_available(): import librosa logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import scipy\n >>> import torch\n >>> from diffusers import AudioLDM2Pipeline\n\n >>> repo_id = "cvssp/audioldm2"\n >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n\n >>> # define the prompts\n >>> prompt = "The sound of a hammer hitting a wooden surface."\n >>> negative_prompt = "Low quality."\n\n >>> # set the seed for generator\n >>> generator = torch.Generator("cuda").manual_seed(0)\n\n >>> # run the generation\n >>> audio = pipe(\n ... prompt,\n ... negative_prompt=negative_prompt,\n ... num_inference_steps=200,\n ... audio_length_in_s=10.0,\n ... num_waveforms_per_prompt=3,\n ... generator=generator,\n ... ).audios\n\n >>> # save the best audio sample (index 0) as a .wav file\n >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio[0])\n ```\n ```\n #Using AudioLDM2 for Text To Speech\n >>> import scipy\n >>> import torch\n >>> from diffusers import AudioLDM2Pipeline\n\n >>> repo_id = "anhnct/audioldm2_gigaspeech"\n >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n\n >>> # define the prompts\n >>> prompt = "A female reporter is speaking"\n >>> transcript = "wish you have a good day"\n\n >>> # set the seed for generator\n >>> generator = torch.Generator("cuda").manual_seed(0)\n\n >>> # run the generation\n >>> audio = pipe(\n ... prompt,\n ... transcription=transcript,\n ... num_inference_steps=200,\n ... audio_length_in_s=10.0,\n ... num_waveforms_per_prompt=2,\n ... generator=generator,\n ... 
max_new_tokens=512, #Must set max_new_tokens equal to 512 for TTS\n ... ).audios\n\n >>> # save the best audio sample (index 0) as a .wav file\n >>> scipy.io.wavfile.write("tts.wav", rate=16000, data=audio[0])\n ```\n' def prepare_inputs_for_generation(inputs_embeds, attention_mask=None, past_key_values=None, **kwargs): if past_key_values is not None: inputs_embeds = inputs_embeds[:, -1:] return {'inputs_embeds': inputs_embeds, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache')} class AudioLDM2Pipeline(DiffusionPipeline): def __init__(self, vae: AutoencoderKL, text_encoder: ClapModel, text_encoder_2: Union[T5EncoderModel, VitsModel], projection_model: AudioLDM2ProjectionModel, language_model: GPT2Model, tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], tokenizer_2: Union[T5Tokenizer, T5TokenizerFast, VitsTokenizer], feature_extractor: ClapFeatureExtractor, unet: AudioLDM2UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, vocoder: SpeechT5HifiGan): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, projection_model=projection_model, language_model=language_model, tokenizer=tokenizer, tokenizer_2=tokenizer_2, feature_extractor=feature_extractor, unet=unet, scheduler=scheduler, vocoder=vocoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_model_cpu_offload(self, gpu_id=0): if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.') device = torch.device(f'cuda:{gpu_id}') if self.device.type != 'cpu': self.to('cpu', silence_dtype_warnings=True) torch.cuda.empty_cache() model_sequence = [self.text_encoder.text_model, self.text_encoder.text_projection, self.text_encoder_2, self.projection_model, self.language_model, self.unet, self.vae, self.vocoder, self.text_encoder] hook = None for cpu_offloaded_model in model_sequence: (_, hook) = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) self.final_offload_hook = hook def generate_language_model(self, inputs_embeds: torch.Tensor=None, max_new_tokens: int=8, **model_kwargs): max_new_tokens = max_new_tokens if max_new_tokens is not None else self.language_model.config.max_new_tokens model_kwargs = self.language_model._get_initial_cache_position(inputs_embeds, model_kwargs) for _ in range(max_new_tokens): model_inputs = prepare_inputs_for_generation(inputs_embeds, **model_kwargs) output = self.language_model(**model_inputs, return_dict=True) next_hidden_states = output.last_hidden_state inputs_embeds = torch.cat([inputs_embeds, next_hidden_states[:, -1:, :]], dim=1) model_kwargs = self.language_model._update_model_kwargs_for_generation(output, model_kwargs) return inputs_embeds[:, -max_new_tokens:, :] def encode_prompt(self, prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, transcription=None, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, generated_prompt_embeds: Optional[torch.Tensor]=None, negative_generated_prompt_embeds: Optional[torch.Tensor]=None, attention_mask: Optional[torch.LongTensor]=None, negative_attention_mask: Optional[torch.LongTensor]=None, max_new_tokens: Optional[int]=None): if prompt
is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] is_vits_text_encoder = isinstance(self.text_encoder_2, VitsModel) if is_vits_text_encoder: text_encoders = [self.text_encoder, self.text_encoder_2.text_encoder] else: text_encoders = [self.text_encoder, self.text_encoder_2] if prompt_embeds is None: prompt_embeds_list = [] attention_mask_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): use_prompt = isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, T5Tokenizer, T5TokenizerFast)) text_inputs = tokenizer(prompt if use_prompt else transcription, padding='max_length' if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, VitsTokenizer)) else True, max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because {text_encoder.config.model_type} can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids.to(device) attention_mask = attention_mask.to(device) if text_encoder.config.model_type == 'clap': prompt_embeds = text_encoder.get_text_features(text_input_ids, attention_mask=attention_mask) prompt_embeds = prompt_embeds[:, None, :] attention_mask = attention_mask.new_ones((batch_size, 1)) elif is_vits_text_encoder: for (text_input_id, text_attention_mask) in zip(text_input_ids, attention_mask): for (idx, phoneme_id) in enumerate(text_input_id): if phoneme_id == 0: text_input_id[idx] = 182 text_attention_mask[idx] = 1 break prompt_embeds = text_encoder(text_input_ids, attention_mask=attention_mask, padding_mask=attention_mask.unsqueeze(-1)) prompt_embeds = prompt_embeds[0] else: prompt_embeds = text_encoder(text_input_ids, attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] prompt_embeds_list.append(prompt_embeds) attention_mask_list.append(attention_mask) projection_output = self.projection_model(hidden_states=prompt_embeds_list[0], hidden_states_1=prompt_embeds_list[1], attention_mask=attention_mask_list[0], attention_mask_1=attention_mask_list[1]) projected_prompt_embeds = projection_output.hidden_states projected_attention_mask = projection_output.attention_mask generated_prompt_embeds = self.generate_language_model(projected_prompt_embeds, attention_mask=projected_attention_mask, max_new_tokens=max_new_tokens) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) attention_mask = attention_mask.to(device=device) if attention_mask is not None else torch.ones(prompt_embeds.shape[:2], dtype=torch.long, device=device) generated_prompt_embeds = generated_prompt_embeds.to(dtype=self.language_model.dtype, device=device) (bs_embed, seq_len, hidden_size) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len, hidden_size) attention_mask = attention_mask.repeat(1, num_waveforms_per_prompt) attention_mask = 
attention_mask.view(bs_embed * num_waveforms_per_prompt, seq_len) (bs_embed, seq_len, hidden_size) = generated_prompt_embeds.shape generated_prompt_embeds = generated_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) generated_prompt_embeds = generated_prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len, hidden_size) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt negative_prompt_embeds_list = [] negative_attention_mask_list = [] max_length = prompt_embeds.shape[1] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=tokenizer.model_max_length if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, VitsTokenizer)) else max_length, truncation=True, return_tensors='pt') uncond_input_ids = uncond_input.input_ids.to(device) negative_attention_mask = uncond_input.attention_mask.to(device) if text_encoder.config.model_type == 'clap': negative_prompt_embeds = text_encoder.get_text_features(uncond_input_ids, attention_mask=negative_attention_mask) negative_prompt_embeds = negative_prompt_embeds[:, None, :] negative_attention_mask = negative_attention_mask.new_ones((batch_size, 1)) elif is_vits_text_encoder: negative_prompt_embeds = torch.zeros(batch_size, tokenizer.model_max_length, text_encoder.config.hidden_size).to(dtype=self.text_encoder_2.dtype, device=device) negative_attention_mask = torch.zeros(batch_size, tokenizer.model_max_length).to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = text_encoder(uncond_input_ids, attention_mask=negative_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_attention_mask_list.append(negative_attention_mask) projection_output = self.projection_model(hidden_states=negative_prompt_embeds_list[0], hidden_states_1=negative_prompt_embeds_list[1], attention_mask=negative_attention_mask_list[0], attention_mask_1=negative_attention_mask_list[1]) negative_projected_prompt_embeds = projection_output.hidden_states negative_projected_attention_mask = projection_output.attention_mask negative_generated_prompt_embeds = self.generate_language_model(negative_projected_prompt_embeds, attention_mask=negative_projected_attention_mask, max_new_tokens=max_new_tokens) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) negative_attention_mask = negative_attention_mask.to(device=device) if negative_attention_mask is not None else torch.ones(negative_prompt_embeds.shape[:2], dtype=torch.long, device=device) negative_generated_prompt_embeds = negative_generated_prompt_embeds.to(dtype=self.language_model.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, 
num_waveforms_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len, -1) negative_attention_mask = negative_attention_mask.repeat(1, num_waveforms_per_prompt) negative_attention_mask = negative_attention_mask.view(batch_size * num_waveforms_per_prompt, seq_len) seq_len = negative_generated_prompt_embeds.shape[1] negative_generated_prompt_embeds = negative_generated_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) negative_generated_prompt_embeds = negative_generated_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len, -1) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) attention_mask = torch.cat([negative_attention_mask, attention_mask]) generated_prompt_embeds = torch.cat([negative_generated_prompt_embeds, generated_prompt_embeds]) return (prompt_embeds, attention_mask, generated_prompt_embeds) def mel_spectrogram_to_waveform(self, mel_spectrogram): if mel_spectrogram.dim() == 4: mel_spectrogram = mel_spectrogram.squeeze(1) waveform = self.vocoder(mel_spectrogram) waveform = waveform.cpu().float() return waveform def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype): if not is_librosa_available(): logger.info('Automatic scoring of the generated audio waveforms against the input prompt text requires the `librosa` package to resample the generated waveforms. Returning the audios in the order they were generated. To enable automatic scoring, install `librosa` with: `pip install librosa`.') return audio inputs = self.tokenizer(text, return_tensors='pt', padding=True) resampled_audio = librosa.resample(audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate) inputs['input_features'] = self.feature_extractor(list(resampled_audio), return_tensors='pt', sampling_rate=self.feature_extractor.sampling_rate).input_features.type(dtype) inputs = inputs.to(device) logits_per_text = self.text_encoder(**inputs).logits_per_text indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt] audio = torch.index_select(audio, 0, indices.reshape(-1).cpu()) return audio def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, transcription=None, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, generated_prompt_embeds=None, negative_generated_prompt_embeds=None, attention_mask=None, negative_attention_mask=None): min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor if audio_length_in_s < min_audio_length_in_s: raise ValueError(f'`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but is {audio_length_in_s}.') if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: raise ValueError(f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of {self.vae_scale_factor}.") if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or 
callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and (prompt_embeds is None or generated_prompt_embeds is None): raise ValueError('Provide either `prompt`, or `prompt_embeds` and `generated_prompt_embeds`. Cannot leave `prompt` undefined without specifying both `prompt_embeds` and `generated_prompt_embeds`.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_embeds is not None and negative_generated_prompt_embeds is None: raise ValueError('Cannot forward `negative_prompt_embeds` without `negative_generated_prompt_embeds`. Ensure that both arguments are specified.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if attention_mask is not None and attention_mask.shape != prompt_embeds.shape[:2]: raise ValueError(f'`attention_mask` should have the same batch size and sequence length as `prompt_embeds`, but got: `attention_mask` {attention_mask.shape} != `prompt_embeds` {prompt_embeds.shape}') if transcription is None: if self.text_encoder_2.config.model_type == 'vits': raise ValueError('Cannot forward without `transcription` when using a VITS text encoder. Please make sure to pass a `transcription`.') elif transcription is not None and (not isinstance(transcription, str) and (not isinstance(transcription, list))): raise ValueError(f'`transcription` has to be of type `str` or `list` but is {type(transcription)}') if generated_prompt_embeds is not None and negative_generated_prompt_embeds is not None: if generated_prompt_embeds.shape != negative_generated_prompt_embeds.shape: raise ValueError(f'`generated_prompt_embeds` and `negative_generated_prompt_embeds` must have the same shape when passed directly, but got: `generated_prompt_embeds` {generated_prompt_embeds.shape} != `negative_generated_prompt_embeds` {negative_generated_prompt_embeds.shape}.') if negative_attention_mask is not None and negative_attention_mask.shape != negative_prompt_embeds.shape[:2]: raise ValueError(f'`negative_attention_mask` should have the same batch size and sequence length as `negative_prompt_embeds`, but got: `negative_attention_mask` {negative_attention_mask.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}') def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(self.vocoder.config.model_in_dim) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, transcription: Union[str, List[str]]=None, audio_length_in_s: Optional[float]=None, num_inference_steps: int=200, guidance_scale: float=3.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_waveforms_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, generated_prompt_embeds: Optional[torch.Tensor]=None, negative_generated_prompt_embeds: Optional[torch.Tensor]=None, attention_mask: Optional[torch.LongTensor]=None, negative_attention_mask: Optional[torch.LongTensor]=None, max_new_tokens: Optional[int]=None, return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, output_type: Optional[str]='np'): vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor height = int(audio_length_in_s / vocoder_upsample_factor) original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) if height % self.vae_scale_factor != 0: height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor logger.info(f'Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} so that it can be handled by the model. 
It will be cut to {audio_length_in_s} after the denoising process.') self.check_inputs(prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, transcription, negative_prompt, prompt_embeds, negative_prompt_embeds, generated_prompt_embeds, negative_generated_prompt_embeds, attention_mask, negative_attention_mask) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, attention_mask, generated_prompt_embeds) = self.encode_prompt(prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, transcription, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, generated_prompt_embeds=generated_prompt_embeds, negative_generated_prompt_embeds=negative_generated_prompt_embeds, attention_mask=attention_mask, negative_attention_mask=negative_attention_mask, max_new_tokens=max_new_tokens) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_waveforms_per_prompt, num_channels_latents, height, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=generated_prompt_embeds, encoder_hidden_states_1=prompt_embeds, encoder_attention_mask_1=attention_mask, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) self.maybe_free_model_hooks() if not output_type == 'latent': latents = 1 / self.vae.config.scaling_factor * latents mel_spectrogram = self.vae.decode(latents).sample else: return AudioPipelineOutput(audios=latents) audio = self.mel_spectrogram_to_waveform(mel_spectrogram) audio = audio[:, :original_waveform_length] if num_waveforms_per_prompt > 1 and prompt is not None: audio = self.score_waveforms(text=prompt, audio=audio, num_waveforms_per_prompt=num_waveforms_per_prompt, device=device, dtype=prompt_embeds.dtype) if output_type == 'np': audio = audio.numpy() if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio) # File: diffusers-main/src/diffusers/pipelines/aura_flow/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_aura_flow'] = ['AuraFlowPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_aura_flow import AuraFlowPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py import inspect from typing import List, Optional, Tuple, Union import torch from transformers import T5Tokenizer, UMT5EncoderModel from ...image_processor import VaeImageProcessor from ...models import AuraFlowTransformer2DModel, AutoencoderKL from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AuraFlowPipeline\n\n >>> pipe = AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n >>> prompt = "A cat holding a sign that says hello world"\n >>> image = pipe(prompt).images[0]\n >>> image.save("aura_flow.png")\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class AuraFlowPipeline(DiffusionPipeline): _optional_components = [] model_cpu_offload_seq = 'text_encoder->transformer->vae' def __init__(self, tokenizer: T5Tokenizer, text_encoder: UMT5EncoderModel, vae: AutoencoderKL, transformer: AuraFlowTransformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def check_inputs(self, prompt, height, width, negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError('Must provide `prompt_attention_mask` when specifying `prompt_embeds`.') if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError('Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError(f'`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask` {negative_prompt_attention_mask.shape}.') def encode_prompt(self, prompt: Union[str, List[str]], negative_prompt: Union[str, List[str]]=None, do_classifier_free_guidance: bool=True, num_images_per_prompt: int=1, device: Optional[torch.device]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, max_sequence_length: int=256): if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = max_sequence_length if prompt_embeds is None: text_inputs = self.tokenizer(prompt, truncation=True, max_length=max_length, padding='max_length', return_tensors='pt') text_input_ids = text_inputs['input_ids'] untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because T5 can only handle sequences up to {max_length} tokens: {removed_text}') text_inputs = {k: v.to(device) for (k, v) in text_inputs.items()} prompt_embeds = self.text_encoder(**text_inputs)[0] prompt_attention_mask = text_inputs['attention_mask'].unsqueeze(-1).expand(prompt_embeds.shape) prompt_embeds = prompt_embeds * prompt_attention_mask if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.reshape(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, truncation=True, 
max_length=max_length, padding='max_length', return_tensors='pt') uncond_input = {k: v.to(device) for (k, v) in uncond_input.items()} negative_prompt_embeds = self.text_encoder(**uncond_input)[0] negative_prompt_attention_mask = uncond_input['attention_mask'].unsqueeze(-1).expand(negative_prompt_embeds.shape) negative_prompt_embeds = negative_prompt_embeds * negative_prompt_attention_mask if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.reshape(bs_embed, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None return (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if latents is not None: return latents.to(device=device, dtype=dtype) shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, negative_prompt: Union[str, List[str]]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=3.5, num_images_per_prompt: Optional[int]=1, height: Optional[int]=1024, width: Optional[int]=1024, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, max_sequence_length: int=256, output_type: Optional[str]='pil', return_dict: bool=True) -> Union[ImagePipelineOutput, Tuple]: height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, 
prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) = self.encode_prompt(prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=max_sequence_length) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) latent_channels = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, latent_channels, height, width, prompt_embeds.dtype, device, generator, latents) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents timestep = torch.tensor([t / 1000]).expand(latent_model_input.shape[0]) timestep = timestep.to(latents.device, dtype=latents.dtype) noise_pred = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == 'latent': image = latents else: needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/auto_pipeline.py from collections import OrderedDict from huggingface_hub.utils import validate_hf_hub_args from ..configuration_utils import ConfigMixin from ..utils import is_sentencepiece_available from .aura_flow import AuraFlowPipeline from .controlnet import StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetInpaintPipeline, StableDiffusionXLControlNetPipeline from .deepfloyd_if import IFImg2ImgPipeline, IFInpaintingPipeline, IFPipeline from .flux import FluxControlNetPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxPipeline from .hunyuandit import HunyuanDiTPipeline from .kandinsky import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyImg2ImgPipeline, KandinskyInpaintCombinedPipeline, KandinskyInpaintPipeline, KandinskyPipeline from .kandinsky2_2 import KandinskyV22CombinedPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22Img2ImgPipeline, KandinskyV22InpaintCombinedPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline from .kandinsky3 import 
Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline from .lumina import LuminaText2ImgPipeline from .pag import HunyuanDiTPAGPipeline, PixArtSigmaPAGPipeline, StableDiffusion3PAGPipeline, StableDiffusionControlNetPAGPipeline, StableDiffusionPAGPipeline, StableDiffusionXLControlNetPAGImg2ImgPipeline, StableDiffusionXLControlNetPAGPipeline, StableDiffusionXLPAGImg2ImgPipeline, StableDiffusionXLPAGInpaintPipeline, StableDiffusionXLPAGPipeline from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline from .stable_cascade import StableCascadeCombinedPipeline, StableCascadeDecoderPipeline from .stable_diffusion import StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline from .stable_diffusion_3 import StableDiffusion3Img2ImgPipeline, StableDiffusion3InpaintPipeline, StableDiffusion3Pipeline from .stable_diffusion_xl import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict([('stable-diffusion', StableDiffusionPipeline), ('stable-diffusion-xl', StableDiffusionXLPipeline), ('stable-diffusion-3', StableDiffusion3Pipeline), ('stable-diffusion-3-pag', StableDiffusion3PAGPipeline), ('if', IFPipeline), ('hunyuan', HunyuanDiTPipeline), ('hunyuan-pag', HunyuanDiTPAGPipeline), ('kandinsky', KandinskyCombinedPipeline), ('kandinsky22', KandinskyV22CombinedPipeline), ('kandinsky3', Kandinsky3Pipeline), ('stable-diffusion-controlnet', StableDiffusionControlNetPipeline), ('stable-diffusion-xl-controlnet', StableDiffusionXLControlNetPipeline), ('wuerstchen', WuerstchenCombinedPipeline), ('cascade', StableCascadeCombinedPipeline), ('lcm', LatentConsistencyModelPipeline), ('pixart-alpha', PixArtAlphaPipeline), ('pixart-sigma', PixArtSigmaPipeline), ('stable-diffusion-pag', StableDiffusionPAGPipeline), ('stable-diffusion-controlnet-pag', StableDiffusionControlNetPAGPipeline), ('stable-diffusion-xl-pag', StableDiffusionXLPAGPipeline), ('stable-diffusion-xl-controlnet-pag', StableDiffusionXLControlNetPAGPipeline), ('pixart-sigma-pag', PixArtSigmaPAGPipeline), ('auraflow', AuraFlowPipeline), ('flux', FluxPipeline), ('flux-controlnet', FluxControlNetPipeline), ('lumina', LuminaText2ImgPipeline)]) AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict([('stable-diffusion', StableDiffusionImg2ImgPipeline), ('stable-diffusion-xl', StableDiffusionXLImg2ImgPipeline), ('stable-diffusion-3', StableDiffusion3Img2ImgPipeline), ('if', IFImg2ImgPipeline), ('kandinsky', KandinskyImg2ImgCombinedPipeline), ('kandinsky22', KandinskyV22Img2ImgCombinedPipeline), ('kandinsky3', Kandinsky3Img2ImgPipeline), ('stable-diffusion-controlnet', StableDiffusionControlNetImg2ImgPipeline), ('stable-diffusion-xl-controlnet', StableDiffusionXLControlNetImg2ImgPipeline), ('stable-diffusion-xl-pag', StableDiffusionXLPAGImg2ImgPipeline), ('stable-diffusion-xl-controlnet-pag', StableDiffusionXLControlNetPAGImg2ImgPipeline), ('lcm', LatentConsistencyModelImg2ImgPipeline), ('flux', FluxImg2ImgPipeline)]) AUTO_INPAINT_PIPELINES_MAPPING = OrderedDict([('stable-diffusion', StableDiffusionInpaintPipeline), ('stable-diffusion-xl', StableDiffusionXLInpaintPipeline), ('stable-diffusion-3', StableDiffusion3InpaintPipeline), ('if', IFInpaintingPipeline), ('kandinsky', KandinskyInpaintCombinedPipeline), ('kandinsky22', KandinskyV22InpaintCombinedPipeline), 
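# NOTE (annotation added for readability; not part of the original module): each of these
# mappings keys a model-family name -- recovered from the `_class_name` entry of a checkpoint's
# model_index.json by `_get_task_class` below -- to the pipeline class used for a given task.
# A minimal usage sketch, assuming an SDXL text-to-image checkpoint id purely as an example:
#     from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
#     pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
#     img2img = AutoPipelineForImage2Image.from_pipe(pipe)  # reuses the already-loaded components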
('stable-diffusion-controlnet', StableDiffusionControlNetInpaintPipeline), ('stable-diffusion-xl-controlnet', StableDiffusionXLControlNetInpaintPipeline), ('stable-diffusion-xl-pag', StableDiffusionXLPAGInpaintPipeline), ('flux', FluxInpaintPipeline)]) _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict([('kandinsky', KandinskyPipeline), ('kandinsky22', KandinskyV22Pipeline), ('wuerstchen', WuerstchenDecoderPipeline), ('cascade', StableCascadeDecoderPipeline)]) _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict([('kandinsky', KandinskyImg2ImgPipeline), ('kandinsky22', KandinskyV22Img2ImgPipeline)]) _AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict([('kandinsky', KandinskyInpaintPipeline), ('kandinsky22', KandinskyV22InpaintPipeline)]) if is_sentencepiece_available(): from .kolors import KolorsImg2ImgPipeline, KolorsPipeline from .pag import KolorsPAGPipeline AUTO_TEXT2IMAGE_PIPELINES_MAPPING['kolors'] = KolorsPipeline AUTO_TEXT2IMAGE_PIPELINES_MAPPING['kolors-pag'] = KolorsPAGPipeline AUTO_IMAGE2IMAGE_PIPELINES_MAPPING['kolors'] = KolorsImg2ImgPipeline SUPPORTED_TASKS_MAPPINGS = [AUTO_TEXT2IMAGE_PIPELINES_MAPPING, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_INPAINT_PIPELINES_MAPPING, _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING, _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING, _AUTO_INPAINT_DECODER_PIPELINES_MAPPING] def _get_connected_pipeline(pipeline_cls): if pipeline_cls in _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values(): return _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) if pipeline_cls in _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values(): return _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) if pipeline_cls in _AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values(): return _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) def _get_task_class(mapping, pipeline_class_name, throw_error_if_not_exist: bool=True): def get_model(pipeline_class_name): for task_mapping in SUPPORTED_TASKS_MAPPINGS: for (model_name, pipeline) in task_mapping.items(): if pipeline.__name__ == pipeline_class_name: return model_name model_name = get_model(pipeline_class_name) if model_name is not None: task_class = mapping.get(model_name, None) if task_class is not None: return task_class if throw_error_if_not_exist: raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}") class AutoPipelineForText2Image(ConfigMixin): config_name = 'model_index.json' def __init__(self, *args, **kwargs): raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_pipe(pipeline)` methods.') @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_or_path, **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) token = kwargs.pop('token', None) local_files_only = kwargs.pop('local_files_only', False) revision = kwargs.pop('revision', None) load_config_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'token': token, 'local_files_only': local_files_only, 'revision': revision} config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) orig_class_name = config['_class_name'] if 'controlnet' in kwargs: 
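# NOTE (annotation added for readability; not part of the original module): when a `controlnet`
# (or `enable_pag=True`) kwarg is passed to `from_pretrained`, the `_class_name` read from
# model_index.json is first renamed to its ControlNet / PAG variant before the task lookup,
# e.g. "StableDiffusionXLPipeline" -> "StableDiffusionXLControlNetPipeline".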
orig_class_name = config['_class_name'].replace('Pipeline', 'ControlNetPipeline') if 'enable_pag' in kwargs: enable_pag = kwargs.pop('enable_pag') if enable_pag: orig_class_name = orig_class_name.replace('Pipeline', 'PAGPipeline') text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, orig_class_name) kwargs = {**load_config_kwargs, **kwargs} return text_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs) @classmethod def from_pipe(cls, pipeline, **kwargs): original_config = dict(pipeline.config) original_cls_name = pipeline.__class__.__name__ text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, original_cls_name) if 'controlnet' in kwargs: if kwargs['controlnet'] is not None: to_replace = 'PAGPipeline' if 'PAG' in text_2_image_cls.__name__ else 'Pipeline' text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, text_2_image_cls.__name__.replace('ControlNet', '').replace(to_replace, 'ControlNet' + to_replace)) else: text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, text_2_image_cls.__name__.replace('ControlNet', '')) if 'enable_pag' in kwargs: enable_pag = kwargs.pop('enable_pag') if enable_pag: text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, text_2_image_cls.__name__.replace('PAG', '').replace('Pipeline', 'PAGPipeline')) else: text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, text_2_image_cls.__name__.replace('PAG', '')) (expected_modules, optional_kwargs) = text_2_image_cls._get_signature_keys(text_2_image_cls) pretrained_model_name_or_path = original_config.pop('_name_or_path', None) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} original_class_obj = {k: pipeline.components[k] for (k, v) in pipeline.components.items() if k in expected_modules and k not in passed_class_obj} passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} original_pipe_kwargs = {k: original_config[k] for (k, v) in original_config.items() if k in optional_kwargs and k not in passed_pipe_kwargs} additional_pipe_kwargs = [k[1:] for k in original_config.keys() if k.startswith('_') and k[1:] in optional_kwargs and (k[1:] not in passed_pipe_kwargs)] for k in additional_pipe_kwargs: original_pipe_kwargs[k] = original_config.pop(f'_{k}') text_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} unused_original_config = {f"{('' if k.startswith('_') else '_')}{k}": original_config[k] for (k, v) in original_config.items() if k not in text_2_image_kwargs} missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(text_2_image_kwargs.keys()) if len(missing_modules) > 0: raise ValueError(f'Pipeline {text_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed') model = text_2_image_cls(**text_2_image_kwargs) model.register_to_config(_name_or_path=pretrained_model_name_or_path) model.register_to_config(**unused_original_config) return model class AutoPipelineForImage2Image(ConfigMixin): config_name = 'model_index.json' def __init__(self, *args, **kwargs): raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_pipe(pipeline)` methods.') @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_or_path, **kwargs): cache_dir = 
kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) token = kwargs.pop('token', None) local_files_only = kwargs.pop('local_files_only', False) revision = kwargs.pop('revision', None) load_config_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'token': token, 'local_files_only': local_files_only, 'revision': revision} config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) orig_class_name = config['_class_name'] to_replace = 'Img2ImgPipeline' if 'Img2Img' in config['_class_name'] else 'Pipeline' if 'controlnet' in kwargs: orig_class_name = orig_class_name.replace(to_replace, 'ControlNet' + to_replace) if 'enable_pag' in kwargs: enable_pag = kwargs.pop('enable_pag') if enable_pag: orig_class_name = orig_class_name.replace(to_replace, 'PAG' + to_replace) image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, orig_class_name) kwargs = {**load_config_kwargs, **kwargs} return image_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs) @classmethod def from_pipe(cls, pipeline, **kwargs): original_config = dict(pipeline.config) original_cls_name = pipeline.__class__.__name__ image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, original_cls_name) if 'controlnet' in kwargs: if kwargs['controlnet'] is not None: to_replace = 'Img2ImgPipeline' if 'PAG' in image_2_image_cls.__name__: to_replace = 'PAG' + to_replace image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace('ControlNet', '').replace(to_replace, 'ControlNet' + to_replace)) else: image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace('ControlNet', '')) if 'enable_pag' in kwargs: enable_pag = kwargs.pop('enable_pag') if enable_pag: image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace('PAG', '').replace('Img2ImgPipeline', 'PAGImg2ImgPipeline')) else: image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace('PAG', '')) (expected_modules, optional_kwargs) = image_2_image_cls._get_signature_keys(image_2_image_cls) pretrained_model_name_or_path = original_config.pop('_name_or_path', None) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} original_class_obj = {k: pipeline.components[k] for (k, v) in pipeline.components.items() if k in expected_modules and k not in passed_class_obj} passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} original_pipe_kwargs = {k: original_config[k] for (k, v) in original_config.items() if k in optional_kwargs and k not in passed_pipe_kwargs} additional_pipe_kwargs = [k[1:] for k in original_config.keys() if k.startswith('_') and k[1:] in optional_kwargs and (k[1:] not in passed_pipe_kwargs)] for k in additional_pipe_kwargs: original_pipe_kwargs[k] = original_config.pop(f'_{k}') image_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} unused_original_config = {f"{('' if k.startswith('_') else '_')}{k}": original_config[k] for (k, v) in original_config.items() if k not in image_2_image_kwargs} missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(image_2_image_kwargs.keys()) if len(missing_modules) > 0: raise ValueError(f'Pipeline {image_2_image_cls} expected {expected_modules}, but only 
{set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed') model = image_2_image_cls(**image_2_image_kwargs) model.register_to_config(_name_or_path=pretrained_model_name_or_path) model.register_to_config(**unused_original_config) return model class AutoPipelineForInpainting(ConfigMixin): config_name = 'model_index.json' def __init__(self, *args, **kwargs): raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_pipe(pipeline)` methods.') @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_or_path, **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) token = kwargs.pop('token', None) local_files_only = kwargs.pop('local_files_only', False) revision = kwargs.pop('revision', None) load_config_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'token': token, 'local_files_only': local_files_only, 'revision': revision} config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) orig_class_name = config['_class_name'] to_replace = 'InpaintPipeline' if 'Inpaint' in config['_class_name'] else 'Pipeline' if 'controlnet' in kwargs: orig_class_name = orig_class_name.replace(to_replace, 'ControlNet' + to_replace) if 'enable_pag' in kwargs: enable_pag = kwargs.pop('enable_pag') if enable_pag: orig_class_name = orig_class_name.replace(to_replace, 'PAG' + to_replace) inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, orig_class_name) kwargs = {**load_config_kwargs, **kwargs} return inpainting_cls.from_pretrained(pretrained_model_or_path, **kwargs) @classmethod def from_pipe(cls, pipeline, **kwargs): original_config = dict(pipeline.config) original_cls_name = pipeline.__class__.__name__ inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, original_cls_name) if 'controlnet' in kwargs: if kwargs['controlnet'] is not None: inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, inpainting_cls.__name__.replace('ControlNet', '').replace('InpaintPipeline', 'ControlNetInpaintPipeline')) else: inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, inpainting_cls.__name__.replace('ControlNetInpaintPipeline', 'InpaintPipeline')) if 'enable_pag' in kwargs: enable_pag = kwargs.pop('enable_pag') if enable_pag: inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, inpainting_cls.__name__.replace('PAG', '').replace('InpaintPipeline', 'PAGInpaintPipeline')) else: inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, inpainting_cls.__name__.replace('PAGInpaintPipeline', 'InpaintPipeline')) (expected_modules, optional_kwargs) = inpainting_cls._get_signature_keys(inpainting_cls) pretrained_model_name_or_path = original_config.pop('_name_or_path', None) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} original_class_obj = {k: pipeline.components[k] for (k, v) in pipeline.components.items() if k in expected_modules and k not in passed_class_obj} passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} original_pipe_kwargs = {k: original_config[k] for (k, v) in original_config.items() if k in optional_kwargs and k not in passed_pipe_kwargs} additional_pipe_kwargs = [k[1:] for k in original_config.keys() if k.startswith('_') and k[1:] in optional_kwargs and (k[1:] not in 
passed_pipe_kwargs)] for k in additional_pipe_kwargs: original_pipe_kwargs[k] = original_config.pop(f'_{k}') inpainting_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} unused_original_config = {f"{('' if k.startswith('_') else '_')}{k}": original_config[k] for (k, v) in original_config.items() if k not in inpainting_kwargs} missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(inpainting_kwargs.keys()) if len(missing_modules) > 0: raise ValueError(f'Pipeline {inpainting_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed') model = inpainting_cls(**inpainting_kwargs) model.register_to_config(_name_or_path=pretrained_model_name_or_path) model.register_to_config(**unused_original_config) return model # File: diffusers-main/src/diffusers/pipelines/blip_diffusion/__init__.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .blip_image_processing import BlipImageProcessor from .modeling_blip2 import Blip2QFormerModel from .modeling_ctx_clip import ContextCLIPTextModel from .pipeline_blip_diffusion import BlipDiffusionPipeline # File: diffusers-main/src/diffusers/pipelines/blip_diffusion/blip_image_processing.py """""" from typing import Dict, List, Optional, Union import numpy as np import torch from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from transformers.image_transforms import convert_to_rgb, resize, to_channel_dimension_format from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images from transformers.utils import TensorType, is_vision_available, logging from diffusers.utils import numpy_to_pil if is_vision_available(): import PIL.Image logger = logging.get_logger(__name__) class BlipImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, do_center_crop: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 224, 'width': 224} size = get_size_dict(size, default_to_square=True) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb self.do_center_crop = do_center_crop def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, 
ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size) if 'height' not in size or 'width' not in size: raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}') output_size = (size['height'], size['width']) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, do_center_crop: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: bool=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] if do_center_crop: images = [self.center_crop(image, size, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors) return encoded_outputs def postprocess(self, sample: torch.Tensor, output_type: str='pil'): if output_type not in ['pt', 'np', 'pil']: raise ValueError(f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']") sample = (sample / 2 + 0.5).clamp(0, 1) if output_type == 'pt': return sample sample = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'np': return sample sample = numpy_to_pil(sample) return sample # File: diffusers-main/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers import BertTokenizer from transformers.activations import QuickGELUActivation as QuickGELU from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from transformers.models.blip_2.configuration_blip_2 import Blip2Config, Blip2VisionConfig from transformers.models.blip_2.modeling_blip_2 import Blip2Encoder, Blip2PreTrainedModel, Blip2QFormerAttention, Blip2QFormerIntermediate, Blip2QFormerOutput from transformers.pytorch_utils import apply_chunking_to_forward from transformers.utils import logging, replace_return_docstrings logger = logging.get_logger(__name__) class Blip2TextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.config = config def forward(self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0): if input_ids is not None: seq_length = input_ids.size()[1] else: seq_length = 0 if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length].clone() if input_ids is not None: embeddings = self.word_embeddings(input_ids) if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings if query_embeds is not None: batch_size = embeddings.shape[0] query_embeds = query_embeds.repeat(batch_size, 1, 1) embeddings = 
torch.cat((query_embeds, embeddings), dim=1) else: embeddings = query_embeds embeddings = embeddings.to(query_embeds.dtype) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class Blip2VisionEmbeddings(nn.Module): def __init__(self, config: Blip2VisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding[:, :embeddings.size(1), :].to(target_dtype) return embeddings class Blip2QFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, 'gradient_checkpointing', False) and self.training: if use_cache: logger.warning('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions, query_length) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint(create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, query_length) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if layer_module.has_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class Blip2QFormerLayer(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Blip2QFormerAttention(config) self.layer_idx = layer_idx if layer_idx % config.cross_attention_frequency == 0: self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True) self.has_cross_attention = True else: self.has_cross_attention = False self.intermediate = Blip2QFormerIntermediate(config) self.intermediate_query = Blip2QFormerIntermediate(config) self.output_query = Blip2QFormerOutput(config) self.output = Blip2QFormerOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, query_length=0): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError('encoder_hidden_states must be given for cross-attention layers') cross_attention_outputs = self.crossattention(query_attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions) query_attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward(self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :]) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward(self.feed_forward_chunk, 
self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, attention_output) return layer_output class ProjLayer(nn.Module): def __init__(self, in_dim, out_dim, hidden_dim, drop_p=0.1, eps=1e-12): super().__init__() self.dense1 = nn.Linear(in_dim, hidden_dim) self.act_fn = QuickGELU() self.dense2 = nn.Linear(hidden_dim, out_dim) self.dropout = nn.Dropout(drop_p) self.LayerNorm = nn.LayerNorm(out_dim, eps=eps) def forward(self, x): x_in = x x = self.LayerNorm(x) x = self.dropout(self.dense2(self.act_fn(self.dense1(x)))) + x_in return x class Blip2VisionModel(Blip2PreTrainedModel): main_input_name = 'pixel_values' config_class = Blip2VisionConfig def __init__(self, config: Blip2VisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = Blip2VisionEmbeddings(config) self.pre_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = Blip2Encoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig) def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layernorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def get_input_embeddings(self): return self.embeddings class Blip2QFormerModel(Blip2PreTrainedModel): def __init__(self, config: Blip2Config): super().__init__(config) self.config = config self.embeddings = Blip2TextEmbeddings(config.qformer_config) self.visual_encoder = Blip2VisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) if not hasattr(config, 'tokenizer') or config.tokenizer is None: self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', truncation_side='right') else: self.tokenizer = 
BertTokenizer.from_pretrained(config.tokenizer, truncation_side='right') self.tokenizer.add_special_tokens({'bos_token': '[DEC]'}) self.proj_layer = ProjLayer(in_dim=config.qformer_config.hidden_size, out_dim=config.qformer_config.hidden_size, hidden_dim=config.qformer_config.hidden_size * 4, drop_p=0.1, eps=1e-12) self.encoder = Blip2QFormerEncoder(config.qformer_config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor: if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape)) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward(self, text_input=None, image_input=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): text = self.tokenizer(text_input, return_tensors='pt', padding=True) text = text.to(self.device) input_ids = text.input_ids batch_size = input_ids.shape[0] query_atts = torch.ones((batch_size, self.query_tokens.size()[1]), dtype=torch.long).to(self.device) attention_mask = torch.cat([query_atts, text.attention_mask], dim=1) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict past_key_values_length = past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 query_length = self.query_tokens.shape[1] embedding_output = self.embeddings(input_ids=input_ids, query_embeds=self.query_tokens, past_key_values_length=past_key_values_length) input_shape = embedding_output.size()[:-1] (batch_size, seq_length) = input_shape device = embedding_output.device image_embeds_frozen = self.visual_encoder(image_input).last_hidden_state encoder_hidden_states = image_embeds_frozen if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states[0].size() else: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = 
self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.qformer_config.num_hidden_layers) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] if not return_dict: return self.proj_layer(sequence_output[:, :query_length, :]) return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) # File: diffusers-main/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py from typing import Optional, Tuple, Union import torch from torch import nn from transformers import CLIPPreTrainedModel from transformers.modeling_outputs import BaseModelOutputWithPooling from transformers.models.clip.configuration_clip import CLIPTextConfig from transformers.models.clip.modeling_clip import CLIPEncoder def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): (bsz, src_len) = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class ContextCLIPTextModel(CLIPPreTrainedModel): config_class = CLIPTextConfig _no_split_modules = ['CLIPEncoderLayer'] def __init__(self, config: CLIPTextConfig): super().__init__(config) self.text_model = ContextCLIPTextTransformer(config) self.post_init() def forward(self, ctx_embeddings: torch.Tensor=None, ctx_begin_pos: list=None, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return self.text_model(ctx_embeddings=ctx_embeddings, ctx_begin_pos=ctx_begin_pos, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class ContextCLIPTextTransformer(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = ContextCLIPTextEmbeddings(config) self.encoder = CLIPEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim) def forward(self, ctx_embeddings: torch.Tensor, ctx_begin_pos: list, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError('You have to specify either input_ids') input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, ctx_embeddings=ctx_embeddings, ctx_begin_pos=ctx_begin_pos) (bsz, seq_len) = input_shape if ctx_embeddings is not None: seq_len += ctx_embeddings.size(1) causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to(hidden_states.device) if attention_mask is not None: attention_mask = _expand_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=input_ids.device), input_ids.to(torch.int).argmax(dim=-1)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def _build_causal_attention_mask(self, bsz, seq_len, dtype): mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) mask.fill_(torch.tensor(torch.finfo(dtype).min)) mask.triu_(1) mask = mask.unsqueeze(1) return mask class ContextCLIPTextEmbeddings(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, ctx_embeddings: torch.Tensor, ctx_begin_pos: list, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.Tensor: if ctx_embeddings is None: ctx_len = 0 else: ctx_len = ctx_embeddings.shape[1] seq_length = (input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]) + ctx_len if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) input_embeds_ctx = [] bsz = inputs_embeds.shape[0] if ctx_embeddings is not None: for i in range(bsz): cbp = ctx_begin_pos[i] prefix = inputs_embeds[i, :cbp] suffix = inputs_embeds[i, cbp:] input_embeds_ctx.append(torch.cat([prefix, ctx_embeddings[i], suffix], dim=0)) inputs_embeds = torch.stack(input_embeds_ctx, dim=0) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings # File: diffusers-main/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py from typing import List, Optional, Union import PIL.Image import torch from transformers import CLIPTokenizer from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import PNDMScheduler from ...utils import logging, replace_example_docstring from 
...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .blip_image_processing import BlipImageProcessor from .modeling_blip2 import Blip2QFormerModel from .modeling_ctx_clip import ContextCLIPTextModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers.pipelines import BlipDiffusionPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> blip_diffusion_pipe = BlipDiffusionPipeline.from_pretrained(\n ... "Salesforce/blipdiffusion", torch_dtype=torch.float16\n ... ).to("cuda")\n\n\n >>> cond_subject = "dog"\n >>> tgt_subject = "dog"\n >>> text_prompt_input = "swimming underwater"\n\n >>> cond_image = load_image(\n ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/dog.jpg"\n ... )\n >>> guidance_scale = 7.5\n >>> num_inference_steps = 25\n >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate"\n\n\n >>> output = blip_diffusion_pipe(\n ... text_prompt_input,\n ... cond_image,\n ... cond_subject,\n ... tgt_subject,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=num_inference_steps,\n ... neg_prompt=negative_prompt,\n ... height=512,\n ... width=512,\n ... ).images\n >>> output[0].save("image.png")\n ```\n' class BlipDiffusionPipeline(DiffusionPipeline): model_cpu_offload_seq = 'qformer->text_encoder->unet->vae' def __init__(self, tokenizer: CLIPTokenizer, text_encoder: ContextCLIPTextModel, vae: AutoencoderKL, unet: UNet2DConditionModel, scheduler: PNDMScheduler, qformer: Blip2QFormerModel, image_processor: BlipImageProcessor, ctx_begin_pos: int=2, mean: List[float]=None, std: List[float]=None): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, image_processor=image_processor) self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) def get_query_embeddings(self, input_image, src_subject): return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): rv = [] for (prompt, tgt_subject) in zip(prompts, tgt_subjects): prompt = f'a {tgt_subject} {prompt.strip()}' rv.append(', '.join([prompt] * int(prompt_strength * prompt_reps))) return rv def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) latents = latents * self.scheduler.init_noise_sigma return latents def encode_prompt(self, query_embeds, prompt, device=None): device = device or self._execution_device max_len = self.text_encoder.text_model.config.max_position_embeddings max_len -= self.qformer.config.num_query_tokens tokenized_prompt = self.tokenizer(prompt, padding='max_length', truncation=True, max_length=max_len, return_tensors='pt').to(device) batch_size = query_embeds.shape[0] ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size text_embeddings = self.text_encoder(input_ids=tokenized_prompt.input_ids, ctx_embeddings=query_embeds, ctx_begin_pos=ctx_begin_pos)[0] return text_embeddings @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: List[str], reference_image: PIL.Image.Image, source_subject_category: List[str], target_subject_category: List[str], latents: Optional[torch.Tensor]=None, guidance_scale: float=7.5, height: int=512, width: int=512, num_inference_steps: int=50, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, neg_prompt: Optional[str]='', prompt_strength: float=1.0, prompt_reps: int=20, output_type: Optional[str]='pil', return_dict: bool=True): device = self._execution_device reference_image = self.image_processor.preprocess(reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors='pt')['pixel_values'] reference_image = reference_image.to(device) if isinstance(prompt, str): prompt = [prompt] if isinstance(source_subject_category, str): source_subject_category = [source_subject_category] if isinstance(target_subject_category, str): target_subject_category = [target_subject_category] batch_size = len(prompt) prompt = self._build_prompt(prompts=prompt, tgt_subjects=target_subject_category, prompt_strength=prompt_strength, prompt_reps=prompt_reps) query_embeds = self.get_query_embeddings(reference_image, source_subject_category) text_embeddings = self.encode_prompt(query_embeds, prompt, device) do_classifier_free_guidance = guidance_scale > 1.0 if do_classifier_free_guidance: max_length = self.text_encoder.text_model.config.max_position_embeddings uncond_input = self.tokenizer([neg_prompt] * batch_size, padding='max_length', max_length=max_length, return_tensors='pt') uncond_embeddings = self.text_encoder(input_ids=uncond_input.input_ids.to(device), ctx_embeddings=None)[0] text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) latents = self.prepare_latents(batch_size=batch_size, num_channels=self.unet.config.in_channels, height=height // scale_down_factor, width=width // scale_down_factor, generator=generator, latents=latents, dtype=self.unet.dtype, device=device) extra_set_kwargs = {} self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) for (i, t) in enumerate(self.progress_bar(self.scheduler.timesteps)): do_classifier_free_guidance = guidance_scale > 1.0 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents noise_pred = self.unet(latent_model_input, timestep=t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=None, mid_block_additional_residual=None)['sample'] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = 
noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents)['prev_sample'] image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/cogvideo/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_cogvideox'] = ['CogVideoXPipeline'] _import_structure['pipeline_cogvideox_video2video'] = ['CogVideoXVideoToVideoPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_cogvideox import CogVideoXPipeline from .pipeline_cogvideox_video2video import CogVideoXVideoToVideoPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py import inspect import math from typing import Callable, Dict, List, Optional, Tuple, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel from ...models.embeddings import get_3d_rotary_pos_embed from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from .pipeline_output import CogVideoXPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```python\n >>> import torch\n >>> from diffusers import CogVideoXPipeline\n >>> from diffusers.utils import export_to_video\n\n >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"\n >>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda")\n >>> prompt = (\n ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "\n ... "The panda\'s fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "\n ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "\n ... "casting a gentle glow on the scene. The panda\'s face is expressive, showing concentration and joy as it plays. "\n ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "\n ... 
"atmosphere of this unique musical performance."\n ... )\n >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]\n >>> export_to_video(video, "output.mp4", fps=8)\n ```\n' def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): tw = tgt_width th = tgt_height (h, w) = src r = h / w if r > th / tw: resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return ((crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)) def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class CogVideoXPipeline(DiffusionPipeline): _optional_components = [] model_cpu_offload_seq = 'text_encoder->transformer->vae' _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKLCogVideoX, transformer: CogVideoXTransformer3DModel, scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler]): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler) self.vae_scale_factor_spatial = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.vae_scale_factor_temporal = self.vae.config.temporal_compression_ratio if hasattr(self, 'vae') and self.vae is not None else 4 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_videos_per_prompt: int=1, max_sequence_length: int=226, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors='pt') 
text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(text_input_ids.to(device))[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds def encode_prompt(self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]]=None, do_classifier_free_guidance: bool=True, num_videos_per_prompt: int=1, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, max_sequence_length: int=226, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds(prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') negative_prompt_embeds = self._get_t5_prompt_embeds(prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype) return (prompt_embeds, negative_prompt_embeds) def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): shape = (batch_size, (num_frames - 1) // self.vae_scale_factor_temporal + 1, num_channels_latents, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def decode_latents(self, latents: torch.Tensor) -> torch.Tensor: latents = latents.permute(0, 2, 1, 3, 4) latents = 1 / self.vae.config.scaling_factor * latents frames = self.vae.decode(latents).sample return frames def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, prompt_embeds=None, negative_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def fuse_qkv_projections(self) -> None: self.fusing_transformer = True self.transformer.fuse_qkv_projections() def unfuse_qkv_projections(self) -> None: if not self.fusing_transformer: logger.warning('The Transformer was not initially fused for QKV projections. 
Doing nothing.') else: self.transformer.unfuse_qkv_projections() self.fusing_transformer = False def _prepare_rotary_positional_embeddings(self, height: int, width: int, num_frames: int, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]: grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) base_size_width = 720 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) base_size_height = 480 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size_width, base_size_height) (freqs_cos, freqs_sin) = get_3d_rotary_pos_embed(embed_dim=self.transformer.config.attention_head_dim, crops_coords=grid_crops_coords, grid_size=(grid_height, grid_width), temporal_size=num_frames) freqs_cos = freqs_cos.to(device=device) freqs_sin = freqs_sin.to(device=device) return (freqs_cos, freqs_sin) @property def guidance_scale(self): return self._guidance_scale @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, negative_prompt: Optional[Union[str, List[str]]]=None, height: int=480, width: int=720, num_frames: int=49, num_inference_steps: int=50, timesteps: Optional[List[int]]=None, guidance_scale: float=6, use_dynamic_cfg: bool=False, num_videos_per_prompt: int=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: str='pil', return_dict: bool=True, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=226) -> Union[CogVideoXPipelineOutput, Tuple]: if num_frames > 49: raise ValueError('The number of frames must be less than 49 for now due to static positional embeddings. 
This will be updated in the future to remove this limitation.') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial num_videos_per_prompt = 1 self.check_inputs(prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, prompt_embeds, negative_prompt_embeds) self._guidance_scale = guidance_scale self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, negative_prompt, do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) self._num_timesteps = len(timesteps) latent_channels = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, latent_channels, num_frames, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) image_rotary_emb = self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) if self.transformer.config.use_rotary_positional_embeddings else None num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: old_pred_original_sample = None for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer(hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, image_rotary_emb=image_rotary_emb, return_dict=False)[0] noise_pred = noise_pred.float() if use_dynamic_cfg: self._guidance_scale = 1 + guidance_scale * ((1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2) if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if not isinstance(self.scheduler, CogVideoXDPMScheduler): latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] else: (latents, old_pred_original_sample) = self.scheduler.step(noise_pred, old_pred_original_sample, t, timesteps[i - 1] if i > 0 else None, latents, **extra_step_kwargs, return_dict=False) latents = latents.to(prompt_embeds.dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds 
= callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == 'latent': video = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents self.maybe_free_model_hooks() if not return_dict: return (video,) return CogVideoXPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/cogvideo/pipeline_cogvideox_video2video.py import inspect import math from typing import Callable, Dict, List, Optional, Tuple, Union import torch from PIL import Image from transformers import T5EncoderModel, T5Tokenizer from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel from ...models.embeddings import get_3d_rotary_pos_embed from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from .pipeline_output import CogVideoXPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```python\n >>> import torch\n >>> from diffusers import CogVideoXDPMScheduler, CogVideoXVideoToVideoPipeline\n >>> from diffusers.utils import export_to_video, load_video\n\n >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"\n >>> pipe = CogVideoXVideoToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)\n >>> pipe.to("cuda")\n >>> pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config)\n\n >>> input_video = load_video(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hiker.mp4"\n ... )\n >>> prompt = (\n ... "An astronaut stands triumphantly at the peak of a towering mountain. Panorama of rugged peaks and "\n ... "valleys. Very futuristic vibe and animated aesthetic. Highlights of purple and golden colors in "\n ... "the scene. The sky is looks like an animated/cartoonish dream of galaxies, nebulae, stars, planets, "\n ... "moons, but the remainder of the scene is mostly realistic."\n ... )\n\n >>> video = pipe(\n ... video=input_video, prompt=prompt, strength=0.8, guidance_scale=6, num_inference_steps=50\n ... ).frames[0]\n >>> export_to_video(video, "output.mp4", fps=8)\n ```\n' def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): tw = tgt_width th = tgt_height (h, w) = src r = h / w if r > th / tw: resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return ((crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)) def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') class CogVideoXVideoToVideoPipeline(DiffusionPipeline): _optional_components = [] model_cpu_offload_seq = 'text_encoder->transformer->vae' _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKLCogVideoX, transformer: CogVideoXTransformer3DModel, scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler]): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler) self.vae_scale_factor_spatial = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.vae_scale_factor_temporal = self.vae.config.temporal_compression_ratio if hasattr(self, 'vae') and self.vae is not None else 4 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_videos_per_prompt: int=1, max_sequence_length: int=226, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to 
{max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(text_input_ids.to(device))[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds def encode_prompt(self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]]=None, do_classifier_free_guidance: bool=True, num_videos_per_prompt: int=1, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, max_sequence_length: int=226, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds(prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') negative_prompt_embeds = self._get_t5_prompt_embeds(prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype) return (prompt_embeds, negative_prompt_embeds) def prepare_latents(self, video: Optional[torch.Tensor]=None, batch_size: int=1, num_channels_latents: int=16, height: int=60, width: int=90, dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, generator: Optional[torch.Generator]=None, latents: Optional[torch.Tensor]=None, timestep: Optional[torch.Tensor]=None): num_frames = (video.size(2) - 1) // self.vae_scale_factor_temporal + 1 if latents is None else latents.size(1) shape = (batch_size, num_frames, num_channels_latents, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: if isinstance(generator, list): if len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') init_latents = [retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator[i]) for i in range(batch_size)] else: init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video] init_latents = torch.cat(init_latents, dim=0).to(dtype).permute(0, 2, 1, 3, 4) init_latents = self.vae.config.scaling_factor * init_latents noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.add_noise(init_latents, noise, timestep) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def decode_latents(self, latents: torch.Tensor) -> torch.Tensor: latents = latents.permute(0, 2, 1, 3, 4) latents = 1 / self.vae.config.scaling_factor * latents frames = self.vae.decode(latents).sample return frames def get_timesteps(self, num_inference_steps, timesteps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = timesteps[t_start * self.scheduler.order:] return (timesteps, num_inference_steps - t_start) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, strength, negative_prompt, callback_on_step_end_tensor_inputs, video=None, latents=None, prompt_embeds=None, negative_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
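# Illustrative sketch, not part of the original file: a small numeric example of how the
# `strength` argument interacts with `get_timesteps` above. All names below are local to
# this example only.
num_inference_steps, strength, scheduler_order = 50, 0.8, 1
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
# The pipeline then keeps timesteps[t_start * scheduler_order:], i.e. the last 40 of the 50
# scheduled timesteps, and noises the encoded input video to the first of those timesteps
# before denoising. strength=1.0 starts from (nearly) pure noise; strength=0.0 leaves no
# denoising steps at all.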
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if video is not None and latents is not None: raise ValueError('Only one of `video` or `latents` should be provided') def fuse_qkv_projections(self) -> None: self.fusing_transformer = True self.transformer.fuse_qkv_projections() def unfuse_qkv_projections(self) -> None: if not self.fusing_transformer: logger.warning('The Transformer was not initially fused for QKV projections. Doing nothing.') else: self.transformer.unfuse_qkv_projections() self.fusing_transformer = False def _prepare_rotary_positional_embeddings(self, height: int, width: int, num_frames: int, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]: grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) base_size_width = 720 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) base_size_height = 480 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size_width, base_size_height) (freqs_cos, freqs_sin) = get_3d_rotary_pos_embed(embed_dim=self.transformer.config.attention_head_dim, crops_coords=grid_crops_coords, grid_size=(grid_height, grid_width), temporal_size=num_frames) freqs_cos = freqs_cos.to(device=device) freqs_sin = freqs_sin.to(device=device) return (freqs_cos, freqs_sin) @property def guidance_scale(self): return self._guidance_scale @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, video: List[Image.Image]=None, prompt: Optional[Union[str, List[str]]]=None, negative_prompt: Optional[Union[str, List[str]]]=None, height: int=480, width: int=720, num_inference_steps: int=50, timesteps: Optional[List[int]]=None, strength: float=0.8, guidance_scale: float=6, use_dynamic_cfg: bool=False, num_videos_per_prompt: int=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: str='pil', return_dict: bool=True, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=226) -> Union[CogVideoXPipelineOutput, Tuple]: if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial num_videos_per_prompt = 1 self.check_inputs(prompt, height, width, strength, negative_prompt, callback_on_step_end_tensor_inputs, video=video, latents=latents, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) self._guidance_scale = guidance_scale self._interrupt = False if prompt is not None and
isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, negative_prompt, do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, timesteps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) self._num_timesteps = len(timesteps) if latents is None: video = self.video_processor.preprocess_video(video, height=height, width=width) video = video.to(device=device, dtype=prompt_embeds.dtype) latent_channels = self.transformer.config.in_channels latents = self.prepare_latents(video, batch_size * num_videos_per_prompt, latent_channels, height, width, prompt_embeds.dtype, device, generator, latents, latent_timestep) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) image_rotary_emb = self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) if self.transformer.config.use_rotary_positional_embeddings else None num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: old_pred_original_sample = None for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer(hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, image_rotary_emb=image_rotary_emb, return_dict=False)[0] noise_pred = noise_pred.float() if use_dynamic_cfg: self._guidance_scale = 1 + guidance_scale * ((1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2) if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if not isinstance(self.scheduler, CogVideoXDPMScheduler): latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] else: (latents, old_pred_original_sample) = self.scheduler.step(noise_pred, old_pred_original_sample, t, timesteps[i - 1] if i > 0 else None, latents, **extra_step_kwargs, return_dict=False) latents = latents.to(prompt_embeds.dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not 
output_type == 'latent': video = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents self.maybe_free_model_hooks() if not return_dict: return (video,) return CogVideoXPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/cogvideo/pipeline_output.py from dataclasses import dataclass import torch from diffusers.utils import BaseOutput @dataclass class CogVideoXPipelineOutput(BaseOutput): frames: torch.Tensor # File: diffusers-main/src/diffusers/pipelines/consistency_models/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_consistency_models': ['ConsistencyModelPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_consistency_models import ConsistencyModelPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py from typing import Callable, List, Optional, Union import torch from ...models import UNet2DModel from ...schedulers import CMStochasticIterativeScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n\n >>> from diffusers import ConsistencyModelPipeline\n\n >>> device = "cuda"\n >>> # Load the cd_imagenet64_l2 checkpoint.\n >>> model_id_or_path = "openai/diffusers-cd_imagenet64_l2"\n >>> pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)\n >>> pipe.to(device)\n\n >>> # Onestep Sampling\n >>> image = pipe(num_inference_steps=1).images[0]\n >>> image.save("cd_imagenet64_l2_onestep_sample.png")\n\n >>> # Onestep sampling, class-conditional image generation\n >>> # ImageNet-64 class label 145 corresponds to king penguins\n >>> image = pipe(num_inference_steps=1, class_labels=145).images[0]\n >>> image.save("cd_imagenet64_l2_onestep_sample_penguin.png")\n\n >>> # Multistep sampling, class-conditional image generation\n >>> # Timesteps can be explicitly specified; the particular timesteps below are from the original GitHub repo:\n >>> # https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L77\n >>> image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=145).images[0]\n >>> image.save("cd_imagenet64_l2_multistep_sample_penguin.png")\n ```\n' class ConsistencyModelPipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet' def __init__(self, unet: UNet2DModel, scheduler: CMStochasticIterativeScheduler) -> None: super().__init__() self.register_modules(unet=unet, scheduler=scheduler) self.safety_checker = None def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
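# A minimal usage sketch for the video-to-video pipeline defined above. The checkpoint id,
# input clip and frame rate are placeholders/assumptions, not taken from this file.
import torch
from diffusers import CogVideoXDPMScheduler, CogVideoXVideoToVideoPipeline
from diffusers.utils import export_to_video, load_video

pipe = CogVideoXVideoToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

input_frames = load_video("input.mp4")  # list of PIL.Image frames
video = pipe(
    video=input_frames,
    prompt="a panda playing a guitar in a snowy forest",
    strength=0.8,            # how far the source video is re-noised (see get_timesteps above)
    guidance_scale=6.0,
    use_dynamic_cfg=True,    # rescales the effective guidance over the denoising trajectory (cosine-based)
    num_inference_steps=50,
).frames[0]
export_to_video(video, "output.mp4", fps=8)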
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) latents = latents * self.scheduler.init_noise_sigma return latents def postprocess_image(self, sample: torch.Tensor, output_type: str='pil'): if output_type not in ['pt', 'np', 'pil']: raise ValueError(f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']") sample = (sample / 2 + 0.5).clamp(0, 1) if output_type == 'pt': return sample sample = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'np': return sample sample = self.numpy_to_pil(sample) return sample def prepare_class_labels(self, batch_size, device, class_labels=None): if self.unet.config.num_class_embeds is not None: if isinstance(class_labels, list): class_labels = torch.tensor(class_labels, dtype=torch.int) elif isinstance(class_labels, int): assert batch_size == 1, 'Batch size must be 1 if classes is an int' class_labels = torch.tensor([class_labels], dtype=torch.int) elif class_labels is None: class_labels = torch.randint(0, self.unet.config.num_class_embeds, size=(batch_size,)) class_labels = class_labels.to(device) else: class_labels = None return class_labels def check_inputs(self, num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps): if num_inference_steps is None and timesteps is None: raise ValueError('Exactly one of `num_inference_steps` or `timesteps` must be supplied.') if num_inference_steps is not None and timesteps is not None: logger.warning(f'Both `num_inference_steps`: {num_inference_steps} and `timesteps`: {timesteps} are supplied; `timesteps` will be used over `num_inference_steps`.') if latents is not None: expected_shape = (batch_size, 3, img_size, img_size) if latents.shape != expected_shape: raise ValueError(f'The shape of latents is {latents.shape} but is expected to be {expected_shape}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, batch_size: int=1, class_labels: Optional[Union[torch.Tensor, List[int], int]]=None, num_inference_steps: int=1, timesteps: List[int]=None, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1): img_size = self.unet.config.sample_size device = self._execution_device self.check_inputs(num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps) sample = self.prepare_latents(batch_size=batch_size, num_channels=self.unet.config.in_channels, height=img_size, width=img_size, dtype=self.unet.dtype, device=device, generator=generator, latents=latents) class_labels = self.prepare_class_labels(batch_size, device, class_labels=class_labels) if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): 
scaled_sample = self.scheduler.scale_model_input(sample, t) model_output = self.unet(scaled_sample, t, class_labels=class_labels, return_dict=False)[0] sample = self.scheduler.step(model_output, t, sample, generator=generator)[0] progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, sample) image = self.postprocess_image(sample, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/controlnet/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['multicontrolnet'] = ['MultiControlNetModel'] _import_structure['pipeline_controlnet'] = ['StableDiffusionControlNetPipeline'] _import_structure['pipeline_controlnet_blip_diffusion'] = ['BlipDiffusionControlNetPipeline'] _import_structure['pipeline_controlnet_img2img'] = ['StableDiffusionControlNetImg2ImgPipeline'] _import_structure['pipeline_controlnet_inpaint'] = ['StableDiffusionControlNetInpaintPipeline'] _import_structure['pipeline_controlnet_inpaint_sd_xl'] = ['StableDiffusionXLControlNetInpaintPipeline'] _import_structure['pipeline_controlnet_sd_xl'] = ['StableDiffusionXLControlNetPipeline'] _import_structure['pipeline_controlnet_sd_xl_img2img'] = ['StableDiffusionXLControlNetImg2ImgPipeline'] try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_flax_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: _import_structure['pipeline_flax_controlnet'] = ['FlaxStableDiffusionControlNetPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * else: from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in 
_dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/controlnet/multicontrolnet.py import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging logger = logging.get_logger(__name__) class MultiControlNetModel(ModelMixin): def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]): super().__init__() self.nets = nn.ModuleList(controlnets) def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guess_mode: bool=False, return_dict: bool=True) -> Union[ControlNetOutput, Tuple]: for (i, (image, scale, controlnet)) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)): (down_samples, mid_sample) = controlnet(sample=sample, timestep=timestep, encoder_hidden_states=encoder_hidden_states, controlnet_cond=image, conditioning_scale=scale, class_labels=class_labels, timestep_cond=timestep_cond, attention_mask=attention_mask, added_cond_kwargs=added_cond_kwargs, cross_attention_kwargs=cross_attention_kwargs, guess_mode=guess_mode, return_dict=return_dict) if i == 0: (down_block_res_samples, mid_block_res_sample) = (down_samples, mid_sample) else: down_block_res_samples = [samples_prev + samples_curr for (samples_prev, samples_curr) in zip(down_block_res_samples, down_samples)] mid_block_res_sample += mid_sample return (down_block_res_samples, mid_block_res_sample) def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool=True, save_function: Callable=None, safe_serialization: bool=True, variant: Optional[str]=None): for (idx, controlnet) in enumerate(self.nets): suffix = '' if idx == 0 else f'_{idx}' controlnet.save_pretrained(save_directory + suffix, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant) @classmethod def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): idx = 0 controlnets = [] model_path_to_load = pretrained_model_path while os.path.isdir(model_path_to_load): controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs) controlnets.append(controlnet) idx += 1 model_path_to_load = pretrained_model_path + f'_{idx}' logger.info(f'{len(controlnets)} controlnets loaded from {pretrained_model_path}.') if len(controlnets) == 0: raise ValueError(f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. 
Expected at least {pretrained_model_path + '_0'}.") return cls(controlnets) # File: diffusers-main/src/diffusers/pipelines/controlnet/pipeline_controlnet.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .multicontrolnet import MultiControlNetModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install opencv-python transformers accelerate\n >>> from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler\n >>> from diffusers.utils import load_image\n >>> import numpy as np\n >>> import torch\n\n >>> import cv2\n >>> from PIL import Image\n\n >>> # download an image\n >>> image = load_image(\n ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"\n ... )\n >>> image = np.array(image)\n\n >>> # get canny image\n >>> image = cv2.Canny(image, 100, 200)\n >>> image = image[:, :, None]\n >>> image = np.concatenate([image, image, image], axis=2)\n >>> canny_image = Image.fromarray(image)\n\n >>> # load control net and stable diffusion v1-5\n >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)\n >>> pipe = StableDiffusionControlNetPipeline.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16\n ... )\n\n >>> # speed up diffusion process with faster scheduler and memory optimization\n >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)\n >>> # remove following line if xformers is not installed\n >>> pipe.enable_xformers_memory_efficient_attention()\n\n >>> pipe.enable_model_cpu_offload()\n\n >>> # generate image\n >>> generator = torch.manual_seed(0)\n >>> image = pipe(\n ... "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image\n ... ).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. 
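# Illustrative sketch of the save/load convention implemented by MultiControlNetModel above:
# the first ControlNet is written to the given directory and every additional one to the same
# path with a numeric suffix, which from_pretrained walks until a suffix is missing. Paths and
# checkpoints here are placeholders.
from diffusers import ControlNetModel

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
multi = MultiControlNetModel([canny, depth])
multi.save_pretrained("./multi-controlnet")   # writes ./multi-controlnet and ./multi-controlnet_1
restored = MultiControlNetModel.from_pretrained("./multi-controlnet")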
Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionControlNetPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
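# Sketch of how the retrieve_timesteps helper above dispatches (the scheduler choice is an
# assumption for illustration): exactly one of `num_inference_steps`, `timesteps` or `sigmas`
# drives the schedule, and the custom branches only work for schedulers whose set_timesteps
# accepts the corresponding keyword.
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler()
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")
assert num_inference_steps == len(timesteps) == 30
# retrieve_timesteps(scheduler, device="cpu", timesteps=[...]) or sigmas=[...] would instead
# forward the custom schedule, and a ValueError is raised if the scheduler does not support it.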
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), 
attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = 
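# Illustrative call (assumption: `pipe` is a StableDiffusionControlNetPipeline instance) showing
# the clip_skip branch of encode_prompt above: clip_skip=N takes the CLIP hidden states N layers
# before the last one and re-applies the final layer norm, similar in spirit to the "CLIP skip"
# setting in popular Stable Diffusion UIs.
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    prompt="a photo of an astronaut riding a horse",
    device="cuda",
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    negative_prompt="low quality, blurry",
    clip_skip=2,
)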
self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): transposed_image = [list(t) for t in zip(*image)] if len(transposed_image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: if you pass`image` as a list of list, each sublist must have the same length as the number of controlnets, but the sublists in `image` got {len(transposed_image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in transposed_image: self.check_image(image_, prompt, prompt_embeds) elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') else: for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. The conditioning scale must be fixed across the batch.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. 
Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], 
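# Quick shape check (assumption: `pipe` is the pipeline above) for get_guidance_scale_embedding:
# it is only used when the UNet was trained with `time_cond_proj_dim` (e.g. LCM-distilled UNets),
# in which case the pipeline embeds `guidance_scale - 1` as a timestep condition instead of doing
# classifier-free guidance.
w = torch.tensor([7.5 - 1.0])
emb = pipe.get_guidance_scale_embedding(w, embedding_dim=256)
assert emb.shape == (1, 256)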
**kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, image, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) if isinstance(controlnet, ControlNetModel): image = self.prepare_image(image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) (height, width) = image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): images = [] if 
isinstance(image[0], list): image = [list(t) for t in zip(*image)] for image_ in image: image_ = self.prepare_image(image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) images.append(image_) image = images (height, width) = image[0].shape[-2:] else: assert False (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) self._num_timesteps = len(timesteps) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version('>=', '2.1') with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=image, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False) if guess_mode and self.do_classifier_free_guidance: down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, 
cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') self.controlnet.to('cpu') torch.cuda.empty_cache() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py from typing import List, Optional, Union import PIL.Image import torch from transformers import CLIPTokenizer from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel from ...schedulers import PNDMScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..blip_diffusion.blip_image_processing import BlipImageProcessor from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel from ..blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers.pipelines import BlipDiffusionControlNetPipeline\n >>> from diffusers.utils import load_image\n >>> from controlnet_aux import CannyDetector\n >>> import torch\n\n >>> blip_diffusion_pipe = BlipDiffusionControlNetPipeline.from_pretrained(\n ... "Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16\n ... ).to("cuda")\n\n >>> style_subject = "flower"\n >>> tgt_subject = "teapot"\n >>> text_prompt = "on a marble table"\n\n >>> cldm_cond_image = load_image(\n ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/kettle.jpg"\n ... ).resize((512, 512))\n >>> canny = CannyDetector()\n >>> cldm_cond_image = canny(cldm_cond_image, 30, 70, output_type="pil")\n >>> style_image = load_image(\n ... 
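# Illustrative sketch, not part of the pipeline source. The guidance step in the denoising loop
# above combines the batched prediction as uncond + guidance_scale * (text - uncond); the tensor
# sizes here are arbitrary stand-ins.
import torch

noise_pred = torch.randn(2, 4, 64, 64)                     # [uncond, text] stacked on the batch dim
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
print(guided.shape)                                        # torch.Size([1, 4, 64, 64])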
"https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/flower.jpg"\n ... )\n >>> guidance_scale = 7.5\n >>> num_inference_steps = 50\n >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate"\n\n\n >>> output = blip_diffusion_pipe(\n ... text_prompt,\n ... style_image,\n ... cldm_cond_image,\n ... style_subject,\n ... tgt_subject,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=num_inference_steps,\n ... neg_prompt=negative_prompt,\n ... height=512,\n ... width=512,\n ... ).images\n >>> output[0].save("image.png")\n ```\n' class BlipDiffusionControlNetPipeline(DiffusionPipeline): model_cpu_offload_seq = 'qformer->text_encoder->unet->vae' def __init__(self, tokenizer: CLIPTokenizer, text_encoder: ContextCLIPTextModel, vae: AutoencoderKL, unet: UNet2DConditionModel, scheduler: PNDMScheduler, qformer: Blip2QFormerModel, controlnet: ControlNetModel, image_processor: BlipImageProcessor, ctx_begin_pos: int=2, mean: List[float]=None, std: List[float]=None): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, controlnet=controlnet, image_processor=image_processor) self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) def get_query_embeddings(self, input_image, src_subject): return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): rv = [] for (prompt, tgt_subject) in zip(prompts, tgt_subjects): prompt = f'a {tgt_subject} {prompt.strip()}' rv.append(', '.join([prompt] * int(prompt_strength * prompt_reps))) return rv def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) latents = latents * self.scheduler.init_noise_sigma return latents def encode_prompt(self, query_embeds, prompt, device=None): device = device or self._execution_device max_len = self.text_encoder.text_model.config.max_position_embeddings max_len -= self.qformer.config.num_query_tokens tokenized_prompt = self.tokenizer(prompt, padding='max_length', truncation=True, max_length=max_len, return_tensors='pt').to(device) batch_size = query_embeds.shape[0] ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size text_embeddings = self.text_encoder(input_ids=tokenized_prompt.input_ids, ctx_embeddings=query_embeds, ctx_begin_pos=ctx_begin_pos)[0] return text_embeddings def prepare_control_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False): image = self.image_processor.preprocess(image, size={'width': width, 'height': height}, do_rescale=True, do_center_crop=False, do_normalize=False, return_tensors='pt')['pixel_values'].to(device) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance: image = torch.cat([image] * 2) return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: List[str], reference_image: PIL.Image.Image, condtioning_image: PIL.Image.Image, source_subject_category: List[str], target_subject_category: List[str], latents: Optional[torch.Tensor]=None, guidance_scale: float=7.5, height: int=512, width: int=512, num_inference_steps: int=50, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, neg_prompt: Optional[str]='', prompt_strength: float=1.0, prompt_reps: int=20, output_type: Optional[str]='pil', return_dict: bool=True): device = self._execution_device reference_image = self.image_processor.preprocess(reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors='pt')['pixel_values'] reference_image = reference_image.to(device) if isinstance(prompt, str): prompt = [prompt] if isinstance(source_subject_category, str): source_subject_category = [source_subject_category] if isinstance(target_subject_category, str): target_subject_category = [target_subject_category] batch_size = len(prompt) prompt = self._build_prompt(prompts=prompt, tgt_subjects=target_subject_category, prompt_strength=prompt_strength, prompt_reps=prompt_reps) query_embeds = self.get_query_embeddings(reference_image, source_subject_category) text_embeddings = self.encode_prompt(query_embeds, prompt, device) do_classifier_free_guidance = guidance_scale > 1.0 if do_classifier_free_guidance: max_length = self.text_encoder.text_model.config.max_position_embeddings uncond_input = self.tokenizer([neg_prompt] * batch_size, padding='max_length', max_length=max_length, return_tensors='pt') uncond_embeddings = self.text_encoder(input_ids=uncond_input.input_ids.to(device), ctx_embeddings=None)[0] text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) latents = self.prepare_latents(batch_size=batch_size, num_channels=self.unet.config.in_channels, height=height // scale_down_factor, 
width=width // scale_down_factor, generator=generator, latents=latents, dtype=self.unet.dtype, device=device) extra_set_kwargs = {} self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) cond_image = self.prepare_control_image(image=condtioning_image, width=width, height=height, batch_size=batch_size, num_images_per_prompt=1, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance) for (i, t) in enumerate(self.progress_bar(self.scheduler.timesteps)): do_classifier_free_guidance = guidance_scale > 1.0 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents (down_block_res_samples, mid_block_res_sample) = self.controlnet(latent_model_input, t, encoder_hidden_states=text_embeddings, controlnet_cond=cond_image, return_dict=False) noise_pred = self.unet(latent_model_input, timestep=t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample)['sample'] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents)['prev_sample'] image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .multicontrolnet import MultiControlNetModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install opencv-python transformers accelerate\n >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler\n >>> from diffusers.utils import load_image\n >>> import numpy as np\n >>> import torch\n\n >>> import cv2\n >>> from PIL import Image\n\n >>> # download an image\n >>> image = load_image(\n ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"\n ... 
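# Illustrative sketch, not part of the pipeline source. The latents prepared above live at the
# requested resolution divided by 2 ** (len(block_out_channels) - 1); for the usual four-level
# Stable Diffusion UNet (config values below are an assumption) that is a factor of 8, so a
# 512x512 request denoises a 64x64 latent grid.
block_out_channels = (320, 640, 1280, 1280)
scale_down_factor = 2 ** (len(block_out_channels) - 1)
height = width = 512
print(scale_down_factor, height // scale_down_factor, width // scale_down_factor)   # 8 64 64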
)\n >>> np_image = np.array(image)\n\n >>> # get canny image\n >>> np_image = cv2.Canny(np_image, 100, 200)\n >>> np_image = np_image[:, :, None]\n >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)\n >>> canny_image = Image.fromarray(np_image)\n\n >>> # load control net and stable diffusion v1-5\n >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)\n >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16\n ... )\n\n >>> # speed up diffusion process with faster scheduler and memory optimization\n >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)\n >>> pipe.enable_model_cpu_offload()\n\n >>> # generate image\n >>> generator = torch.manual_seed(0)\n >>> image = pipe(\n ... "futuristic-looking woman",\n ... num_inference_steps=20,\n ... generator=generator,\n ... image=image,\n ... control_image=canny_image,\n ... ).images[0]\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def prepare_image(image): if isinstance(image, torch.Tensor): if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert('RGB'))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. 
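# Illustrative sketch, not part of the pipeline source. The `prepare_image` helper above converts
# PIL/numpy inputs into an NCHW float tensor rescaled from [0, 255] to [-1, 1]:
import numpy as np
import torch

rgb = np.zeros((64, 64, 3), dtype=np.uint8)                       # stand-in for a loaded RGB image
batch = torch.from_numpy(rgb[None].transpose(0, 3, 1, 2)).to(torch.float32) / 127.5 - 1.0
print(batch.shape, batch.min().item(), batch.max().item())        # torch.Size([1, 3, 64, 64]) -1.0 -1.0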
Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.'
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
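# Illustrative sketch, not part of the pipeline source. `prepare_ip_adapter_image_embeds`
# (continued on the next line) returns one tensor per IP-Adapter; under classifier-free guidance
# each tensor is the negative embeddings concatenated before the positive ones along the batch
# dimension, mirroring the [uncond, cond] layout of the prompt embeddings. Shapes are stand-ins.
import torch

positive = torch.randn(1, 4, 768)
negative = torch.zeros_like(positive)
num_images_per_prompt = 2
pos = torch.cat([positive] * num_images_per_prompt, dim=0)
neg = torch.cat([negative] * num_images_per_prompt, dim=0)
print(torch.cat([neg, pos], dim=0).shape)          # torch.Size([4, 4, 768]); uncond half comes first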
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. 
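# Illustrative sketch, not part of the pipeline source. `prepare_extra_step_kwargs` above forwards
# `eta` and `generator` to the scheduler only when its `step()` signature accepts them, probed with
# `inspect.signature`; the stand-in `step` function below is hypothetical.
import inspect

def step(model_output, timestep, sample, eta=0.0, generator=None):
    ...

accepted = set(inspect.signature(step).parameters.keys())
extra_step_kwargs = {}
if "eta" in accepted:
    extra_step_kwargs["eta"] = 0.0
if "generator" in accepted:
    extra_step_kwargs["generator"] = None
print(extra_step_kwargs)                           # {'eta': 0.0, 'generator': None}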
The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_control_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
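# Illustrative worked example, not part of the pipeline source, of `get_timesteps` above: `strength`
# decides how many of the scheduled steps are actually run on the init-image latents, so lower
# strength keeps more of the input image. With 50 steps and strength=0.8 the first 10 scheduled
# steps are skipped and 40 denoising steps remain.
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
print(init_timestep, t_start, num_inference_steps - t_start)      # 40 10 40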
Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, control_image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, strength: float=0.8, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=0.8, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = 
kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, control_image, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image(image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) elif 
isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image(image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) control_images.append(control_image_) control_image = control_images else: assert False self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) if latents is None: latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False) if guess_mode and self.do_classifier_free_guidance: down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in 
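# Illustrative sketch, not part of the pipeline source. In guess mode with classifier-free guidance
# the ControlNet above is run only on the conditional half of the batch, so its residuals are padded
# with zeros for the unconditional half before being handed to the UNet:
import torch

res = torch.randn(1, 320, 64, 64)                  # stand-in residual for the conditional half
padded = torch.cat([torch.zeros_like(res), res])   # the uncond half receives no ControlNet signal
print(padded.shape)                                # torch.Size([2, 320, 64, 64])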
callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') self.controlnet.to('cpu') torch.cuda.empty_cache() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .multicontrolnet import MultiControlNetModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install transformers accelerate\n >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler\n >>> from diffusers.utils import load_image\n >>> import numpy as np\n >>> import torch\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"\n ... )\n >>> init_image = init_image.resize((512, 512))\n\n >>> generator = torch.Generator(device="cpu").manual_seed(1)\n\n >>> mask_image = load_image(\n ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"\n ... )\n >>> mask_image = mask_image.resize((512, 512))\n\n\n >>> def make_canny_condition(image):\n ... 
image = np.array(image)\n ... image = cv2.Canny(image, 100, 200)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... image = Image.fromarray(image)\n ... return image\n\n\n >>> control_image = make_canny_condition(init_image)\n\n >>> controlnet = ControlNetModel.from_pretrained(\n ... "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16\n ... )\n >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16\n ... )\n\n >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)\n >>> pipe.enable_model_cpu_offload()\n\n >>> # generate image\n >>> image = pipe(\n ... "a handsome man with ray-ban sunglasses",\n ... num_inference_steps=20,\n ... generator=generator,\n ... eta=1.0,\n ... image=init_image,\n ... mask_image=mask_image,\n ... control_image=control_image,\n ... ).images[0]\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
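# Illustrative sketch, not part of the pipeline source. `retrieve_latents` above dispatches on the
# VAE encoder output: a `latent_dist` is sampled ('sample') or reduced to its mode ('argmax'), while
# outputs that already carry `.latents` are returned unchanged. The classes below are hypothetical
# stand-ins for the real encoder output.
import torch

class _Dist:
    def sample(self, generator=None):
        return torch.randn(1, 4, 64, 64)
    def mode(self):
        return torch.zeros(1, 4, 64, 64)

class EncoderOutput:
    latent_dist = _Dist()

out = EncoderOutput()
latents = out.latent_dist.sample() if hasattr(out, "latent_dist") else out.latents
print(latents.shape)                               # torch.Size([1, 4, 64, 64])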
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) 
else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = 
uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def check_inputs(self, prompt, image, mask_image, height, width, callback_steps, output_type, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None, padding_mask_crop=None): if height is not None and height % 8 != 0 or (width is not None and width % 8 != 0): raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
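# --- Illustrative sketch (added commentary, not part of the original diffusers source) ---
# How get_timesteps() above trims the schedule according to `strength` for a first-order scheduler.
# The timestep values are a stand-in for scheduler.timesteps; with 50 steps and strength=0.6 only
# the last 30 timesteps are kept, so the init image is only partially re-noised before denoising.
num_inference_steps, strength = 50, 0.6
timesteps = list(range(999, -1, -20))                                            # stand-in schedule, order == 1
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)    # 30
t_start = max(num_inference_steps - init_timestep, 0)                            # 20
trimmed = timesteps[t_start:]
print(len(trimmed))  # 30
# --- end of sketch ---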
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if padding_mask_crop is not None: if not isinstance(image, PIL.Image.Image): raise ValueError(f'The image should be a PIL image when inpainting mask crop, but is of type {type(image)}.') if not isinstance(mask_image, PIL.Image.Image): raise ValueError(f'The mask image should be a PIL image when inpainting mask crop, but is of type {type(mask_image)}.') if output_type != 'pil': raise ValueError(f'The output type should be PIL when inpainting mask crop, but is {output_type}.') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. 
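# --- Illustrative sketch (added commentary, not part of the original diffusers source) ---
# The (control_guidance_start, control_guidance_end) pair validated above is later expanded in
# __call__ into a per-step `controlnet_keep` schedule: a step keeps ControlNet conditioning only
# while its normalized position in the schedule lies inside the [start, end] window.
num_steps = 10
start, end = 0.2, 0.8
keep = [1.0 - float(i / num_steps < start or (i + 1) / num_steps > end) for i in range(num_steps)]
print(keep)  # [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]
# --- end of sketch ---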
Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. 
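# --- Illustrative sketch (added commentary, not part of the original diffusers source) ---
# The batch-size rule enforced by check_image() above, restated as a tiny hypothetical helper:
# a single conditioning image broadcasts over any number of prompts, otherwise the sizes must match.
def _batch_sizes_compatible(image_batch_size: int, prompt_batch_size: int) -> bool:
    return image_batch_size == 1 or image_batch_size == prompt_batch_size

assert _batch_sizes_compatible(1, 4)        # one control image, four prompts -> broadcast
assert _batch_sizes_compatible(4, 4)        # matching batches
assert not _batch_sizes_compatible(2, 4)    # mismatch -> check_image raises ValueError
# --- end of sketch ---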
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_control_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, crops_coords, resize_mode, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, return_noise=False, return_image_latents=False): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if (image is None or timestep is None) and (not is_strength_max): raise ValueError('Since strength < 1. initial latents are to be initialised as a combination of Image + Noise.However, either the image or the noise timestep has not been provided.') if return_image_latents or (latents is None and (not is_strength_max)): image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: image_latents = image else: image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents else: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def prepare_mask_latents(self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance): mask = torch.nn.functional.interpolate(mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask = mask.to(device=device, dtype=dtype) masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError(f"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size.") mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError(f"The passed images and the required batch size don't match. 
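# --- Illustrative sketch (added commentary, not part of the original diffusers source) ---
# Shape bookkeeping done by prepare_mask_latents() above: the binary mask is resized to the latent
# resolution, and under classifier-free guidance both mask and masked-image latents are duplicated
# along the batch dimension so they line up with the doubled latent_model_input.
import torch
import torch.nn.functional as F

_vae_scale_factor = 8
_mask = (torch.rand(1, 1, 512, 512) > 0.5).float()                    # preprocessed binary mask
_mask_latent = F.interpolate(_mask, size=(512 // _vae_scale_factor, 512 // _vae_scale_factor))
print(_mask_latent.shape)                                              # torch.Size([1, 1, 64, 64])
_mask_cfg = torch.cat([_mask_latent] * 2)                              # duplicated for CFG
print(_mask_cfg.shape)                                                 # torch.Size([2, 1, 64, 64])
# --- end of sketch ---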
Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. Make sure the number of images that you pass is divisible by the total requested batch size.") masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return (mask, masked_image_latents) def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = self.vae.config.scaling_factor * image_latents return image_latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, mask_image: PipelineImageInput=None, control_image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, padding_mask_crop: Optional[int]=None, strength: float=1.0, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=0.5, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and 
isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, control_image, mask_image, height, width, callback_steps, output_type, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, padding_mask_crop) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if padding_mask_crop is not None: (height, width) = self.image_processor.get_default_height_width(image, height, width) crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = 'fill' else: crops_coords = None resize_mode = 'default' device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image(image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, crops_coords=crops_coords, resize_mode=resize_mode, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image(image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, crops_coords=crops_coords, resize_mode=resize_mode, 
do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) control_images.append(control_image_) control_image = control_images else: assert False original_image = image init_image = self.image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode) init_image = init_image.to(dtype=torch.float32) mask = self.mask_processor.preprocess(mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) masked_image = init_image * (mask < 0.5) (_, _, height, width) = init_image.shape self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps=num_inference_steps, strength=strength, device=device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 self._num_timesteps = len(timesteps) num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 latents_outputs = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, image=init_image, timestep=latent_timestep, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents) if return_image_latents: (latents, noise, image_latents) = latents_outputs else: (latents, noise) = latents_outputs (mask, masked_image_latents) = self.prepare_mask_latents(mask, masked_image, batch_size * num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False) if guess_mode and self.do_classifier_free_guidance: down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in 
down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) if num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: (init_mask, _) = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise(init_latents_proper, noise, torch.tensor([noise_timestep])) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') self.controlnet.to('cpu') torch.cuda.empty_cache() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import 
AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from .multicontrolnet import MultiControlNetModel if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker logger = logging.get_logger(__name__) def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install transformers accelerate\n >>> from diffusers import StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, DDIMScheduler\n >>> from diffusers.utils import load_image\n >>> from PIL import Image\n >>> import numpy as np\n >>> import torch\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"\n ... )\n >>> init_image = init_image.resize((1024, 1024))\n\n >>> generator = torch.Generator(device="cpu").manual_seed(1)\n\n >>> mask_image = load_image(\n ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"\n ... )\n >>> mask_image = mask_image.resize((1024, 1024))\n\n\n >>> def make_canny_condition(image):\n ... image = np.array(image)\n ... image = cv2.Canny(image, 100, 200)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... image = Image.fromarray(image)\n ... return image\n\n\n >>> control_image = make_canny_condition(init_image)\n\n >>> controlnet = ControlNetModel.from_pretrained(\n ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16\n ... )\n >>> pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16\n ... )\n\n >>> pipe.enable_model_cpu_offload()\n\n >>> # generate image\n >>> image = pipe(\n ... "a handsome man with ray-ban sunglasses",\n ... num_inference_steps=20,\n ... generator=generator,\n ... eta=1.0,\n ... image=init_image,\n ... mask_image=mask_image,\n ... control_image=control_image,\n ... 
).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class StableDiffusionXLControlNetInpaintPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, IPAdapterMixin, TextualInversionLoaderMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'add_neg_time_ids', 'mask', 'masked_image_latents'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, requires_aesthetics_score: bool=False, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, feature_extractor: Optional[CLIPImageProcessor]=None, image_encoder: Optional[CLIPVisionModelWithProjection]=None): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: 
scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
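# --- Illustrative sketch (added commentary, not part of the original diffusers source) ---
# How the SDXL encode_prompt() above merges its two text encoders: the penultimate hidden states
# (768-dim CLIP ViT-L and 1280-dim OpenCLIP bigG, shown here as random stand-ins) are concatenated
# on the feature axis into the 2048-dim sequence the SDXL UNet expects, while the pooled embedding
# comes from the second encoder.
import torch

_batch, _seq_len = 1, 77
_hidden_1 = torch.randn(_batch, _seq_len, 768)     # stand-in for text_encoder hidden_states[-2]
_hidden_2 = torch.randn(_batch, _seq_len, 1280)    # stand-in for text_encoder_2 hidden_states[-2]
_prompt_embeds = torch.concat([_hidden_1, _hidden_2], dim=-1)
print(_prompt_embeds.shape)                        # torch.Size([1, 77, 2048])
_pooled_prompt_embeds = torch.randn(_batch, 1280)  # pooled output of text_encoder_2
# --- end of sketch ---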
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, 
uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch 
size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def check_inputs(self, prompt, prompt_2, image, mask_image, strength, num_inference_steps, callback_steps, output_type, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None, padding_mask_crop=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if num_inference_steps is None: raise ValueError('`num_inference_steps` cannot be None.') elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError(f'`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type {type(num_inference_steps)}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if padding_mask_crop is not None: if not isinstance(image, PIL.Image.Image): raise ValueError(f'The image should be a PIL image when inpainting mask crop, but is of type {type(image)}.') if not isinstance(mask_image, PIL.Image.Image): raise ValueError(f'The mask image should be a PIL image when inpainting mask crop, but is of type {type(mask_image)}.') if output_type != 'pil': raise ValueError(f'The output type should be PIL when inpainting mask crop, but is {output_type}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When 
`controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_control_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, crops_coords, resize_mode, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, add_noise=True, return_noise=False, return_image_latents=False): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if (image is None or timestep is None) and (not is_strength_max): raise ValueError('Since strength < 1. 
initial latents are to be initialised as a combination of Image + Noise.However, either the image or the noise timestep has not been provided.') if return_image_latents or (latents is None and (not is_strength_max)): image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: image_latents = image else: image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents(self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance): mask = torch.nn.functional.interpolate(mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask = mask.to(device=device, dtype=dtype) if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError(f"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size.") mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = None if masked_image is not None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError(f"The passed images and the required batch size don't match. Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. 
Make sure the number of images that you pass is divisible by the total requested batch size.") masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) masked_image_latents = torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return (mask, masked_image_latents) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) else: discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_start * self.scheduler.config.num_train_timesteps)) num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: num_inference_steps = num_inference_steps + 1 t_start = len(self.scheduler.timesteps) - num_inference_steps timesteps = self.scheduler.timesteps[t_start:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start) return (timesteps, num_inference_steps) def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype, text_encoder_projection_dim=None): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,)) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim > passed_add_embed_dim and expected_add_embed_dim - passed_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.') elif expected_add_embed_dim < passed_add_embed_dim and passed_add_embed_dim - expected_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.') elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return (add_time_ids, add_neg_time_ids) def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, mask_image: PipelineImageInput=None, control_image: Union[PipelineImageInput, List[PipelineImageInput]]=None, height: Optional[int]=None, width: Optional[int]=None, padding_mask_crop: Optional[int]=None, strength: float=0.9999, num_inference_steps: int=50, denoising_start: Optional[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, guidance_rescale: float=0.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, aesthetic_score: float=6.0, negative_aesthetic_score: float=2.5, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not 
isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, prompt_2, control_image, mask_image, strength, num_inference_steps, callback_steps, output_type, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, padding_mask_crop) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid(denoising_start) else None) if num_inference_steps < 1: raise ValueError(f'After 
adjusting the num_inference_steps by strength parameter: {strength}, the number of pipelinesteps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.') latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 self._num_timesteps = len(timesteps) if padding_mask_crop is not None: (height, width) = self.image_processor.get_default_height_width(image, height, width) crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = 'fill' else: crops_coords = None resize_mode = 'default' original_image = image init_image = self.image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode) init_image = init_image.to(dtype=torch.float32) if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image(image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, crops_coords=crops_coords, resize_mode=resize_mode, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image(image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, crops_coords=crops_coords, resize_mode=resize_mode, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) control_images.append(control_image_) control_image = control_images else: raise ValueError(f'{controlnet.__class__} is not supported.') mask = self.mask_processor.preprocess(mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) masked_image = init_image * (mask < 0.5) (_, _, height, width) = init_image.shape num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 add_noise = True if denoising_start is None else False latents_outputs = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, image=init_image, timestep=latent_timestep, is_strength_max=is_strength_max, add_noise=add_noise, return_noise=True, return_image_latents=return_image_latents) if return_image_latents: (latents, noise, image_latents) = latents_outputs else: (latents, noise) = latents_outputs (mask, masked_image_latents) = self.prepare_mask_latents(mask, masked_image, batch_size * num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance) if num_channels_unet == 9: num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. 
Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') elif num_channels_unet != 4: raise ValueError(f'The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.') extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps if isinstance(controlnet, MultiControlNetModel) else keeps[0]) (height, width) = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim (add_time_ids, add_neg_time_ids) = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if denoising_end is not None and denoising_start is not None and denoising_value_valid(denoising_end) and denoising_value_valid(denoising_start) and (denoising_start >= denoising_end): raise ValueError(f'`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: ' + f' {denoising_end} when using type float.') elif denoising_end is not None and denoising_value_valid(denoising_end): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = {'text_embeds': add_text_embeds.chunk(2)[1], 'time_ids': add_time_ids.chunk(2)[1]} else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: 
controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False) if guess_mode and self.do_classifier_free_guidance: down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds if num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: (init_mask, _) = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise(init_latents_proper, noise, torch.tensor([noise_timestep])) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') self.controlnet.to('cpu') torch.cuda.empty_cache() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: return StableDiffusionXLPipelineOutput(images=latents) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) 
for i in image] self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.utils.import_utils import is_invisible_watermark_available from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker from .multicontrolnet import MultiControlNetModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install opencv-python transformers accelerate\n >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL\n >>> from diffusers.utils import load_image\n >>> import numpy as np\n >>> import torch\n\n >>> import cv2\n >>> from PIL import Image\n\n >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"\n >>> negative_prompt = "low quality, bad quality, sketches"\n\n >>> # download an image\n >>> image = load_image(\n ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"\n ... )\n\n >>> # initialize the models and pipeline\n >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization\n >>> controlnet = ControlNetModel.from_pretrained(\n ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16\n ... )\n >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)\n >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> # get canny image\n >>> image = np.array(image)\n >>> image = cv2.Canny(image, 100, 200)\n >>> image = image[:, :, None]\n >>> image = np.concatenate([image, image, image], axis=2)\n >>> canny_image = Image.fromarray(image)\n\n >>> # generate image\n >>> image = pipe(\n ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image\n ... 
).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLControlNetPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'feature_extractor', 'image_encoder'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) def encode_prompt(self, prompt: str, 
prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be 
the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return 
(image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, image, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, negative_pooled_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in 
self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. 
The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, 
List[float]]=1.0, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, prompt_2, image, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, negative_pooled_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt, prompt_2, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, 
pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) if isinstance(controlnet, ControlNetModel): image = self.prepare_image(image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) (height, width) = image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): images = [] for image_ in image: image_ = self.prepare_image(image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) images.append(image_) image = images (height, width) = image[0].shape[-2:] else: assert False (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) self._num_timesteps = len(timesteps) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) if isinstance(image, list): original_size = original_size or image[0].shape[-2:] else: original_size = original_size or image.shape[-2:] target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = 
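# Minimal sketch of the `controlnet_keep` schedule built above: ControlNet
# residuals are only applied while the current step falls inside the
# [control_guidance_start, control_guidance_end] window. The values below are
# hypothetical; only the arithmetic mirrors the loop in `__call__`.
def controlnet_keep_schedule(num_steps: int, start: float, end: float) -> list:
    keeps = []
    for i in range(num_steps):
        inside_window = not (i / num_steps < start or (i + 1) / num_steps > end)
        keeps.append(1.0 if inside_window else 0.0)
    return keeps

# With 10 steps, start=0.2 and end=0.8, only steps 2..7 keep the conditioning;
# the effective scale at each step is controlnet_conditioning_scale * keep.
# controlnet_keep_schedule(10, 0.2, 0.8) -> [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]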
add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order if self.denoising_end is not None and isinstance(self.denoising_end, float) and (self.denoising_end > 0) and (self.denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version('>=', '2.1') with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = {'text_embeds': add_text_embeds.chunk(2)[1], 'time_ids': add_time_ids.chunk(2)[1]} else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False) if guess_mode and self.do_classifier_free_guidance: down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = 
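# Minimal sketch of the classifier-free guidance update used in the denoising
# loop above, isolated on dummy tensors: the UNet runs on a doubled batch
# ([uncond; text]) and the halves are recombined as uncond + scale * (text - uncond).
import torch

def cfg_combine(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# Hypothetical shapes: a doubled batch of two 4x128x128 latents.
# cfg_combine(torch.randn(4, 4, 128, 128), guidance_scale=5.0).shape -> (2, 4, 128, 128)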
callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) negative_add_time_ids = callback_outputs.pop('negative_add_time_ids', negative_add_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.utils.import_utils import is_invisible_watermark_available from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker from .multicontrolnet import MultiControlNetModel logger = 
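# Minimal sketch of the latent de-normalization performed right before VAE
# decoding in the pipeline above. `scaling_factor`, `latents_mean` and
# `latents_std` are hypothetical config values here; checkpoints that define a
# latent mean/std are un-shifted and un-scaled, otherwise only the scaling
# factor is undone.
import torch

def unscale_latents(latents: torch.Tensor, scaling_factor: float, latents_mean=None, latents_std=None) -> torch.Tensor:
    if latents_mean is not None and latents_std is not None:
        mean = torch.tensor(latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
        std = torch.tensor(latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
        return latents * std / scaling_factor + mean
    return latents / scaling_factor

# Hypothetical usage: unscale_latents(torch.randn(1, 4, 128, 128), scaling_factor=0.13025)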
logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # pip install accelerate transformers safetensors diffusers\n\n >>> import torch\n >>> import numpy as np\n >>> from PIL import Image\n\n >>> from transformers import DPTImageProcessor, DPTForDepthEstimation\n >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL\n >>> from diffusers.utils import load_image\n\n\n >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda")\n >>> feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")\n >>> controlnet = ControlNetModel.from_pretrained(\n ... "diffusers/controlnet-depth-sdxl-1.0-small",\n ... variant="fp16",\n ... use_safetensors=True,\n ... torch_dtype=torch.float16,\n ... )\n >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)\n >>> pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0",\n ... controlnet=controlnet,\n ... vae=vae,\n ... variant="fp16",\n ... use_safetensors=True,\n ... torch_dtype=torch.float16,\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n\n >>> def get_depth_map(image):\n ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda")\n ... with torch.no_grad(), torch.autocast("cuda"):\n ... depth_map = depth_estimator(image).predicted_depth\n\n ... depth_map = torch.nn.functional.interpolate(\n ... depth_map.unsqueeze(1),\n ... size=(1024, 1024),\n ... mode="bicubic",\n ... align_corners=False,\n ... )\n ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)\n ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)\n ... depth_map = (depth_map - depth_min) / (depth_max - depth_min)\n ... image = torch.cat([depth_map] * 3, dim=1)\n ... image = image.permute(0, 2, 3, 1).cpu().numpy()[0]\n ... image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))\n ... return image\n\n\n >>> prompt = "A robot, 4k photo"\n >>> image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((1024, 1024))\n >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization\n >>> depth_image = get_depth_map(image)\n\n >>> images = pipe(\n ... prompt,\n ... image=image,\n ... control_image=depth_image,\n ... strength=0.99,\n ... num_inference_steps=50,\n ... controlnet_conditioning_scale=controlnet_conditioning_scale,\n ... 
).images\n >>> images[0].save(f"robot_cat.png")\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') class StableDiffusionXLControlNetImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'feature_extractor', 'image_encoder'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'add_neg_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, requires_aesthetics_score: bool=False, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: 
scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
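# Shape-level sketch of how `encode_prompt` above fuses the two CLIP text
# encoders. The sizes (768 for the first encoder, 1280 for the second) are the
# usual SDXL ones but are hypothetical constants here; the point is the
# concatenation along the feature axis, the use of the penultimate hidden state
# (or an earlier one when `clip_skip` is set), and the pooled output of the
# second encoder.
import torch

batch, seq_len = 1, 77
hidden_1 = torch.randn(batch, seq_len, 768)   # text_encoder hidden_states[-2]
hidden_2 = torch.randn(batch, seq_len, 1280)  # text_encoder_2 hidden_states[-2]
pooled_2 = torch.randn(batch, 1280)           # pooled output of text_encoder_2

prompt_embeds = torch.concat([hidden_1, hidden_2], dim=-1)  # (1, 77, 2048)
add_text_embeds = pooled_2  # later passed as the `text_embeds` added condition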
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, 
uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, image, strength, num_inference_steps, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if num_inference_steps is None: raise ValueError('`num_inference_steps` cannot be None.') elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError(f'`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type {type(num_inference_steps)}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): 
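# Minimal sketch of the signature-introspection pattern used by
# `prepare_extra_step_kwargs` above: optional kwargs are forwarded only if the
# scheduler's `step()` accepts them. `DummyScheduler` is a hypothetical stand-in.
import inspect

class DummyScheduler:
    def step(self, model_output, timestep, sample, generator=None, return_dict=True):
        return (sample,)

def filter_step_kwargs(scheduler, eta: float = 0.0, generator=None) -> dict:
    accepted = set(inspect.signature(scheduler.step).parameters.keys())
    kwargs = {}
    if 'eta' in accepted:  # only DDIM-style schedulers take eta
        kwargs['eta'] = eta
    if 'generator' in accepted:
        kwargs['generator'] = generator
    return kwargs

# filter_step_kwargs(DummyScheduler()) -> {'generator': None}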
raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. 
The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_control_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') latents_mean = latents_std = None if hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: 
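# Minimal sketch of the strength-to-timestep arithmetic used by `get_timesteps`
# above, on plain integers with hypothetical inputs: strength=0.8 with 50
# inference steps skips the first 10 scheduler steps, so denoising starts from
# a partially noised version of the input image.
def img2img_timestep_window(num_inference_steps: int, strength: float):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps - t_start  # (first step index, steps actually run)

# img2img_timestep_window(50, 0.8) -> (10, 40)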
self.text_encoder_2.to('cpu') torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list(negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim > passed_add_embed_dim and expected_add_embed_dim - passed_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
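# Sketch of the two micro-conditioning layouts produced by `_get_add_time_ids`
# above, with hypothetical sizes: the base SDXL-style UNet expects six ids
# (original size, crop top-left, target size), while a refiner-style UNet
# configured with `requires_aesthetics_score=True` expects five (original size,
# crop top-left, aesthetic score).
original_size, crops_coords_top_left, target_size = (1024, 1024), (0, 0), (1024, 1024)
aesthetic_score = 6.0

base_ids = list(original_size + crops_coords_top_left + target_size)            # [1024, 1024, 0, 0, 1024, 1024]
refiner_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))  # [1024, 1024, 0, 0, 6.0]

# The consistency check compares addition_time_embed_dim * len(ids) +
# text_encoder_projection_dim against unet.add_embedding.linear_1.in_features
# (e.g. 256 * 6 + 1280 = 2816 for a base-style UNet).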
Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.') elif expected_add_embed_dim < passed_add_embed_dim and passed_add_embed_dim - expected_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.') elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return (add_time_ids, add_neg_time_ids) def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, control_image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, strength: float=0.8, num_inference_steps: int=50, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=0.8, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, aesthetic_score: float=6.0, negative_aesthetic_score: 
float=2.5, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, prompt_2, control_image, strength, num_inference_steps, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt, prompt_2, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) image = 
self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image(image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) (height, width) = control_image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image(image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) control_images.append(control_image_) control_image = control_images (height, width) = control_image[0].shape[-2:] else: assert False self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) if latents is None: latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, True) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) if isinstance(control_image, list): original_size = original_size or control_image[0].shape[-2:] else: original_size = original_size or control_image.shape[-2:] target_size = target_size or (height, width) if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim (add_time_ids, add_neg_time_ids) = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = 
self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if guess_mode and self.do_classifier_free_guidance: control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = {'text_embeds': add_text_embeds.chunk(2)[1], 'time_ids': add_time_ids.chunk(2)[1]} else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False) if guess_mode and self.do_classifier_free_guidance: down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) add_neg_time_ids = callback_outputs.pop('add_neg_time_ids', add_neg_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') self.controlnet.to('cpu') torch.cuda.empty_cache() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = 
latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py import warnings from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard from PIL import Image from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel from ...models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel from ...schedulers import FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring from ..pipeline_flax_utils import FlaxDiffusionPipeline from ..stable_diffusion import FlaxStableDiffusionPipelineOutput from ..stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker logger = logging.get_logger(__name__) DEBUG = False EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import jax\n >>> import numpy as np\n >>> import jax.numpy as jnp\n >>> from flax.jax_utils import replicate\n >>> from flax.training.common_utils import shard\n >>> from diffusers.utils import load_image, make_image_grid\n >>> from PIL import Image\n >>> from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel\n\n\n >>> def create_key(seed=0):\n ... return jax.random.PRNGKey(seed)\n\n\n >>> rng = create_key(0)\n\n >>> # get canny image\n >>> canny_image = load_image(\n ... "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg"\n ... )\n\n >>> prompts = "best quality, extremely detailed"\n >>> negative_prompts = "monochrome, lowres, bad anatomy, worst quality, low quality"\n\n >>> # load control net and stable diffusion v1-5\n >>> controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(\n ... "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32\n ... )\n >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32\n ... 
)\n >>> params["controlnet"] = controlnet_params\n\n >>> num_samples = jax.device_count()\n >>> rng = jax.random.split(rng, jax.device_count())\n\n >>> prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)\n >>> negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)\n >>> processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)\n\n >>> p_params = replicate(params)\n >>> prompt_ids = shard(prompt_ids)\n >>> negative_prompt_ids = shard(negative_prompt_ids)\n >>> processed_image = shard(processed_image)\n\n >>> output = pipe(\n ... prompt_ids=prompt_ids,\n ... image=processed_image,\n ... params=p_params,\n ... prng_seed=rng,\n ... num_inference_steps=50,\n ... neg_prompt_ids=negative_prompt_ids,\n ... jit=True,\n ... ).images\n\n >>> output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))\n >>> output_images = make_image_grid(output_images, num_samples // 4, 4)\n >>> output_images.save("generated_image.png")\n ```\n' class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline): def __init__(self, vae: FlaxAutoencoderKL, text_encoder: FlaxCLIPTextModel, tokenizer: CLIPTokenizer, unet: FlaxUNet2DConditionModel, controlnet: FlaxControlNetModel, scheduler: Union[FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler], safety_checker: FlaxStableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, dtype: jnp.dtype=jnp.float32): super().__init__() self.dtype = dtype if safety_checker is None: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def prepare_text_inputs(self, prompt: Union[str, List[str]]): if not isinstance(prompt, (str, list)): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') return text_input.input_ids def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]): if not isinstance(image, (Image.Image, list)): raise ValueError(f'image has to be of type `PIL.Image.Image` or list but is {type(image)}') if isinstance(image, Image.Image): image = [image] processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) return processed_images def _get_has_nsfw_concepts(self, features, params): has_nsfw_concepts = self.safety_checker(features, params) return has_nsfw_concepts def _run_safety_checker(self, images, safety_model_params, jit=False): pil_images = [Image.fromarray(image) for image in images] features = self.feature_extractor(pil_images, return_tensors='np').pixel_values if jit: features = shard(features) has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) has_nsfw_concepts = unshard(has_nsfw_concepts) safety_model_params = unreplicate(safety_model_params) else: has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) images_was_copied = False for (idx, has_nsfw_concept) in enumerate(has_nsfw_concepts): if has_nsfw_concept: if not images_was_copied: images_was_copied = True images = images.copy() images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) if any(has_nsfw_concepts): warnings.warn('Potential NSFW content was detected in one or more images. A black image will be returned instead. 
Try again with a different prompt and/or seed.') return (images, has_nsfw_concepts) def _generate(self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, guidance_scale: float, latents: Optional[jnp.ndarray]=None, neg_prompt_ids: Optional[jnp.ndarray]=None, controlnet_conditioning_scale: float=1.0): (height, width) = image.shape[-2:] if height % 64 != 0 or width % 64 != 0: raise ValueError(f'`height` and `width` have to be divisible by 64 but are {height} and {width}.') prompt_embeds = self.text_encoder(prompt_ids, params=params['text_encoder'])[0] batch_size = prompt_ids.shape[0] max_length = prompt_ids.shape[-1] if neg_prompt_ids is None: uncond_input = self.tokenizer([''] * batch_size, padding='max_length', max_length=max_length, return_tensors='np').input_ids else: uncond_input = neg_prompt_ids negative_prompt_embeds = self.text_encoder(uncond_input, params=params['text_encoder'])[0] context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) image = jnp.concatenate([image] * 2) latents_shape = (batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) elif latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') def loop_body(step, args): (latents, scheduler_state) = args latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) (down_block_res_samples, mid_block_res_sample) = self.controlnet.apply({'params': params['controlnet']}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context, controlnet_cond=image, conditioning_scale=controlnet_conditioning_scale, return_dict=False) noise_pred = self.unet.apply({'params': params['unet']}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample).sample (noise_pred_uncond, noise_prediction_text) = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) (latents, scheduler_state) = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return (latents, scheduler_state) scheduler_state = self.scheduler.set_timesteps(params['scheduler'], num_inference_steps=num_inference_steps, shape=latents_shape) latents = latents * params['scheduler'].init_noise_sigma if DEBUG: for i in range(num_inference_steps): (latents, scheduler_state) = loop_body(i, (latents, scheduler_state)) else: (latents, _) = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({'params': params['vae']}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int=50, guidance_scale: Union[float, jnp.ndarray]=7.5, latents: jnp.ndarray=None, neg_prompt_ids: jnp.ndarray=None, 
controlnet_conditioning_scale: Union[float, jnp.ndarray]=1.0, return_dict: bool=True, jit: bool=False): (height, width) = image.shape[-2:] if isinstance(guidance_scale, float): guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: guidance_scale = guidance_scale[:, None] if isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: controlnet_conditioning_scale = controlnet_conditioning_scale[:, None] if jit: images = _p_generate(self, prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale) else: images = self._generate(prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale) if self.safety_checker is not None: safety_params = params['safety_checker'] images_uint8_casted = (images * 255).round().astype('uint8') (num_devices, batch_size) = images.shape[:2] images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) (images_uint8_casted, has_nsfw_concept) = self._run_safety_checker(images_uint8_casted, safety_params, jit) images = np.array(images) if any(has_nsfw_concept): for (i, is_nsfw) in enumerate(has_nsfw_concept): if is_nsfw: images[i] = np.asarray(images_uint8_casted[i]) images = images.reshape(num_devices, batch_size, height, width, 3) else: images = np.asarray(images) has_nsfw_concept = False if not return_dict: return (images, has_nsfw_concept) return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) @partial(jax.pmap, in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0), static_broadcasted_argnums=(0, 5)) def _p_generate(pipe, prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale): return pipe._generate(prompt_ids, image, params, prng_seed, num_inference_steps, guidance_scale, latents, neg_prompt_ids, controlnet_conditioning_scale) @partial(jax.pmap, static_broadcasted_argnums=(0,)) def _p_get_has_nsfw_concepts(pipe, features, params): return pipe._get_has_nsfw_concepts(features, params) def unshard(x: jnp.ndarray): (num_devices, batch_size) = x.shape[:2] rest = x.shape[2:] return x.reshape(num_devices * batch_size, *rest) def preprocess(image, dtype): image = image.convert('RGB') (w, h) = image.size (w, h) = (x - x % 64 for x in (w, h)) image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos']) image = jnp.array(image).astype(dtype) / 255.0 image = image[None].transpose(0, 3, 1, 2) return image # File: diffusers-main/src/diffusers/pipelines/controlnet_hunyuandit/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_hunyuandit_controlnet'] = ['HunyuanDiTControlNetPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_hunyuandit_controlnet import HunyuanDiTControlNetPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py import inspect from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import BertModel, BertTokenizer, CLIPImageProcessor, MT5Tokenizer, T5EncoderModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...models import AutoencoderKL, HunyuanDiT2DControlNetModel, HunyuanDiT2DModel, HunyuanDiT2DMultiControlNetModel from ...models.embeddings import get_2d_rotary_pos_embed from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDPMScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n from diffusers import HunyuanDiT2DControlNetModel, HunyuanDiTControlNetPipeline\n import torch\n\n controlnet = HunyuanDiT2DControlNetModel.from_pretrained(\n "Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny", torch_dtype=torch.float16\n )\n\n pipe = HunyuanDiTControlNetPipeline.from_pretrained(\n "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16\n )\n pipe.to("cuda")\n\n from diffusers.utils import load_image\n\n cond_image = load_image(\n "https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny/resolve/main/canny.jpg?download=true"\n )\n\n ## You may also use English prompt as HunyuanDiT supports both English and Chinese\n prompt = "在夜晚的酒店门前,一座古老的中国风格的狮子雕像矗立着,它的眼睛闪烁着光芒,仿佛在守护着这座建筑。背景是夜晚的酒店前,构图方式是特写,平视,居中构图。这张照片呈现了真实摄影风格,蕴含了中国雕塑文化,同时展现了神秘氛围"\n # prompt="At night, an ancient Chinese-style lion statue stands in front of the hotel, its eyes gleaming as if guarding the building. The background is the hotel entrance at night, with a close-up, eye-level, and centered composition. 
This photo presents a realistic photographic style, embodies Chinese sculpture culture, and reveals a mysterious atmosphere."\n image = pipe(\n prompt,\n height=1024,\n width=1024,\n control_image=cond_image,\n num_inference_steps=50,\n ).images[0]\n ```\n' STANDARD_RATIO = np.array([1.0, 4.0 / 3.0, 3.0 / 4.0, 16.0 / 9.0, 9.0 / 16.0]) STANDARD_SHAPE = [[(1024, 1024), (1280, 1280)], [(1024, 768), (1152, 864), (1280, 960)], [(768, 1024), (864, 1152), (960, 1280)], [(1280, 768)], [(768, 1280)]] STANDARD_AREA = [np.array([w * h for (w, h) in shapes]) for shapes in STANDARD_SHAPE] SUPPORTED_SHAPE = [(1024, 1024), (1280, 1280), (1024, 768), (1152, 864), (1280, 960), (768, 1024), (864, 1152), (960, 1280), (1280, 768), (768, 1280)] def map_to_standard_shapes(target_width, target_height): target_ratio = target_width / target_height closest_ratio_idx = np.argmin(np.abs(STANDARD_RATIO - target_ratio)) closest_area_idx = np.argmin(np.abs(STANDARD_AREA[closest_ratio_idx] - target_width * target_height)) (width, height) = STANDARD_SHAPE[closest_ratio_idx][closest_area_idx] return (width, height) def get_resize_crop_region_for_grid(src, tgt_size): th = tw = tgt_size (h, w) = src r = h / w if r > 1: resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return ((crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)) def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class HunyuanDiTControlNetPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->text_encoder_2->transformer->vae' _optional_components = ['safety_checker', 'feature_extractor', 'text_encoder_2', 'tokenizer_2', 'text_encoder', 'tokenizer'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'prompt_embeds_2', 'negative_prompt_embeds_2'] def __init__(self, vae: AutoencoderKL, text_encoder: BertModel, tokenizer: BertTokenizer, transformer: HunyuanDiT2DModel, scheduler: DDPMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, controlnet: Union[HunyuanDiT2DControlNetModel, List[HunyuanDiT2DControlNetModel], Tuple[HunyuanDiT2DControlNetModel], HunyuanDiT2DMultiControlNetModel], text_encoder_2=T5EncoderModel, tokenizer_2=MT5Tokenizer, requires_safety_checker: bool=True): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, text_encoder_2=text_encoder_2, controlnet=controlnet) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. 
Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.default_sample_size = self.transformer.config.sample_size if hasattr(self, 'transformer') and self.transformer is not None else 128 def encode_prompt(self, prompt: str, device: torch.device=None, dtype: torch.dtype=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, max_sequence_length: Optional[int]=None, text_encoder_index: int=0): if dtype is None: if self.text_encoder_2 is not None: dtype = self.text_encoder_2.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None if device is None: device = self._execution_device tokenizers = [self.tokenizer, self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = tokenizers[text_encoder_index] text_encoder = text_encoders[text_encoder_index] if max_sequence_length is None: if text_encoder_index == 0: max_length = 77 if text_encoder_index == 1: max_length = 256 else: max_length = max_sequence_length if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0] prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if 
negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return (prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, prompt_embeds_2=None, negative_prompt_embeds_2=None, prompt_attention_mask_2=None, negative_prompt_attention_mask_2=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is None and prompt_embeds_2 is None: raise ValueError('Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError('Must provide `prompt_attention_mask` when specifying `prompt_embeds`.') if prompt_embeds_2 is not None and prompt_attention_mask_2 is None: raise ValueError('Must provide `prompt_attention_mask_2` when specifying `prompt_embeds_2`.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError('Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.') if negative_prompt_embeds_2 is not None and negative_prompt_attention_mask_2 is None: raise ValueError('Must provide `negative_prompt_attention_mask_2` when specifying `negative_prompt_embeds_2`.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds_2 is not None and negative_prompt_embeds_2 is not None: if prompt_embeds_2.shape != negative_prompt_embeds_2.shape: raise ValueError(f'`prompt_embeds_2` and `negative_prompt_embeds_2` must have the same shape when passed directly, but got: `prompt_embeds_2` {prompt_embeds_2.shape} != `negative_prompt_embeds_2` {negative_prompt_embeds_2.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): if isinstance(image, torch.Tensor): pass else: image = self.image_processor.preprocess(image, height=height, width=width) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=5.0, control_image: PipelineImageInput=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_embeds_2: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds_2: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, prompt_attention_mask_2: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask_2: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=(1024, 1024), target_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), use_resolution_binning: bool=True): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor height = int(height // 16 * 16) width = int(width // 16 * 16) if use_resolution_binning and (height, width) not in SUPPORTED_SHAPE: (width, height) = map_to_standard_shapes(width, height) height = int(height) width = int(width) logger.warning(f'Reshaped to (height, width)=({height}, {width}), Supported shapes are {SUPPORTED_SHAPE}') self.check_inputs(prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, callback_on_step_end_tensor_inputs) 
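# The remainder of __call__ follows the standard HunyuanDiT sampling path, with ControlNet residuals injected at every step:
#   1. Cache the guidance settings and derive the batch size from `prompt` or `prompt_embeds`.
#   2. Encode the prompt twice: with the BERT-based `text_encoder` (max_sequence_length=77, text_encoder_index=0)
#      and with the mT5-based `text_encoder_2` (max_sequence_length=256, text_encoder_index=1).
#   3. Preprocess the control image(s), encode them with the VAE, and scale by `vae.config.scaling_factor`,
#      so the ControlNet conditioning lives in latent space.
#   4. Set the scheduler timesteps, prepare the initial latents, and build 2D rotary position embeddings for
#      the latent grid via `get_resize_crop_region_for_grid` and `get_2d_rotary_pos_embed`.
#   5. Assemble `image_meta_size` (original_size + target_size + crops_coords_top_left) and the `style` tensor,
#      duplicating all conditioning tensors when classifier-free guidance is enabled.
#   6. In the denoising loop, the ControlNet produces `controlnet_block_samples` that are fed to the transformer;
#      the noise prediction is split from the learned-variance channels, classifier-free guidance (and optional
#      `guidance_rescale`) is applied, and the scheduler steps the latents, with per-step callbacks honored.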
self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) = self.encode_prompt(prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=77, text_encoder_index=0) (prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2) = self.encode_prompt(prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds_2, negative_prompt_embeds=negative_prompt_embeds_2, prompt_attention_mask=prompt_attention_mask_2, negative_prompt_attention_mask=negative_prompt_attention_mask_2, max_sequence_length=256, text_encoder_index=1) if isinstance(self.controlnet, HunyuanDiT2DControlNetModel): control_image = self.prepare_image(image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) (height, width) = control_image.shape[-2:] control_image = self.vae.encode(control_image).latent_dist.sample() control_image = control_image * self.vae.config.scaling_factor elif isinstance(self.controlnet, HunyuanDiT2DMultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_image(image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) control_image_ = self.vae.encode(control_image_).latent_dist.sample() control_image_ = control_image_ * self.vae.config.scaling_factor control_images.append(control_image_) control_image = control_images else: assert False self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) grid_height = height // 8 // self.transformer.config.patch_size grid_width = width // 8 // self.transformer.config.patch_size base_size = 512 // 8 // self.transformer.config.patch_size grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size) image_rotary_emb = get_2d_rotary_pos_embed(self.transformer.inner_dim // self.transformer.num_heads, grid_crops_coords, (grid_height, grid_width)) style = torch.tensor([0], device=device) target_size = target_size or (height, width) add_time_ids = list(original_size + target_size + 
crops_coords_top_left) add_time_ids = torch.tensor([add_time_ids], dtype=prompt_embeds.dtype) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) prompt_attention_mask_2 = torch.cat([negative_prompt_attention_mask_2, prompt_attention_mask_2]) add_time_ids = torch.cat([add_time_ids] * 2, dim=0) style = torch.cat([style] * 2, dim=0) prompt_embeds = prompt_embeds.to(device=device) prompt_attention_mask = prompt_attention_mask.to(device=device) prompt_embeds_2 = prompt_embeds_2.to(device=device) prompt_attention_mask_2 = prompt_attention_mask_2.to(device=device) add_time_ids = add_time_ids.to(dtype=prompt_embeds.dtype, device=device).repeat(batch_size * num_images_per_prompt, 1) style = style.to(device=device).repeat(batch_size * num_images_per_prompt) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to(dtype=latent_model_input.dtype) control_block_samples = self.controlnet(latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, text_embedding_mask=prompt_attention_mask, encoder_hidden_states_t5=prompt_embeds_2, text_embedding_mask_t5=prompt_attention_mask_2, image_meta_size=add_time_ids, style=style, image_rotary_emb=image_rotary_emb, return_dict=False, controlnet_cond=control_image, conditioning_scale=controlnet_conditioning_scale)[0] noise_pred = self.transformer(latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, text_embedding_mask=prompt_attention_mask, encoder_hidden_states_t5=prompt_embeds_2, text_embedding_mask_t5=prompt_attention_mask_2, image_meta_size=add_time_ids, style=style, image_rotary_emb=image_rotary_emb, return_dict=False, controlnet_block_samples=control_block_samples)[0] (noise_pred, _) = noise_pred.chunk(2, dim=1) if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) prompt_embeds_2 = callback_outputs.pop('prompt_embeds_2', prompt_embeds_2) negative_prompt_embeds_2 = callback_outputs.pop('negative_prompt_embeds_2', negative_prompt_embeds_2) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not 
output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/controlnet_sd3/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_3_controlnet'] = ['StableDiffusion3ControlNetPipeline'] _import_structure['pipeline_stable_diffusion_3_controlnet_inpainting'] = ['StableDiffusion3ControlNetInpaintingPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_3_controlnet import StableDiffusion3ControlNetPipeline from .pipeline_stable_diffusion_3_controlnet_inpainting import StableDiffusion3ControlNetInpaintingPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel from ...models.transformers import SD3Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from ..stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n 
Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusion3ControlNetPipeline\n >>> from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel\n >>> from diffusers.utils import load_image\n\n >>> controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)\n\n >>> pipe = StableDiffusion3ControlNetPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n >>> control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")\n >>> prompt = "A girl holding a sign that says InstantX"\n >>> image = pipe(prompt, control_image=control_image, controlnet_conditioning_scale=0.7).images[0]\n >>> image.save("sd3.png")\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusion3ControlNetPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->text_encoder_3->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'negative_pooled_prompt_embeds'] def __init__(self, transformer: SD3Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_encoder_2: CLIPTextModelWithProjection, tokenizer_2: CLIPTokenizer, text_encoder_3: T5EncoderModel, tokenizer_3: T5TokenizerFast, controlnet: Union[SD3ControlNetModel, List[SD3ControlNetModel], Tuple[SD3ControlNetModel], SD3MultiControlNetModel]): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, text_encoder_3=text_encoder_3, tokenizer=tokenizer, tokenizer_2=tokenizer_2, tokenizer_3=tokenizer_3, transformer=transformer, scheduler=scheduler, controlnet=controlnet) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.tokenizer_max_length = self.tokenizer.model_max_length if hasattr(self, 'tokenizer') and self.tokenizer is not None else 77 self.default_sample_size = self.transformer.config.sample_size if hasattr(self, 'transformer') and self.transformer is not None else 128 def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=256, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if self.text_encoder_3 is None: return torch.zeros((batch_size * num_images_per_prompt, self.tokenizer_max_length, self.transformer.config.joint_attention_dim), device=device, dtype=dtype) text_inputs = self.tokenizer_3(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_3(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] dtype = self.text_encoder_3.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: 
Optional[torch.device]=None, clip_skip: Optional[int]=None, clip_model_index: int=0): device = device or self._execution_device clip_tokenizers = [self.tokenizer, self.tokenizer_2] clip_text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = clip_tokenizers[clip_model_index] text_encoder = clip_text_encoders[clip_model_index] prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) return (prompt_embeds, pooled_prompt_embeds) def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], prompt_3: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, clip_skip: Optional[int]=None, max_sequence_length: int=256, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_3 = prompt_3 or prompt prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 (prompt_embed, pooled_prompt_embed) = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=0) (prompt_2_embed, pooled_prompt_2_embed) = self._get_clip_prompt_embeds(prompt=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, 
clip_skip=clip_skip, clip_model_index=1) clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) t5_prompt_embed = self._get_t5_prompt_embeds(prompt=prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) clip_prompt_embeds = torch.nn.functional.pad(clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])) prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt_3 = negative_prompt_3 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 negative_prompt_3 = batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') (negative_prompt_embed, negative_pooled_prompt_embed) = self._get_clip_prompt_embeds(negative_prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=0) (negative_prompt_2_embed, negative_pooled_prompt_2_embed) = self._get_clip_prompt_embeds(negative_prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=1) negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) t5_negative_prompt_embed = self._get_t5_prompt_embeds(prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) negative_clip_prompt_embeds = torch.nn.functional.pad(negative_clip_prompt_embeds, (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1])) negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) negative_pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1) if self.text_encoder is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt, prompt_2, prompt_3, height, width, negative_prompt=None, negative_prompt_2=None, negative_prompt_3=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not 
all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_3 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') elif prompt_3 is not None and (not isinstance(prompt_3, str) and (not isinstance(prompt_3, list))): raise ValueError(f'`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_3 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if latents is not None: return latents.to(device=device, dtype=dtype) shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): if isinstance(image, torch.Tensor): pass else: image = self.image_processor.preprocess(image, height=height, width=width) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, prompt_3: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=28, timesteps: List[int]=None, guidance_scale: float=7.0, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, control_image: PipelineImageInput=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, controlnet_pooled_projections: Optional[torch.FloatTensor]=None, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, joint_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=256): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * 
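`prepare_latents` above samples noise directly in the VAE latent space, dividing the pixel dimensions by `vae_scale_factor` (8 here, given the fallback in the constructor). A small sketch of the shape arithmetic, assuming 16 latent channels (the SD3 transformer's `in_channels`) and using `torch.randn` in place of the `randn_tensor` helper:

```py
import torch

batch_size, num_channels_latents, vae_scale_factor = 1, 16, 8
height = width = 1024

shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
generator = torch.Generator().manual_seed(0)
latents = torch.randn(shape, generator=generator)  # stands in for randn_tensor(...)
print(latents.shape)  # torch.Size([1, 16, 128, 128])
```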
self.vae_scale_factor if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(self.controlnet.nets) if isinstance(self.controlnet, SD3MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, prompt_2, prompt_3, height, width, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device dtype = self.transformer.dtype (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_3=prompt_3, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, clip_skip=self.clip_skip, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) if isinstance(self.controlnet, SD3ControlNetModel): control_image = self.prepare_image(image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) (height, width) = control_image.shape[-2:] control_image = self.vae.encode(control_image).latent_dist.sample() control_image = control_image * self.vae.config.scaling_factor elif isinstance(self.controlnet, SD3MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_image(image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) control_image_ = self.vae.encode(control_image_).latent_dist.sample() control_image_ = control_image_ * self.vae.config.scaling_factor control_images.append(control_image_) control_image = control_images else: assert False if controlnet_pooled_projections is None: 
controlnet_pooled_projections = torch.zeros_like(pooled_prompt_embeds) else: controlnet_pooled_projections = controlnet_pooled_projections or pooled_prompt_embeds (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(self.controlnet, SD3ControlNetModel) else keeps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents timestep = t.expand(latent_model_input.shape[0]) if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] control_block_samples = self.controlnet(hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=controlnet_pooled_projections, joint_attention_kwargs=self.joint_attention_kwargs, controlnet_cond=control_image, conditioning_scale=cond_scale, return_dict=False)[0] noise_pred = self.transformer(hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=pooled_prompt_embeds, block_controlnet_hidden_states=control_block_samples, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return 
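In the denoising loop above, the ControlNet is only applied inside the `[control_guidance_start, control_guidance_end]` window: `controlnet_keep[i]` is 1.0 for steps inside the window and 0.0 otherwise, and it multiplies `controlnet_conditioning_scale`. A self-contained sketch of that schedule with arbitrary example values:

```py
num_steps = 10
control_guidance_start, control_guidance_end = 0.0, 0.5
controlnet_conditioning_scale = 0.8

controlnet_keep = [
    1.0 - float(i / num_steps < control_guidance_start or (i + 1) / num_steps > control_guidance_end)
    for i in range(num_steps)
]
cond_scales = [controlnet_conditioning_scale * keep for keep in controlnet_keep]
print(cond_scales)  # [0.8, 0.8, 0.8, 0.8, 0.8, 0.0, 0.0, 0.0, 0.0, 0.0]
```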
StableDiffusion3PipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel from ...models.transformers import SD3Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from ..stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers.utils import load_image, check_min_version\n >>> from diffusers.pipelines import StableDiffusion3ControlNetInpaintingPipeline\n >>> from diffusers.models.controlnet_sd3 import SD3ControlNetModel\n\n >>> controlnet = SD3ControlNetModel.from_pretrained(\n ... "alimama-creative/SD3-Controlnet-Inpainting", use_safetensors=True, extra_conditioning_channels=1\n ... )\n >>> pipe = StableDiffusion3ControlNetInpaintingPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-3-medium-diffusers",\n ... controlnet=controlnet,\n ... torch_dtype=torch.float16,\n ... )\n >>> pipe.text_encoder.to(torch.float16)\n >>> pipe.controlnet.to(torch.float16)\n >>> pipe.to("cuda")\n\n >>> image = load_image(\n ... "https://huggingface.co/alimama-creative/SD3-Controlnet-Inpainting/resolve/main/images/dog.png"\n ... )\n >>> mask = load_image(\n ... "https://huggingface.co/alimama-creative/SD3-Controlnet-Inpainting/resolve/main/images/dog_mask.png"\n ... )\n >>> width = 1024\n >>> height = 1024\n >>> prompt = "A cat is sitting next to a puppy."\n >>> generator = torch.Generator(device="cuda").manual_seed(24)\n >>> res_image = pipe(\n ... negative_prompt="deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW",\n ... prompt=prompt,\n ... height=height,\n ... width=width,\n ... control_image=image,\n ... control_mask=mask,\n ... num_inference_steps=28,\n ... generator=generator,\n ... controlnet_conditioning_scale=0.95,\n ... guidance_scale=7,\n ... ).images[0]\n >>> res_image.save(f"sd3.png")\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusion3ControlNetInpaintingPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->text_encoder_3->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'negative_pooled_prompt_embeds'] def __init__(self, transformer: SD3Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_encoder_2: CLIPTextModelWithProjection, tokenizer_2: CLIPTokenizer, text_encoder_3: T5EncoderModel, tokenizer_3: T5TokenizerFast, controlnet: Union[SD3ControlNetModel, List[SD3ControlNetModel], Tuple[SD3ControlNetModel], SD3MultiControlNetModel]): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, text_encoder_3=text_encoder_3, tokenizer=tokenizer, tokenizer_2=tokenizer_2, tokenizer_3=tokenizer_3, transformer=transformer, scheduler=scheduler, controlnet=controlnet) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_resize=True, do_convert_rgb=True, do_normalize=True) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_resize=True, do_convert_grayscale=True, do_normalize=False, do_binarize=True) self.tokenizer_max_length = self.tokenizer.model_max_length if hasattr(self, 'tokenizer') and self.tokenizer is not None else 77 self.default_sample_size = self.transformer.config.sample_size if hasattr(self, 'transformer') and self.transformer is not None else 128 def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=256, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if self.text_encoder_3 is None: return torch.zeros((batch_size * num_images_per_prompt, self.tokenizer_max_length, self.transformer.config.joint_attention_dim), device=device, dtype=dtype) text_inputs = self.tokenizer_3(prompt, padding='max_length', max_length=max_sequence_length, 
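`retrieve_timesteps` above dispatches to `scheduler.set_timesteps`: it passes explicit `timesteps` or `sigmas` through when the scheduler's signature accepts them (raising otherwise), and uses the plain step count in the default case. A minimal sketch of that default branch, assuming only a stock `FlowMatchEulerDiscreteScheduler` with its default config:

```py
from diffusers import FlowMatchEulerDiscreteScheduler

scheduler = FlowMatchEulerDiscreteScheduler()
# Equivalent to the default branch of retrieve_timesteps (no custom timesteps or sigmas):
scheduler.set_timesteps(28, device="cpu")
timesteps = scheduler.timesteps
print(len(timesteps), timesteps[:3])
```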
truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_3(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] dtype = self.text_encoder_3.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None, clip_skip: Optional[int]=None, clip_model_index: int=0): device = device or self._execution_device clip_tokenizers = [self.tokenizer, self.tokenizer_2] clip_text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = clip_tokenizers[clip_model_index] text_encoder = clip_text_encoders[clip_model_index] prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) return (prompt_embeds, pooled_prompt_embeds) def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], prompt_3: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, clip_skip: Optional[int]=None, max_sequence_length: int=256, lora_scale: Optional[float]=None): device = device 
or self._execution_device if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_3 = prompt_3 or prompt prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 (prompt_embed, pooled_prompt_embed) = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=0) (prompt_2_embed, pooled_prompt_2_embed) = self._get_clip_prompt_embeds(prompt=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=1) clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) t5_prompt_embed = self._get_t5_prompt_embeds(prompt=prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) clip_prompt_embeds = torch.nn.functional.pad(clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])) prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt_3 = negative_prompt_3 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 negative_prompt_3 = batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') (negative_prompt_embed, negative_pooled_prompt_embed) = self._get_clip_prompt_embeds(negative_prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=0) (negative_prompt_2_embed, negative_pooled_prompt_2_embed) = self._get_clip_prompt_embeds(negative_prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=1) negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) t5_negative_prompt_embed = self._get_t5_prompt_embeds(prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) negative_clip_prompt_embeds = torch.nn.functional.pad(negative_clip_prompt_embeds, (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1])) negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) negative_pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1) if self.text_encoder is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt, prompt_2, prompt_3, height, width, negative_prompt=None, negative_prompt_2=None, negative_prompt_3=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_3 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`.
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') elif prompt_3 is not None and (not isinstance(prompt_3, str) and (not isinstance(prompt_3, list))): raise ValueError(f'`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_3 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if latents is not None: return latents.to(device=device, dtype=dtype) shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents def prepare_image_with_mask(self, image, mask, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): if isinstance(image, torch.Tensor): pass else: image = self.image_processor.preprocess(image, height=height, width=width) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if isinstance(mask, torch.Tensor): pass else: mask = self.mask_processor.preprocess(mask, height=height, width=width) mask = mask.repeat_interleave(repeat_by, dim=0) mask = mask.to(device=device, dtype=dtype) masked_image = image.clone() masked_image[(mask > 0.5).repeat(1, 3, 1, 1)] = -1 image_latents = self.vae.encode(masked_image).latent_dist.sample() image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor image_latents = image_latents.to(dtype) mask = torch.nn.functional.interpolate(mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask = 1 - mask control_image = torch.cat([image_latents, mask], dim=1) if do_classifier_free_guidance and (not guess_mode): control_image = torch.cat([control_image] * 2) return control_image @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, prompt_3: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=28, timesteps: List[int]=None, guidance_scale: float=7.0, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, control_image: PipelineImageInput=None, control_mask: PipelineImageInput=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, controlnet_pooled_projections: Optional[torch.FloatTensor]=None, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, joint_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=256): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor if not 
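`prepare_image_with_mask` above builds the inpainting ControlNet input by VAE-encoding the masked image and stacking the inverted, latent-resolution mask as one extra channel, matching the `extra_conditioning_channels=1` used when loading the ControlNet in the example docstring. A shape-only sketch with dummy tensors (16 latent channels is an assumption):

```py
import torch

image_latents = torch.randn(1, 16, 128, 128)         # VAE-encoded masked image (dummy)
mask = (torch.rand(1, 1, 1024, 1024) > 0.5).float()  # binary inpainting mask (dummy)

mask = torch.nn.functional.interpolate(mask, size=(128, 128))  # to latent resolution
mask = 1 - mask                                                # invert, as in the pipeline
control_image = torch.cat([image_latents, mask], dim=1)        # one extra conditioning channel
print(control_image.shape)  # torch.Size([1, 17, 128, 128])
```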
isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(self.controlnet.nets) if isinstance(self.controlnet, SD3MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, prompt_2, prompt_3, height, width, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device dtype = self.transformer.dtype (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_3=prompt_3, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, clip_skip=self.clip_skip, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) if isinstance(self.controlnet, SD3ControlNetModel): control_image = self.prepare_image_with_mask(image=control_image, mask=control_mask, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) (latent_height, latent_width) = control_image.shape[-2:] height = latent_height * self.vae_scale_factor width = latent_width * self.vae_scale_factor elif isinstance(self.controlnet, SD3MultiControlNetModel): raise NotImplementedError('MultiControlNetModel is not supported for SD3ControlNetInpaintingPipeline.') else: assert False if controlnet_pooled_projections is None: controlnet_pooled_projections = torch.zeros_like(pooled_prompt_embeds) else: controlnet_pooled_projections = controlnet_pooled_projections or pooled_prompt_embeds (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) num_channels_latents = self.transformer.config.in_channels latents = 
self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(self.controlnet, SD3ControlNetModel) else keeps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents timestep = t.expand(latent_model_input.shape[0]) if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] control_block_samples = self.controlnet(hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=controlnet_pooled_projections, joint_attention_kwargs=self.joint_attention_kwargs, controlnet_cond=control_image, conditioning_scale=cond_scale, return_dict=False)[0] noise_pred = self.transformer(hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=pooled_prompt_embeds, block_controlnet_hidden_states=control_block_samples, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor latents = latents.to(dtype=self.vae.dtype) image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusion3PipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/controlnet_xs/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and 
is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_controlnet_xs'] = ['StableDiffusionControlNetXSPipeline'] _import_structure['pipeline_controlnet_xs_sd_xl'] = ['StableDiffusionXLControlNetXSPipeline'] try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_flax_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: pass if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_controlnet_xs import StableDiffusionControlNetXSPipeline from .pipeline_controlnet_xs_sd_xl import StableDiffusionXLControlNetXSPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * else: pass else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install opencv-python transformers accelerate\n >>> from diffusers import StableDiffusionControlNetXSPipeline, ControlNetXSAdapter\n >>> from diffusers.utils import load_image\n >>> import numpy as np\n >>> import torch\n\n >>> import cv2\n >>> from PIL import Image\n\n >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"\n >>> negative_prompt = "low quality, bad quality, sketches"\n\n >>> # download an image\n >>> image = load_image(\n ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"\n ... 
)\n\n >>> # initialize the models and pipeline\n >>> controlnet_conditioning_scale = 0.5\n\n >>> controlnet = ControlNetXSAdapter.from_pretrained(\n ... "UmerHA/Testing-ConrolNetXS-SD2.1-canny", torch_dtype=torch.float16\n ... )\n >>> pipe = StableDiffusionControlNetXSPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> # get canny image\n >>> image = np.array(image)\n >>> image = cv2.Canny(image, 100, 200)\n >>> image = image[:, :, None]\n >>> image = np.concatenate([image, image, image], axis=2)\n >>> canny_image = Image.fromarray(image)\n >>> # generate image\n >>> image = pipe(\n ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image\n ... ).images[0]\n ```\n' class StableDiffusionControlNetXSPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: Union[UNet2DConditionModel, UNetControlNetXSModel], controlnet: ControlNetXSAdapter, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetControlNetXSModel.from_unet(unet, controlnet) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), 
attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
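In `encode_prompt` above, `clip_skip` selects an earlier CLIP hidden state (`hidden_states[-(clip_skip + 1)]`, followed by the text model's final layer norm) instead of the default last hidden state; the SD3 pipelines earlier in this section use `hidden_states[-2]` by default. A tiny index-only sketch for a hypothetical 12-layer encoder:

```py
# With output_hidden_states=True, CLIPTextModel returns
# [embedding output, layer 1, ..., layer N]; index -1 is the final layer.
num_layers = 12
hidden_states = [f"layer_{i}" for i in range(num_layers + 1)]  # stand-ins for tensors

clip_skip = 2
print(hidden_states[-(clip_skip + 1)])  # 'layer_10' -- skips the last two layers
print(hidden_states[-2])                # 'layer_11' -- penultimate state, the SD3 default
```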
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
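`prepare_extra_step_kwargs` above forwards `eta` and `generator` to `scheduler.step` only when that scheduler's signature actually accepts them (`eta` corresponds to η in DDIM and is ignored by other schedulers). A minimal sketch of the same signature introspection with two stock schedulers:

```py
import inspect
from diffusers import DDIMScheduler, EulerDiscreteScheduler

for scheduler_cls in (DDIMScheduler, EulerDiscreteScheduler):
    params = set(inspect.signature(scheduler_cls.step).parameters)
    print(scheduler_cls.__name__, "eta:", "eta" in params, "generator:", "generator" in params)
```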
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.unet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.unet, UNetControlNetXSModel) or (is_compiled and isinstance(self.unet._orig_mod, UNetControlNetXSModel)): self.check_image(image, prompt, prompt_embeds) if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') else: assert False (start, end) = (control_guidance_start, control_guidance_end) if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance: image = torch.cat([image] * 2) return image def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, control_guidance_start: float=0.0, control_guidance_end: float=1.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs unet = self.unet._orig_mod if is_compiled_module(self.unet) else self.unet self.check_inputs(prompt, image, negative_prompt, prompt_embeds, negative_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) image = self.prepare_image(image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=unet.dtype, do_classifier_free_guidance=do_classifier_free_guidance) (height, width) = image.shape[-2:] self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, 
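# Minimal illustrative sketch, separate from the pipeline code above: how `prepare_latents`
# sizes the initial noise and scales it by the scheduler's starting sigma. The concrete
# numbers (batch size, channels, VAE scale factor, resolution, sigma) are assumptions.
import torch

batch_size, num_channels_latents, vae_scale_factor = 1, 4, 8
height = width = 512
shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
latents = torch.randn(shape)        # the pipeline uses randn_tensor(..., generator=...) for reproducibility
init_noise_sigma = 1.0              # read from scheduler.init_noise_sigma in the pipeline
latents = latents * init_noise_sigma
assert latents.shape == (1, 4, 64, 64)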
height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) is_controlnet_compiled = is_compiled_module(self.unet) is_torch_higher_equal_2_1 = is_torch_version('>=', '2.1') with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if is_controlnet_compiled and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) apply_control = i / len(timesteps) >= control_guidance_start and (i + 1) / len(timesteps) <= control_guidance_end noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=prompt_embeds, controlnet_cond=image, conditioning_scale=controlnet_conditioning_scale, cross_attention_kwargs=cross_attention_kwargs, return_dict=True, apply_control=apply_control).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') self.controlnet.to('cpu') torch.cuda.empty_cache() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers.utils.import_utils import is_invisible_watermark_available from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, 
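# Minimal illustrative sketch, separate from the pipeline code: the classifier-free guidance
# update applied in the denoising loop above. `noise_pred` stands in for the UNet output on
# the doubled [unconditional; text-conditioned] batch; the tensor values are dummies.
import torch

guidance_scale = 7.5                                   # same default as this pipeline's __call__
noise_pred = torch.randn(2, 4, 64, 64)                 # assumed doubled-batch prediction
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)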
UNetControlNetXSModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor from ..pipeline_utils import DiffusionPipeline from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install opencv-python transformers accelerate\n >>> from diffusers import StableDiffusionXLControlNetXSPipeline, ControlNetXSAdapter, AutoencoderKL\n >>> from diffusers.utils import load_image\n >>> import numpy as np\n >>> import torch\n\n >>> import cv2\n >>> from PIL import Image\n\n >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"\n >>> negative_prompt = "low quality, bad quality, sketches"\n\n >>> # download an image\n >>> image = load_image(\n ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"\n ... )\n\n >>> # initialize the models and pipeline\n >>> controlnet_conditioning_scale = 0.5\n >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)\n >>> controlnet = ControlNetXSAdapter.from_pretrained(\n ... "UmerHA/Testing-ConrolNetXS-SDXL-canny", torch_dtype=torch.float16\n ... )\n >>> pipe = StableDiffusionXLControlNetXSPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> # get canny image\n >>> image = np.array(image)\n >>> image = cv2.Canny(image, 100, 200)\n >>> image = image[:, :, None]\n >>> image = np.concatenate([image, image, image], axis=2)\n >>> canny_image = Image.fromarray(image)\n\n >>> # generate image\n >>> image = pipe(\n ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image\n ... 
).images[0]\n ```\n' class StableDiffusionXLControlNetXSPipeline(DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: Union[UNet2DConditionModel, UNetControlNetXSModel], controlnet: ControlNetXSAdapter, scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, feature_extractor: CLIPImageProcessor=None): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetControlNetXSModel.from_unet(unet, controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, 
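# Minimal illustrative sketch, separate from the pipeline code: the VAE scale factor computed
# in __init__ above. The block_out_channels list is an assumed SDXL-like VAE config, not
# loaded weights; with four resolution levels the factor is 8 pixels per latent.
block_out_channels = [128, 256, 512, 512]
vae_scale_factor = 2 ** (len(block_out_channels) - 1)
assert vae_scale_factor == 8
# Note: when a plain UNet2DConditionModel is passed, __init__ first wraps it together with
# the adapter via UNetControlNetXSModel.from_unet(unet, controlnet) before registering modules.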
TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, image, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not 
in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.unet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.unet, UNetControlNetXSModel) or (is_compiled and isinstance(self.unet._orig_mod, UNetControlNetXSModel)): self.check_image(image, prompt, prompt_embeds) if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') else: assert False (start, end) = (control_guidance_start, control_guidance_end) if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance: image = torch.cat([image] * 2) return image def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.base_add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, control_guidance_start: float=0.0, control_guidance_end: float=1.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs unet = self.unet._orig_mod if is_compiled_module(self.unet) else 
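# Minimal illustrative sketch, separate from the pipeline code: the SDXL micro-conditioning
# vector assembled by _get_add_time_ids above, i.e. original_size + crops_coords_top_left +
# target_size concatenated into six numbers. The sizes here are assumed values.
import torch

original_size, crops_coords_top_left, target_size = (1024, 1024), (0, 0), (1024, 1024)
add_time_ids = torch.tensor([list(original_size + crops_coords_top_left + target_size)], dtype=torch.float32)
assert add_time_ids.shape == (1, 6)   # [orig_h, orig_w, crop_top, crop_left, target_h, target_w]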
self.unet self.check_inputs(prompt, prompt_2, image, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt, prompt_2, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if isinstance(unet, UNetControlNetXSModel): image = self.prepare_image(image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=unet.dtype, do_classifier_free_guidance=do_classifier_free_guidance) (height, width) = image.shape[-2:] else: assert False self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if isinstance(image, list): original_size = original_size or image[0].shape[-2:] else: original_size = original_size or image.shape[-2:] target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) is_controlnet_compiled = is_compiled_module(self.unet) is_torch_higher_equal_2_1 = 
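# Minimal illustrative sketch, separate from the pipeline code: under classifier-free guidance
# the conditioning tensors prepared above are doubled along the batch dimension, negative
# (unconditional) half first, so a single UNet forward pass covers both halves. All shapes
# are assumed SDXL-like values for illustration.
import torch

prompt_embeds = torch.randn(1, 77, 2048)
negative_prompt_embeds = torch.randn(1, 77, 2048)
add_text_embeds = torch.randn(1, 1280)                  # pooled text embedding
negative_pooled_prompt_embeds = torch.randn(1, 1280)
add_time_ids = torch.tensor([[1024.0, 1024.0, 0.0, 0.0, 1024.0, 1024.0]])
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
assert prompt_embeds.shape[0] == add_text_embeds.shape[0] == add_time_ids.shape[0] == 2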
is_torch_version('>=', '2.1') with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if is_controlnet_compiled and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} apply_control = i / len(timesteps) >= control_guidance_start and (i + 1) / len(timesteps) <= control_guidance_end noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=prompt_embeds, controlnet_cond=image, conditioning_scale=controlnet_conditioning_scale, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=True, apply_control=apply_control).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/dance_diffusion/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_dance_diffusion': ['DanceDiffusionPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_dance_diffusion import DanceDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py from typing import List, Optional, Tuple, Union import torch from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline logger = logging.get_logger(__name__) class DanceDiffusionPipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet' def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, 
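# Minimal illustrative sketch, separate from the pipeline code: the control-guidance window
# evaluated each step in the loops above. ControlNet-XS conditioning is only applied while
# the current step fraction lies inside [control_guidance_start, control_guidance_end];
# the ten-step schedule below is an assumed example.
num_steps = 10
control_guidance_start, control_guidance_end = 0.0, 0.5
apply_control = [
    i / num_steps >= control_guidance_start and (i + 1) / num_steps <= control_guidance_end
    for i in range(num_steps)
]
assert apply_control == [True] * 5 + [False] * 5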
scheduler=scheduler) @torch.no_grad() def __call__(self, batch_size: int=1, num_inference_steps: int=100, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, audio_length_in_s: Optional[float]=None, return_dict: bool=True) -> Union[AudioPipelineOutput, Tuple]: if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate sample_size = audio_length_in_s * self.unet.config.sample_rate down_scale_factor = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError(f"{audio_length_in_s} is too small. Make sure it's bigger or equal to {3 * down_scale_factor / self.unet.config.sample_rate}.") original_sample_size = int(sample_size) if sample_size % down_scale_factor != 0: sample_size = (audio_length_in_s * self.unet.config.sample_rate // down_scale_factor + 1) * down_scale_factor logger.info(f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising process.') sample_size = int(sample_size) dtype = next(self.unet.parameters()).dtype shape = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype) self.scheduler.set_timesteps(num_inference_steps, device=audio.device) self.scheduler.timesteps = self.scheduler.timesteps.to(dtype) for t in self.progress_bar(self.scheduler.timesteps): model_output = self.unet(audio, t).sample audio = self.scheduler.step(model_output, t, audio).prev_sample audio = audio.clamp(-1, 1).float().cpu().numpy() audio = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio) # File: diffusers-main/src/diffusers/pipelines/ddim/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_ddim': ['DDIMPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_ddim import DDIMPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/ddim/pipeline_ddim.py from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class DDIMPipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet' def __init__(self, unet, scheduler): super().__init__() scheduler = DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__(self, batch_size: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, eta: float=0.0, num_inference_steps: int=50, use_clipped_model_output: Optional[bool]=None, output_type: Optional[str]='pil', return_dict: bool=True) -> Union[ImagePipelineOutput, Tuple]: if isinstance(self.unet.config.sample_size, int): image_shape = (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) else: image_shape = (batch_size, 
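# Minimal illustrative sketch, separate from the pipeline code: how DanceDiffusionPipeline
# above rounds the requested audio length up to a multiple of the UNet's total downsampling
# factor before generation. The sample rate and number of up blocks are assumed values.
sample_rate, num_up_blocks = 44100, 4
audio_length_in_s = 1.0
down_scale_factor = 2 ** num_up_blocks                  # 16
sample_size = audio_length_in_s * sample_rate           # 44100.0 samples, not a multiple of 16
if sample_size % down_scale_factor != 0:
    sample_size = (audio_length_in_s * sample_rate // down_scale_factor + 1) * down_scale_factor
assert int(sample_size) == 44112                        # rounded up; output is trimmed back after denoising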
self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype) self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): model_output = self.unet(image, t).sample image = self.scheduler.step(model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/ddpm/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_ddpm': ['DDPMPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_ddpm import DDPMPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/ddpm/pipeline_ddpm.py from typing import List, Optional, Tuple, Union import torch from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class DDPMPipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet' def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__(self, batch_size: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, num_inference_steps: int=1000, output_type: Optional[str]='pil', return_dict: bool=True) -> Union[ImagePipelineOutput, Tuple]: if isinstance(self.unet.config.sample_size, int): image_shape = (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) else: image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if self.device.type == 'mps': image = randn_tensor(image_shape, generator=generator) image = image.to(self.device) else: image = randn_tensor(image_shape, generator=generator, device=self.device) self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): model_output = self.unet(image, t).sample image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {'timesteps': ['fast27_timesteps', 'smart100_timesteps', 'smart185_timesteps', 'smart27_timesteps', 'smart50_timesteps', 'super100_timesteps', 'super27_timesteps', 'super40_timesteps']} try: if not (is_transformers_available() and is_torch_available()): raise 
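# Minimal illustrative usage sketch, separate from the library code: the unconditional
# DDPM/DDIM pipelines defined above need only a checkpoint id. "google/ddpm-cat-256" is used
# here as an assumed example id; any compatible unconditional checkpoint works the same way.
import torch
from diffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe(batch_size=1, num_inference_steps=1000).images[0]
image.save("ddpm_sample.png")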
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_if'] = ['IFPipeline'] _import_structure['pipeline_if_img2img'] = ['IFImg2ImgPipeline'] _import_structure['pipeline_if_img2img_superresolution'] = ['IFImg2ImgSuperResolutionPipeline'] _import_structure['pipeline_if_inpainting'] = ['IFInpaintingPipeline'] _import_structure['pipeline_if_inpainting_superresolution'] = ['IFInpaintingSuperResolutionPipeline'] _import_structure['pipeline_if_superresolution'] = ['IFSuperResolutionPipeline'] _import_structure['pipeline_output'] = ['IFPipelineOutput'] _import_structure['safety_checker'] = ['IFSafetyChecker'] _import_structure['watermark'] = ['IFWatermarker'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_if import IFPipeline from .pipeline_if_img2img import IFImg2ImgPipeline from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .timesteps import fast27_timesteps, smart27_timesteps, smart50_timesteps, smart100_timesteps, smart185_timesteps, super27_timesteps, super40_timesteps, super100_timesteps from .watermark import IFWatermarker else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/pipeline_if.py import html import inspect import re import urllib.parse as ul from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import UNet2DConditionModel from ...schedulers import DDPMScheduler from ...utils import BACKENDS_MAPPING, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker logger = logging.get_logger(__name__) if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline\n >>> from diffusers.utils import pt_to_pil\n >>> import torch\n\n >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = \'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"\'\n >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)\n\n >>> image = pipe(prompt_embeds=prompt_embeds, 
negative_prompt_embeds=negative_embeds, output_type="pt").images\n\n >>> # save intermediate image\n >>> pil_image = pt_to_pil(image)\n >>> pil_image[0].save("./if_stage_I.png")\n\n >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained(\n ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16\n ... )\n >>> super_res_1_pipe.enable_model_cpu_offload()\n\n >>> image = super_res_1_pipe(\n ... image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt"\n ... ).images\n\n >>> # save intermediate image\n >>> pil_image = pt_to_pil(image)\n >>> pil_image[0].save("./if_stage_I.png")\n\n >>> safety_modules = {\n ... "feature_extractor": pipe.feature_extractor,\n ... "safety_checker": pipe.safety_checker,\n ... "watermarker": pipe.watermarker,\n ... }\n >>> super_res_2_pipe = DiffusionPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16\n ... )\n >>> super_res_2_pipe.enable_model_cpu_offload()\n\n >>> image = super_res_2_pipe(\n ... prompt=prompt,\n ... image=image,\n ... ).images\n >>> image[0].save("./if_stage_II.png")\n ```\n' class IFPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): tokenizer: T5Tokenizer text_encoder: T5EncoderModel unet: UNet2DConditionModel scheduler: DDPMScheduler feature_extractor: Optional[CLIPImageProcessor] safety_checker: Optional[IFSafetyChecker] watermarker: Optional[IFWatermarker] bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = ['tokenizer', 'text_encoder', 'safety_checker', 'feature_extractor', 'watermarker'] model_cpu_offload_seq = 'text_encoder->unet' _exclude_from_cpu_offload = ['watermarker'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: UNet2DConditionModel, scheduler: DDPMScheduler, safety_checker: Optional[IFSafetyChecker], feature_extractor: Optional[CLIPImageProcessor], watermarker: Optional[IFWatermarker], requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the IF license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, watermarker=watermarker) self.register_to_config(requires_safety_checker=requires_safety_checker) @torch.no_grad() def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, num_images_per_prompt: int=1, device: Optional[torch.device]=None, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, clean_caption: bool=False): if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 77 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {max_length} tokens: {removed_text}') attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.unet is not None: dtype = self.unet.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) else: negative_prompt_embeds = None return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(device) (image, nsfw_detected, watermark_detected) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype=dtype)) else: nsfw_detected = None watermark_detected = None return (image, nsfw_detected, watermark_detected) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype) intermediate_images = intermediate_images * self.scheduler.init_noise_sigma return intermediate_images def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('<person>', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('&quot;?', '', caption) caption = re.sub('&amp', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = 
re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, num_inference_steps: int=100, timesteps: List[int]=None, guidance_scale: float=7.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, height: Optional[int]=None, width: Optional[int]=None, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, clean_caption: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None): self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) height = height or self.unet.config.sample_size width = width or self.unet.config.sample_size if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clean_caption=clean_caption) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(0) intermediate_images = self.prepare_intermediate_images(batch_size * num_images_per_prompt, self.unet.config.in_channels, height, width, prompt_embeds.dtype, device, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if hasattr(self, 
'text_encoder_offload_hook') and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): model_input = torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images model_input = self.scheduler.scale_model_input(model_input, t) noise_pred = self.unet(model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (noise_pred_uncond, _) = noise_pred_uncond.split(model_input.shape[1], dim=1) (noise_pred_text, predicted_variance) = noise_pred_text.split(model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if self.scheduler.config.variance_type not in ['learned', 'learned_range']: (noise_pred, _) = noise_pred.split(model_input.shape[1], dim=1) intermediate_images = self.scheduler.step(noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, intermediate_images) image = intermediate_images if output_type == 'pil': image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) image = self.numpy_to_pil(image) if self.watermarker is not None: image = self.watermarker.apply_watermark(image, self.unet.config.sample_size) elif output_type == 'pt': nsfw_detected = None watermark_detected = None if hasattr(self, 'unet_offload_hook') and self.unet_offload_hook is not None: self.unet_offload_hook.offload() else: image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) self.maybe_free_model_hooks() if not return_dict: return (image, nsfw_detected, watermark_detected) return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img.py import html import inspect import re import urllib.parse as ul from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import UNet2DConditionModel from ...schedulers import DDPMScheduler from ...utils import BACKENDS_MAPPING, PIL_INTERPOLATION, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker logger = logging.get_logger(__name__) if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: (w, h) = images.size coef = w / h (w, h) = 
(img_size, img_size) if coef >= 1: w = int(round(img_size / 8 * coef) * 8) else: h = int(round(img_size / 8 / coef) * 8) images = images.resize((w, h), resample=PIL_INTERPOLATION['bicubic'], reducing_gap=None) return images EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline\n >>> from diffusers.utils import pt_to_pil\n >>> import torch\n >>> from PIL import Image\n >>> import requests\n >>> from io import BytesIO\n\n >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n >>> response = requests.get(url)\n >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")\n >>> original_image = original_image.resize((768, 512))\n\n >>> pipe = IFImg2ImgPipeline.from_pretrained(\n ... "DeepFloyd/IF-I-XL-v1.0",\n ... variant="fp16",\n ... torch_dtype=torch.float16,\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "A fantasy landscape in style minecraft"\n >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)\n\n >>> image = pipe(\n ... image=original_image,\n ... prompt_embeds=prompt_embeds,\n ... negative_prompt_embeds=negative_embeds,\n ... output_type="pt",\n ... ).images\n\n >>> # save intermediate image\n >>> pil_image = pt_to_pil(image)\n >>> pil_image[0].save("./if_stage_I.png")\n\n >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(\n ... "DeepFloyd/IF-II-L-v1.0",\n ... text_encoder=None,\n ... variant="fp16",\n ... torch_dtype=torch.float16,\n ... )\n >>> super_res_1_pipe.enable_model_cpu_offload()\n\n >>> image = super_res_1_pipe(\n ... image=image,\n ... original_image=original_image,\n ... prompt_embeds=prompt_embeds,\n ... negative_prompt_embeds=negative_embeds,\n ... ).images\n >>> image[0].save("./if_stage_II.png")\n ```\n' class IFImg2ImgPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): tokenizer: T5Tokenizer text_encoder: T5EncoderModel unet: UNet2DConditionModel scheduler: DDPMScheduler feature_extractor: Optional[CLIPImageProcessor] safety_checker: Optional[IFSafetyChecker] watermarker: Optional[IFWatermarker] bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = ['tokenizer', 'text_encoder', 'safety_checker', 'feature_extractor', 'watermarker'] model_cpu_offload_seq = 'text_encoder->unet' _exclude_from_cpu_offload = ['watermarker'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: UNet2DConditionModel, scheduler: DDPMScheduler, safety_checker: Optional[IFSafetyChecker], feature_extractor: Optional[CLIPImageProcessor], watermarker: Optional[IFWatermarker], requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the IF license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, watermarker=watermarker) self.register_to_config(requires_safety_checker=requires_safety_checker) @torch.no_grad() def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, num_images_per_prompt: int=1, device: Optional[torch.device]=None, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, clean_caption: bool=False): if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 77 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {max_length} tokens: {removed_text}') attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.unet is not None: dtype = self.unet.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) else: negative_prompt_embeds = None return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(device) (image, nsfw_detected, watermark_detected) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype=dtype)) else: nsfw_detected = None watermark_detected = None return (image, nsfw_detected, watermark_detected) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, batch_size, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if isinstance(image, list): check_image_type = image[0] else: check_image_type = image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is {type(check_image_type)}') if isinstance(image, list): image_batch_size = len(image) elif isinstance(image, torch.Tensor): image_batch_size = image.shape[0] elif isinstance(image, PIL.Image.Image): image_batch_size = 1 elif isinstance(image, np.ndarray): image_batch_size = image.shape[0] else: assert False if batch_size != image_batch_size: raise ValueError(f'image batch size: {image_batch_size} must be same as prompt batch size {batch_size}') def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('<person>', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('&quot;?', '', caption) caption = re.sub('&amp', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', 
'', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() def preprocess_image(self, image: PIL.Image.Image) -> torch.Tensor: if not isinstance(image, list): image = [image] def numpy_to_pt(images): if images.ndim == 3: images = images[..., None] images = torch.from_numpy(images.transpose(0, 3, 1, 2)) return images if isinstance(image[0], PIL.Image.Image): new_image = [] for image_ in image: image_ = image_.convert('RGB') image_ = resize(image_, self.unet.config.sample_size) image_ = np.array(image_) image_ = image_.astype(np.float32) image_ = image_ / 127.5 - 1 new_image.append(image_) image = new_image image = np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], np.ndarray): image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) return image def get_timesteps(self, num_inference_steps, strength): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_intermediate_images(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): (_, channels, height, width) = image.shape batch_size = batch_size * num_images_per_prompt shape = (batch_size, channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) image = self.scheduler.add_noise(image, noise, timestep) return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: Union[PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]]=None, strength: float=0.7, num_inference_steps: int=80, timesteps: List[int]=None, guidance_scale: float=10.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, clean_caption: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] self.check_inputs(prompt, image, batch_size, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clean_caption=clean_caption) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) dtype = prompt_embeds.dtype if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength) image = self.preprocess_image(image) image = image.to(device=device, dtype=dtype) noise_timestep = timesteps[0:1] noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) intermediate_images = self.prepare_intermediate_images(image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if hasattr(self, 'text_encoder_offload_hook') and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): model_input = torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images model_input = self.scheduler.scale_model_input(model_input, t) noise_pred = self.unet(model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (noise_pred_uncond, _) = noise_pred_uncond.split(model_input.shape[1], dim=1) (noise_pred_text, predicted_variance) = 
noise_pred_text.split(model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if self.scheduler.config.variance_type not in ['learned', 'learned_range']: (noise_pred, _) = noise_pred.split(model_input.shape[1], dim=1) intermediate_images = self.scheduler.step(noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, intermediate_images) image = intermediate_images if output_type == 'pil': image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) image = self.numpy_to_pil(image) if self.watermarker is not None: self.watermarker.apply_watermark(image, self.unet.config.sample_size) elif output_type == 'pt': nsfw_detected = None watermark_detected = None if hasattr(self, 'unet_offload_hook') and self.unet_offload_hook is not None: self.unet_offload_hook.offload() else: image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) self.maybe_free_model_hooks() if not return_dict: return (image, nsfw_detected, watermark_detected) return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py import html import inspect import re import urllib.parse as ul from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import UNet2DConditionModel from ...schedulers import DDPMScheduler from ...utils import BACKENDS_MAPPING, PIL_INTERPOLATION, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy logger = logging.get_logger(__name__) def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: (w, h) = images.size coef = w / h (w, h) = (img_size, img_size) if coef >= 1: w = int(round(img_size / 8 * coef) * 8) else: h = int(round(img_size / 8 / coef) * 8) images = images.resize((w, h), resample=PIL_INTERPOLATION['bicubic'], reducing_gap=None) return images EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline\n >>> from diffusers.utils import pt_to_pil\n >>> import torch\n >>> from PIL import Image\n >>> import requests\n >>> from io import BytesIO\n\n >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n >>> response = requests.get(url)\n >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")\n >>> original_image = 
original_image.resize((768, 512))\n\n >>> pipe = IFImg2ImgPipeline.from_pretrained(\n ... "DeepFloyd/IF-I-XL-v1.0",\n ... variant="fp16",\n ... torch_dtype=torch.float16,\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "A fantasy landscape in style minecraft"\n >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)\n\n >>> image = pipe(\n ... image=original_image,\n ... prompt_embeds=prompt_embeds,\n ... negative_prompt_embeds=negative_embeds,\n ... output_type="pt",\n ... ).images\n\n >>> # save intermediate image\n >>> pil_image = pt_to_pil(image)\n >>> pil_image[0].save("./if_stage_I.png")\n\n >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(\n ... "DeepFloyd/IF-II-L-v1.0",\n ... text_encoder=None,\n ... variant="fp16",\n ... torch_dtype=torch.float16,\n ... )\n >>> super_res_1_pipe.enable_model_cpu_offload()\n\n >>> image = super_res_1_pipe(\n ... image=image,\n ... original_image=original_image,\n ... prompt_embeds=prompt_embeds,\n ... negative_prompt_embeds=negative_embeds,\n ... ).images\n >>> image[0].save("./if_stage_II.png")\n ```\n' class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): tokenizer: T5Tokenizer text_encoder: T5EncoderModel unet: UNet2DConditionModel scheduler: DDPMScheduler image_noising_scheduler: DDPMScheduler feature_extractor: Optional[CLIPImageProcessor] safety_checker: Optional[IFSafetyChecker] watermarker: Optional[IFWatermarker] bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = ['tokenizer', 'text_encoder', 'safety_checker', 'feature_extractor'] model_cpu_offload_seq = 'text_encoder->unet' _exclude_from_cpu_offload = ['watermarker'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: UNet2DConditionModel, scheduler: DDPMScheduler, image_noising_scheduler: DDPMScheduler, safety_checker: Optional[IFSafetyChecker], feature_extractor: Optional[CLIPImageProcessor], watermarker: Optional[IFWatermarker], requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the IF license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") if unet.config.in_channels != 6: logger.warning("It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. 
Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`.") self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, image_noising_scheduler=image_noising_scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, watermarker=watermarker) self.register_to_config(requires_safety_checker=requires_safety_checker) def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('<person>', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('&quot;?', '', caption) caption = re.sub('&amp', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = 
re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() @torch.no_grad() def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, num_images_per_prompt: int=1, device: Optional[torch.device]=None, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, clean_caption: bool=False): if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 77 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {max_length} tokens: {removed_text}') attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.unet is not None: dtype = self.unet.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) else: negative_prompt_embeds = None return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(device) (image, nsfw_detected, watermark_detected) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype=dtype)) else: nsfw_detected = None watermark_detected = None return (image, nsfw_detected, watermark_detected) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, original_image, batch_size, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if isinstance(image, list): check_image_type = image[0] else: check_image_type = image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is {type(check_image_type)}') if isinstance(image, list): image_batch_size = len(image) elif isinstance(image, torch.Tensor): image_batch_size = image.shape[0] elif isinstance(image, PIL.Image.Image): image_batch_size = 1 elif isinstance(image, np.ndarray): image_batch_size = image.shape[0] else: assert False if batch_size != image_batch_size: raise ValueError(f'image batch size: {image_batch_size} must be same as prompt batch size {batch_size}') if isinstance(original_image, list): check_image_type = original_image[0] else: check_image_type = original_image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`original_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is {type(check_image_type)}') if isinstance(original_image, list): image_batch_size = len(original_image) elif isinstance(original_image, torch.Tensor): image_batch_size = original_image.shape[0] elif isinstance(original_image, PIL.Image.Image): image_batch_size = 1 elif isinstance(original_image, np.ndarray): image_batch_size = original_image.shape[0] else: assert False if batch_size != image_batch_size: raise ValueError(f'original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}') def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor: if not isinstance(image, list): image = [image] def numpy_to_pt(images): if images.ndim == 3: images = images[..., None] images = torch.from_numpy(images.transpose(0, 3, 1, 2)) return images if isinstance(image[0], PIL.Image.Image): new_image = [] for image_ in image: image_ = image_.convert('RGB') image_ = resize(image_, self.unet.config.sample_size) image_ = np.array(image_) image_ = image_.astype(np.float32) image_ = image_ / 127.5 - 1 new_image.append(image_) image = new_image image = np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], np.ndarray): image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) return image def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor: if not isinstance(image, torch.Tensor) and (not isinstance(image, list)): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] image = np.stack(image, axis=0) image = torch.from_numpy(image.transpose(0, 3, 1, 2)) elif isinstance(image[0], np.ndarray): image = np.stack(image, axis=0) if image.ndim == 5: image = image[0] image = 
torch.from_numpy(image.transpose(0, 3, 1, 2)) elif isinstance(image, list) and isinstance(image[0], torch.Tensor): dims = image[0].ndim if dims == 3: image = torch.stack(image, dim=0) elif dims == 4: image = torch.concat(image, dim=0) else: raise ValueError(f'Image must have 3 or 4 dimensions, instead got {dims}') image = image.to(device=device, dtype=self.unet.dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) return image def get_timesteps(self, num_inference_steps, strength): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_intermediate_images(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): (_, channels, height, width) = image.shape batch_size = batch_size * num_images_per_prompt shape = (batch_size, channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) image = self.scheduler.add_noise(image, noise, timestep) return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], original_image: Union[PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]]=None, strength: float=0.8, prompt: Union[str, List[str]]=None, num_inference_steps: int=50, timesteps: List[int]=None, guidance_scale: float=4.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, noise_level: int=250, clean_caption: bool=True): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] self.check_inputs(prompt, image, original_image, batch_size, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) do_classifier_free_guidance = guidance_scale > 1.0 device = self._execution_device (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clean_caption=clean_caption) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) dtype = prompt_embeds.dtype if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: 
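# No explicit `timesteps` were passed: the full schedule is built on the next line, and `get_timesteps` then trims it by `strength`, keeping roughly the last `strength * num_inference_steps` steps; `original_image` is subsequently noised to the first retained timestep in `prepare_intermediate_images` (standard img2img behavior).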
self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength) original_image = self.preprocess_original_image(original_image) original_image = original_image.to(device=device, dtype=dtype) noise_timestep = timesteps[0:1] noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) intermediate_images = self.prepare_intermediate_images(original_image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, generator) (_, _, height, width) = original_image.shape image = self.preprocess_image(image, num_images_per_prompt, device) upscaled = F.interpolate(image, (height, width), mode='bilinear', align_corners=True) noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) if do_classifier_free_guidance: noise_level = torch.cat([noise_level] * 2) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if hasattr(self, 'text_encoder_offload_hook') and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): model_input = torch.cat([intermediate_images, upscaled], dim=1) model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input model_input = self.scheduler.scale_model_input(model_input, t) noise_pred = self.unet(model_input, t, encoder_hidden_states=prompt_embeds, class_labels=noise_level, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (noise_pred_uncond, _) = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) (noise_pred_text, predicted_variance) = noise_pred_text.split(model_input.shape[1] // 2, dim=1) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if self.scheduler.config.variance_type not in ['learned', 'learned_range']: (noise_pred, _) = noise_pred.split(intermediate_images.shape[1], dim=1) intermediate_images = self.scheduler.step(noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, intermediate_images) image = intermediate_images if output_type == 'pil': image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) image = self.numpy_to_pil(image) if self.watermarker is not None: self.watermarker.apply_watermark(image, self.unet.config.sample_size) elif output_type == 'pt': nsfw_detected = None watermark_detected = None else: image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) self.maybe_free_model_hooks() if not return_dict: return (image, nsfw_detected, 
watermark_detected) return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting.py import html import inspect import re import urllib.parse as ul from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import UNet2DConditionModel from ...schedulers import DDPMScheduler from ...utils import BACKENDS_MAPPING, PIL_INTERPOLATION, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker logger = logging.get_logger(__name__) if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: (w, h) = images.size coef = w / h (w, h) = (img_size, img_size) if coef >= 1: w = int(round(img_size / 8 * coef) * 8) else: h = int(round(img_size / 8 / coef) * 8) images = images.resize((w, h), resample=PIL_INTERPOLATION['bicubic'], reducing_gap=None) return images EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline\n >>> from diffusers.utils import pt_to_pil\n >>> import torch\n >>> from PIL import Image\n >>> import requests\n >>> from io import BytesIO\n\n >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"\n >>> response = requests.get(url)\n >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")\n >>> original_image = original_image\n\n >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"\n >>> response = requests.get(url)\n >>> mask_image = Image.open(BytesIO(response.content))\n >>> mask_image = mask_image\n\n >>> pipe = IFInpaintingPipeline.from_pretrained(\n ... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "blue sunglasses"\n >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)\n\n >>> image = pipe(\n ... image=original_image,\n ... mask_image=mask_image,\n ... prompt_embeds=prompt_embeds,\n ... negative_prompt_embeds=negative_embeds,\n ... output_type="pt",\n ... ).images\n\n >>> # save intermediate image\n >>> pil_image = pt_to_pil(image)\n >>> pil_image[0].save("./if_stage_I.png")\n\n >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(\n ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16\n ... )\n >>> super_res_1_pipe.enable_model_cpu_offload()\n\n >>> image = super_res_1_pipe(\n ... image=image,\n ... mask_image=mask_image,\n ... original_image=original_image,\n ... prompt_embeds=prompt_embeds,\n ... negative_prompt_embeds=negative_embeds,\n ... 
).images\n >>> image[0].save("./if_stage_II.png")\n ```\n' class IFInpaintingPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): tokenizer: T5Tokenizer text_encoder: T5EncoderModel unet: UNet2DConditionModel scheduler: DDPMScheduler feature_extractor: Optional[CLIPImageProcessor] safety_checker: Optional[IFSafetyChecker] watermarker: Optional[IFWatermarker] bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = ['tokenizer', 'text_encoder', 'safety_checker', 'feature_extractor', 'watermarker'] model_cpu_offload_seq = 'text_encoder->unet' _exclude_from_cpu_offload = ['watermarker'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: UNet2DConditionModel, scheduler: DDPMScheduler, safety_checker: Optional[IFSafetyChecker], feature_extractor: Optional[CLIPImageProcessor], watermarker: Optional[IFWatermarker], requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the IF license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker.
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, watermarker=watermarker) self.register_to_config(requires_safety_checker=requires_safety_checker) @torch.no_grad() def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, num_images_per_prompt: int=1, device: Optional[torch.device]=None, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, clean_caption: bool=False): if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 77 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {max_length} tokens: {removed_text}') attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.unet is not None: dtype = self.unet.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) else: negative_prompt_embeds = None return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(device) (image, nsfw_detected, watermark_detected) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype=dtype)) else: nsfw_detected = None watermark_detected = None return (image, nsfw_detected, watermark_detected) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, mask_image, batch_size, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if isinstance(image, list): check_image_type = image[0] else: check_image_type = image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is {type(check_image_type)}') if isinstance(image, list): image_batch_size = len(image) elif isinstance(image, torch.Tensor): image_batch_size = image.shape[0] elif isinstance(image, PIL.Image.Image): image_batch_size = 1 elif isinstance(image, np.ndarray): image_batch_size = image.shape[0] else: assert False if batch_size != image_batch_size: raise ValueError(f'image batch size: {image_batch_size} must be same as prompt batch size {batch_size}') if isinstance(mask_image, list): check_image_type = mask_image[0] else: check_image_type = mask_image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`mask_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is {type(check_image_type)}') if isinstance(mask_image, list): image_batch_size = len(mask_image) elif isinstance(mask_image, torch.Tensor): image_batch_size = mask_image.shape[0] elif isinstance(mask_image, PIL.Image.Image): image_batch_size = 1 elif isinstance(mask_image, np.ndarray): image_batch_size = mask_image.shape[0] else: assert False if image_batch_size != 1 and batch_size != image_batch_size: raise ValueError(f'mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}') def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = 
re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('"?', '', caption) caption = re.sub('&', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() def preprocess_image(self, image: PIL.Image.Image) -> torch.Tensor: if not isinstance(image, list): image = [image] def numpy_to_pt(images): if images.ndim == 3: images = images[..., None] images = torch.from_numpy(images.transpose(0, 3, 1, 2)) return images if isinstance(image[0], PIL.Image.Image): new_image = [] for image_ in image: image_ = image_.convert('RGB') image_ = resize(image_, self.unet.config.sample_size) image_ = np.array(image_) image_ = image_.astype(np.float32) image_ = image_ / 127.5 - 1 new_image.append(image_) image = new_image image = np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], np.ndarray): image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) return image def preprocess_mask_image(self, mask_image) -> torch.Tensor: if not isinstance(mask_image, list): mask_image = [mask_image] if isinstance(mask_image[0], torch.Tensor): mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, 
axis=0) if mask_image.ndim == 2: mask_image = mask_image.unsqueeze(0).unsqueeze(0) elif mask_image.ndim == 3 and mask_image.shape[0] == 1: mask_image = mask_image.unsqueeze(0) elif mask_image.ndim == 3 and mask_image.shape[0] != 1: mask_image = mask_image.unsqueeze(1) mask_image[mask_image < 0.5] = 0 mask_image[mask_image >= 0.5] = 1 elif isinstance(mask_image[0], PIL.Image.Image): new_mask_image = [] for mask_image_ in mask_image: mask_image_ = mask_image_.convert('L') mask_image_ = resize(mask_image_, self.unet.config.sample_size) mask_image_ = np.array(mask_image_) mask_image_ = mask_image_[None, None, :] new_mask_image.append(mask_image_) mask_image = new_mask_image mask_image = np.concatenate(mask_image, axis=0) mask_image = mask_image.astype(np.float32) / 255.0 mask_image[mask_image < 0.5] = 0 mask_image[mask_image >= 0.5] = 1 mask_image = torch.from_numpy(mask_image) elif isinstance(mask_image[0], np.ndarray): mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0) mask_image[mask_image < 0.5] = 0 mask_image[mask_image >= 0.5] = 1 mask_image = torch.from_numpy(mask_image) return mask_image def get_timesteps(self, num_inference_steps, strength): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_intermediate_images(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None): (image_batch_size, channels, height, width) = image.shape batch_size = batch_size * num_images_per_prompt shape = (batch_size, channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) noised_image = self.scheduler.add_noise(image, noise, timestep) image = (1 - mask_image) * image + mask_image * noised_image return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: Union[PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]]=None, mask_image: Union[PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]]=None, strength: float=1.0, num_inference_steps: int=50, timesteps: List[int]=None, guidance_scale: float=7.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, clean_caption: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] self.check_inputs(prompt, image, mask_image, batch_size, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clean_caption=clean_caption) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) dtype = prompt_embeds.dtype if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength) image = self.preprocess_image(image) image = image.to(device=device, dtype=dtype) mask_image = self.preprocess_mask_image(mask_image) mask_image = mask_image.to(device=device, dtype=dtype) if mask_image.shape[0] == 1: mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0) else: mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) noise_timestep = timesteps[0:1] noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) intermediate_images = self.prepare_intermediate_images(image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if hasattr(self, 'text_encoder_offload_hook') and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): model_input = 
torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images model_input = self.scheduler.scale_model_input(model_input, t) noise_pred = self.unet(model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (noise_pred_uncond, _) = noise_pred_uncond.split(model_input.shape[1], dim=1) (noise_pred_text, predicted_variance) = noise_pred_text.split(model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if self.scheduler.config.variance_type not in ['learned', 'learned_range']: (noise_pred, _) = noise_pred.split(model_input.shape[1], dim=1) prev_intermediate_images = intermediate_images intermediate_images = self.scheduler.step(noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False)[0] intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, intermediate_images) image = intermediate_images if output_type == 'pil': image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) image = self.numpy_to_pil(image) if self.watermarker is not None: self.watermarker.apply_watermark(image, self.unet.config.sample_size) elif output_type == 'pt': nsfw_detected = None watermark_detected = None if hasattr(self, 'unet_offload_hook') and self.unet_offload_hook is not None: self.unet_offload_hook.offload() else: image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) self.maybe_free_model_hooks() if not return_dict: return (image, nsfw_detected, watermark_detected) return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py import html import inspect import re import urllib.parse as ul from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import UNet2DConditionModel from ...schedulers import DDPMScheduler from ...utils import BACKENDS_MAPPING, PIL_INTERPOLATION, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy logger = logging.get_logger(__name__) def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: (w, h) = images.size coef = w / h (w, h) = (img_size, img_size) if coef >= 1: w = int(round(img_size / 8 * coef) * 8) else: h = int(round(img_size / 8 / coef) * 8) images = 
images.resize((w, h), resample=PIL_INTERPOLATION['bicubic'], reducing_gap=None) return images EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline\n >>> from diffusers.utils import pt_to_pil\n >>> import torch\n >>> from PIL import Image\n >>> import requests\n >>> from io import BytesIO\n\n >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png"\n >>> response = requests.get(url)\n >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")\n >>> original_image = original_image\n\n >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png"\n >>> response = requests.get(url)\n >>> mask_image = Image.open(BytesIO(response.content))\n >>> mask_image = mask_image\n\n >>> pipe = IFInpaintingPipeline.from_pretrained(\n ... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "blue sunglasses"\n\n >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)\n >>> image = pipe(\n ... image=original_image,\n ... mask_image=mask_image,\n ... prompt_embeds=prompt_embeds,\n ... negative_prompt_embeds=negative_embeds,\n ... output_type="pt",\n ... ).images\n\n >>> # save intermediate image\n >>> pil_image = pt_to_pil(image)\n >>> pil_image[0].save("./if_stage_I.png")\n\n >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained(\n ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16\n ... )\n >>> super_res_1_pipe.enable_model_cpu_offload()\n\n >>> image = super_res_1_pipe(\n ... image=image,\n ... mask_image=mask_image,\n ... original_image=original_image,\n ... prompt_embeds=prompt_embeds,\n ... negative_prompt_embeds=negative_embeds,\n ... ).images\n >>> image[0].save("./if_stage_II.png")\n ```\n ' class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): tokenizer: T5Tokenizer text_encoder: T5EncoderModel unet: UNet2DConditionModel scheduler: DDPMScheduler image_noising_scheduler: DDPMScheduler feature_extractor: Optional[CLIPImageProcessor] safety_checker: Optional[IFSafetyChecker] watermarker: Optional[IFWatermarker] bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') model_cpu_offload_seq = 'text_encoder->unet' _optional_components = ['tokenizer', 'text_encoder', 'safety_checker', 'feature_extractor', 'watermarker'] _exclude_from_cpu_offload = ['watermarker'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: UNet2DConditionModel, scheduler: DDPMScheduler, image_noising_scheduler: DDPMScheduler, safety_checker: Optional[IFSafetyChecker], feature_extractor: Optional[CLIPImageProcessor], watermarker: Optional[IFWatermarker], requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the IF license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") if unet.config.in_channels != 6: logger.warning("It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`.") self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, image_noising_scheduler=image_noising_scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, watermarker=watermarker) self.register_to_config(requires_safety_checker=requires_safety_checker) def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('"?', '', caption) caption = re.sub('&', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = 
re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() @torch.no_grad() def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, num_images_per_prompt: int=1, device: Optional[torch.device]=None, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, clean_caption: bool=False): if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 77 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {max_length} tokens: {removed_text}') attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.unet is not None: dtype = self.unet.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: 
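# Illustrative sketch (annotation, not part of the upstream source): the unconditional
# embeddings built in this branch only exist to support classifier-free guidance. In
# `__call__` below they are concatenated in front of the text embeddings so that a single
# UNet forward pass scores both branches, and the two predictions are then recombined,
# roughly:
#
#     prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])   # (2 * B, seq, dim)
#     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
#     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#
# `do_classifier_free_guidance` is simply `guidance_scale > 1.0`, so a negative prompt (or
# precomputed `negative_prompt_embeds`) only takes effect when the guidance scale is above 1.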
uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) else: negative_prompt_embeds = None return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(device) (image, nsfw_detected, watermark_detected) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype=dtype)) else: nsfw_detected = None watermark_detected = None return (image, nsfw_detected, watermark_detected) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, original_image, mask_image, batch_size, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if isinstance(image, list): check_image_type = image[0] else: check_image_type = image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is {type(check_image_type)}') if isinstance(image, list): image_batch_size = len(image) elif isinstance(image, torch.Tensor): image_batch_size = image.shape[0] elif isinstance(image, PIL.Image.Image): image_batch_size = 1 elif isinstance(image, np.ndarray): image_batch_size = image.shape[0] else: assert False if batch_size != image_batch_size: raise ValueError(f'image batch size: {image_batch_size} must be same as prompt batch size {batch_size}') if isinstance(original_image, list): check_image_type = original_image[0] else: check_image_type = original_image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`original_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is {type(check_image_type)}') if isinstance(original_image, list): image_batch_size = len(original_image) elif isinstance(original_image, torch.Tensor): image_batch_size = original_image.shape[0] elif isinstance(original_image, PIL.Image.Image): image_batch_size = 1 elif isinstance(original_image, np.ndarray): image_batch_size = original_image.shape[0] else: assert False if batch_size != image_batch_size: raise ValueError(f'original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}') if isinstance(mask_image, list): check_image_type = mask_image[0] else: check_image_type = mask_image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`mask_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] 
but is {type(check_image_type)}') if isinstance(mask_image, list): image_batch_size = len(mask_image) elif isinstance(mask_image, torch.Tensor): image_batch_size = mask_image.shape[0] elif isinstance(mask_image, PIL.Image.Image): image_batch_size = 1 elif isinstance(mask_image, np.ndarray): image_batch_size = mask_image.shape[0] else: assert False if image_batch_size != 1 and batch_size != image_batch_size: raise ValueError(f'mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}') def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor: if not isinstance(image, list): image = [image] def numpy_to_pt(images): if images.ndim == 3: images = images[..., None] images = torch.from_numpy(images.transpose(0, 3, 1, 2)) return images if isinstance(image[0], PIL.Image.Image): new_image = [] for image_ in image: image_ = image_.convert('RGB') image_ = resize(image_, self.unet.config.sample_size) image_ = np.array(image_) image_ = image_.astype(np.float32) image_ = image_ / 127.5 - 1 new_image.append(image_) image = new_image image = np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], np.ndarray): image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) image = numpy_to_pt(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) return image def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor: if not isinstance(image, torch.Tensor) and (not isinstance(image, list)): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] image = np.stack(image, axis=0) image = torch.from_numpy(image.transpose(0, 3, 1, 2)) elif isinstance(image[0], np.ndarray): image = np.stack(image, axis=0) if image.ndim == 5: image = image[0] image = torch.from_numpy(image.transpose(0, 3, 1, 2)) elif isinstance(image, list) and isinstance(image[0], torch.Tensor): dims = image[0].ndim if dims == 3: image = torch.stack(image, dim=0) elif dims == 4: image = torch.concat(image, dim=0) else: raise ValueError(f'Image must have 3 or 4 dimensions, instead got {dims}') image = image.to(device=device, dtype=self.unet.dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) return image def preprocess_mask_image(self, mask_image) -> torch.Tensor: if not isinstance(mask_image, list): mask_image = [mask_image] if isinstance(mask_image[0], torch.Tensor): mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, axis=0) if mask_image.ndim == 2: mask_image = mask_image.unsqueeze(0).unsqueeze(0) elif mask_image.ndim == 3 and mask_image.shape[0] == 1: mask_image = mask_image.unsqueeze(0) elif mask_image.ndim == 3 and mask_image.shape[0] != 1: mask_image = mask_image.unsqueeze(1) mask_image[mask_image < 0.5] = 0 mask_image[mask_image >= 0.5] = 1 elif isinstance(mask_image[0], PIL.Image.Image): new_mask_image = [] for mask_image_ in mask_image: mask_image_ = mask_image_.convert('L') mask_image_ = resize(mask_image_, self.unet.config.sample_size) mask_image_ = np.array(mask_image_) mask_image_ = mask_image_[None, None, :] new_mask_image.append(mask_image_) mask_image = new_mask_image mask_image = np.concatenate(mask_image, axis=0) mask_image = mask_image.astype(np.float32) / 255.0 mask_image[mask_image < 0.5] = 0 mask_image[mask_image >= 0.5] = 1 mask_image = torch.from_numpy(mask_image) 
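# Illustrative sketch (annotation, not part of the upstream source): for PIL inputs the
# branch above yields a float tensor of shape (num_masks, 1, H, W) whose values are
# hard-binarized at 0.5, e.g. (hypothetical usage):
#
#     mask_t = pipe.preprocess_mask_image(Image.open("mask.png"))
#     mask_t.shape        # torch.Size([1, 1, H, W])
#     mask_t.unique()     # tensor([0., 1.])
#
# The tensor and ndarray branches apply the same 0/1 thresholding. The convention matters
# downstream: `prepare_intermediate_images` blends
# `(1 - mask_image) * image + mask_image * noised_image`, so 1 marks the region that is
# re-noised and repainted while 0 preserves the original pixels.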
elif isinstance(mask_image[0], np.ndarray): mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0) mask_image[mask_image < 0.5] = 0 mask_image[mask_image >= 0.5] = 1 mask_image = torch.from_numpy(mask_image) return mask_image def get_timesteps(self, num_inference_steps, strength): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_intermediate_images(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None): (image_batch_size, channels, height, width) = image.shape batch_size = batch_size * num_images_per_prompt shape = (batch_size, channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) noised_image = self.scheduler.add_noise(image, noise, timestep) image = (1 - mask_image) * image + mask_image * noised_image return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], original_image: Union[PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]]=None, mask_image: Union[PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]]=None, strength: float=0.8, prompt: Union[str, List[str]]=None, num_inference_steps: int=100, timesteps: List[int]=None, guidance_scale: float=4.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, noise_level: int=0, clean_caption: bool=True): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] self.check_inputs(prompt, image, original_image, mask_image, batch_size, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) do_classifier_free_guidance = guidance_scale > 1.0 device = self._execution_device (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clean_caption=clean_caption) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) dtype = prompt_embeds.dtype if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps 
= len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength) original_image = self.preprocess_original_image(original_image) original_image = original_image.to(device=device, dtype=dtype) mask_image = self.preprocess_mask_image(mask_image) mask_image = mask_image.to(device=device, dtype=dtype) if mask_image.shape[0] == 1: mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0) else: mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) noise_timestep = timesteps[0:1] noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) intermediate_images = self.prepare_intermediate_images(original_image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator) (_, _, height, width) = original_image.shape image = self.preprocess_image(image, num_images_per_prompt, device) upscaled = F.interpolate(image, (height, width), mode='bilinear', align_corners=True) noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) if do_classifier_free_guidance: noise_level = torch.cat([noise_level] * 2) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if hasattr(self, 'text_encoder_offload_hook') and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): model_input = torch.cat([intermediate_images, upscaled], dim=1) model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input model_input = self.scheduler.scale_model_input(model_input, t) noise_pred = self.unet(model_input, t, encoder_hidden_states=prompt_embeds, class_labels=noise_level, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (noise_pred_uncond, _) = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) (noise_pred_text, predicted_variance) = noise_pred_text.split(model_input.shape[1] // 2, dim=1) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if self.scheduler.config.variance_type not in ['learned', 'learned_range']: (noise_pred, _) = noise_pred.split(intermediate_images.shape[1], dim=1) prev_intermediate_images = intermediate_images intermediate_images = self.scheduler.step(noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False)[0] intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, intermediate_images) image = intermediate_images if output_type == 'pil': image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) image = 
self.numpy_to_pil(image) if self.watermarker is not None: self.watermarker.apply_watermark(image, self.unet.config.sample_size) elif output_type == 'pt': nsfw_detected = None watermark_detected = None else: image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) self.maybe_free_model_hooks() if not return_dict: return (image, nsfw_detected, watermark_detected) return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/pipeline_if_superresolution.py import html import inspect import re import urllib.parse as ul from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import UNet2DConditionModel from ...schedulers import DDPMScheduler from ...utils import BACKENDS_MAPPING, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import IFPipelineOutput from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline\n >>> from diffusers.utils import pt_to_pil\n >>> import torch\n\n >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = \'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"\'\n >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)\n\n >>> image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images\n\n >>> # save intermediate image\n >>> pil_image = pt_to_pil(image)\n >>> pil_image[0].save("./if_stage_I.png")\n\n >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained(\n ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16\n ... )\n >>> super_res_1_pipe.enable_model_cpu_offload()\n\n >>> image = super_res_1_pipe(\n ... image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds\n ... 
).images\n >>> image[0].save("./if_stage_II.png")\n ```\n' class IFSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): tokenizer: T5Tokenizer text_encoder: T5EncoderModel unet: UNet2DConditionModel scheduler: DDPMScheduler image_noising_scheduler: DDPMScheduler feature_extractor: Optional[CLIPImageProcessor] safety_checker: Optional[IFSafetyChecker] watermarker: Optional[IFWatermarker] bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = ['tokenizer', 'text_encoder', 'safety_checker', 'feature_extractor', 'watermarker'] model_cpu_offload_seq = 'text_encoder->unet' _exclude_from_cpu_offload = ['watermarker'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: UNet2DConditionModel, scheduler: DDPMScheduler, image_noising_scheduler: DDPMScheduler, safety_checker: Optional[IFSafetyChecker], feature_extractor: Optional[CLIPImageProcessor], watermarker: Optional[IFWatermarker], requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the IF license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") if unet.config.in_channels != 6: logger.warning(f"It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6.
Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`.") self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, image_noising_scheduler=image_noising_scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, watermarker=watermarker) self.register_to_config(requires_safety_checker=requires_safety_checker) def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('"?', '', caption) caption = re.sub('&', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = 
re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() @torch.no_grad() def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, num_images_per_prompt: int=1, device: Optional[torch.device]=None, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, clean_caption: bool=False): if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 77 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {max_length} tokens: {removed_text}') attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.unet is not None: dtype = self.unet.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) else: negative_prompt_embeds = None return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(device) (image, nsfw_detected, watermark_detected) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype=dtype)) else: nsfw_detected = None watermark_detected = None return (image, nsfw_detected, watermark_detected) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, batch_size, noise_level, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: raise ValueError(f'`noise_level`: {noise_level} must be a valid timestep in `self.noising_scheduler`, [0, {self.image_noising_scheduler.config.num_train_timesteps})') if isinstance(image, list): check_image_type = image[0] else: check_image_type = image if not isinstance(check_image_type, torch.Tensor) and (not isinstance(check_image_type, PIL.Image.Image)) and (not isinstance(check_image_type, np.ndarray)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is {type(check_image_type)}') if isinstance(image, list): image_batch_size = len(image) elif isinstance(image, torch.Tensor): image_batch_size = image.shape[0] elif isinstance(image, PIL.Image.Image): image_batch_size = 1 elif isinstance(image, np.ndarray): image_batch_size = image.shape[0] else: assert False if batch_size != image_batch_size: raise ValueError(f'image batch size: {image_batch_size} must be same as prompt batch size {batch_size}') def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype) intermediate_images = intermediate_images * self.scheduler.init_noise_sigma return intermediate_images def preprocess_image(self, image, num_images_per_prompt, device): if not isinstance(image, torch.Tensor) and (not isinstance(image, list)): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] image = np.stack(image, axis=0) image = torch.from_numpy(image.transpose(0, 3, 1, 2)) elif isinstance(image[0], np.ndarray): image = np.stack(image, axis=0) if image.ndim == 5: image = image[0] image = torch.from_numpy(image.transpose(0, 3, 1, 2)) elif isinstance(image, list) and isinstance(image[0], torch.Tensor): dims = image[0].ndim if dims == 3: image = torch.stack(image, dim=0) elif dims == 4: image = torch.concat(image, dim=0) else: raise ValueError(f'Image must have 3 or 4 dimensions, instead got {dims}') image = image.to(device=device, dtype=self.unet.dtype) image = image.repeat_interleave(num_images_per_prompt, dim=0) return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: int=None, width: int=None, image: Union[PIL.Image.Image, np.ndarray, torch.Tensor]=None, num_inference_steps: int=50, timesteps: List[int]=None, guidance_scale: float=4.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, noise_level: int=250, clean_caption: bool=True): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] self.check_inputs(prompt, image, batch_size, noise_level, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) height = height or self.unet.config.sample_size width = width or self.unet.config.sample_size device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clean_caption=clean_caption) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(0) num_channels = self.unet.config.in_channels // 2 intermediate_images = self.prepare_intermediate_images(batch_size * num_images_per_prompt, num_channels, height, width, prompt_embeds.dtype, device, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) image = 
self.preprocess_image(image, num_images_per_prompt, device) upscaled = F.interpolate(image, (height, width), mode='bilinear', align_corners=True) noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) if do_classifier_free_guidance: noise_level = torch.cat([noise_level] * 2) if hasattr(self, 'text_encoder_offload_hook') and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): model_input = torch.cat([intermediate_images, upscaled], dim=1) model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input model_input = self.scheduler.scale_model_input(model_input, t) noise_pred = self.unet(model_input, t, encoder_hidden_states=prompt_embeds, class_labels=noise_level, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (noise_pred_uncond, _) = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) (noise_pred_text, predicted_variance) = noise_pred_text.split(model_input.shape[1] // 2, dim=1) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if self.scheduler.config.variance_type not in ['learned', 'learned_range']: (noise_pred, _) = noise_pred.split(intermediate_images.shape[1], dim=1) intermediate_images = self.scheduler.step(noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, intermediate_images) image = intermediate_images if output_type == 'pil': image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) image = self.numpy_to_pil(image) if self.watermarker is not None: self.watermarker.apply_watermark(image, self.unet.config.sample_size) elif output_type == 'pt': nsfw_detected = None watermark_detected = None if hasattr(self, 'unet_offload_hook') and self.unet_offload_hook is not None: self.unet_offload_hook.offload() else: image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() (image, nsfw_detected, watermark_detected) = self.run_safety_checker(image, device, prompt_embeds.dtype) self.maybe_free_model_hooks() if not return_dict: return (image, nsfw_detected, watermark_detected) return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/pipeline_output.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class IFPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] nsfw_detected: Optional[List[bool]] watermark_detected: Optional[List[bool]] # File: 
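# Example (not part of the diffusers source): a minimal sketch of how IFSuperResolutionPipeline
# builds its 6-channel UNet input, mirroring the steps in `__call__` above. The stage-I output is
# bilinearly upscaled, noised with `image_noising_scheduler.add_noise` at `noise_level`, and then
# concatenated channel-wise with the images being denoised (hence the `unet.config.in_channels != 6`
# warning in `__init__`). Shapes and scheduler settings below are illustrative assumptions.
import torch
import torch.nn.functional as F
from diffusers import DDPMScheduler

image_noising_scheduler = DDPMScheduler(num_train_timesteps=1000)

stage_1_image = torch.randn(1, 3, 64, 64)           # stand-in for the stage-I pipeline output
intermediate_images = torch.randn(1, 3, 256, 256)   # images currently being denoised at stage-II resolution

upscaled = F.interpolate(stage_1_image, (256, 256), mode="bilinear", align_corners=True)
noise_level = torch.tensor([250] * upscaled.shape[0])
noise = torch.randn_like(upscaled)
upscaled = image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)

model_input = torch.cat([intermediate_images, upscaled], dim=1)
print(model_input.shape)  # torch.Size([1, 6, 256, 256]) -- 3 denoised + 3 noised conditioning channels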
diffusers-main/src/diffusers/pipelines/deepfloyd_if/safety_checker.py import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging logger = logging.get_logger(__name__) class IFSafetyChecker(PreTrainedModel): config_class = CLIPConfig _no_split_modules = ['CLIPEncoderLayer'] def __init__(self, config: CLIPConfig): super().__init__(config) self.vision_model = CLIPVisionModelWithProjection(config.vision_config) self.p_head = nn.Linear(config.vision_config.projection_dim, 1) self.w_head = nn.Linear(config.vision_config.projection_dim, 1) @torch.no_grad() def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5): image_embeds = self.vision_model(clip_input)[0] nsfw_detected = self.p_head(image_embeds) nsfw_detected = nsfw_detected.flatten() nsfw_detected = nsfw_detected > p_threshold nsfw_detected = nsfw_detected.tolist() if any(nsfw_detected): logger.warning('Potential NSFW content was detected in one or more images. A black image will be returned instead. Try again with a different prompt and/or seed.') for (idx, nsfw_detected_) in enumerate(nsfw_detected): if nsfw_detected_: images[idx] = np.zeros(images[idx].shape) watermark_detected = self.w_head(image_embeds) watermark_detected = watermark_detected.flatten() watermark_detected = watermark_detected > w_threshold watermark_detected = watermark_detected.tolist() if any(watermark_detected): logger.warning('Potential watermarked content was detected in one or more images. A black image will be returned instead. Try again with a different prompt and/or seed.') for (idx, watermark_detected_) in enumerate(watermark_detected): if watermark_detected_: images[idx] = np.zeros(images[idx].shape) return (images, nsfw_detected, watermark_detected) # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/timesteps.py fast27_timesteps = [999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0] smart27_timesteps = [999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0] smart50_timesteps = [999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0] smart100_timesteps = [999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0] smart185_timesteps = [999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 
694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0] super27_timesteps = [999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0] super40_timesteps = [999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0] super100_timesteps = [999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0] # File: diffusers-main/src/diffusers/pipelines/deepfloyd_if/watermark.py from typing import List import PIL.Image import torch from PIL import Image from ...configuration_utils import ConfigMixin from ...models.modeling_utils import ModelMixin from ...utils import PIL_INTERPOLATION class IFWatermarker(ModelMixin, ConfigMixin): def __init__(self): super().__init__() self.register_buffer('watermark_image', torch.zeros((62, 62, 4))) self.watermark_image_as_pil = None def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None): h = images[0].height w = images[0].width sample_size = sample_size or h coef = min(h / sample_size, w / sample_size) (img_h, img_w) = (int(h / coef), int(w / coef)) if coef < 1 else (h, w) (S1, S2) = (1024 ** 2, img_w * img_h) K = (S2 / S1) ** 0.5 (wm_size, wm_x, wm_y) = (int(K * 62), img_w - int(14 * K), img_h - int(14 * K)) if self.watermark_image_as_pil is None: watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy() watermark_image = Image.fromarray(watermark_image, mode='RGBA') self.watermark_image_as_pil = watermark_image wm_img = self.watermark_image_as_pil.resize((wm_size, wm_size), PIL_INTERPOLATION['bicubic'], reducing_gap=None) for pil_img in images: pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - wm_size, wm_x, wm_y), mask=wm_img.split()[-1]) return images # File: diffusers-main/src/diffusers/pipelines/deprecated/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_librosa_available, is_note_seq_available, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_pt_objects _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: _import_structure['latent_diffusion_uncond'] = ['LDMPipeline'] _import_structure['pndm'] = ['PNDMPipeline'] _import_structure['repaint'] = ['RePaintPipeline'] _import_structure['score_sde_ve'] = ['ScoreSdeVePipeline'] _import_structure['stochastic_karras_ve'] = ['KarrasVePipeline'] try: if not (is_transformers_available() and is_torch_available()): raise 
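# Example (not part of the diffusers source): the hand-tuned schedules defined in
# deepfloyd_if/timesteps.py above can be passed directly as the `timesteps` argument of the IF
# pipelines, which forward them to `scheduler.set_timesteps(timesteps=...)` instead of an evenly
# spaced `num_inference_steps` schedule. The model id, prompt, and blank placeholder image are
# assumptions for illustration only.
import torch
import PIL.Image
from diffusers import IFSuperResolutionPipeline
from diffusers.pipelines.deepfloyd_if.timesteps import smart27_timesteps

pipe = IFSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

stage_1_output = PIL.Image.new("RGB", (64, 64))  # stand-in for the 64x64 stage-I output
image = pipe(
    prompt="a photo of a red panda",
    image=stage_1_output,
    timesteps=smart27_timesteps,
).images[0]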
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['alt_diffusion'] = ['AltDiffusionImg2ImgPipeline', 'AltDiffusionPipeline', 'AltDiffusionPipelineOutput'] _import_structure['versatile_diffusion'] = ['VersatileDiffusionDualGuidedPipeline', 'VersatileDiffusionImageVariationPipeline', 'VersatileDiffusionPipeline', 'VersatileDiffusionTextToImagePipeline'] _import_structure['vq_diffusion'] = ['VQDiffusionPipeline'] _import_structure['stable_diffusion_variants'] = ['CycleDiffusionPipeline', 'StableDiffusionInpaintPipelineLegacy', 'StableDiffusionPix2PixZeroPipeline', 'StableDiffusionParadigmsPipeline', 'StableDiffusionModelEditingPipeline'] try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_librosa_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) else: _import_structure['audio_diffusion'] = ['AudioDiffusionPipeline', 'Mel'] try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_transformers_and_torch_and_note_seq_objects _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) else: _import_structure['spectrogram_diffusion'] = ['MidiProcessor', 'SpectrogramDiffusionPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_pt_objects import * else: from .latent_diffusion_uncond import LDMPipeline from .pndm import PNDMPipeline from .repaint import RePaintPipeline from .score_sde_ve import ScoreSdeVePipeline from .stochastic_karras_ve import KarrasVePipeline try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AltDiffusionPipelineOutput from .audio_diffusion import AudioDiffusionPipeline, Mel from .spectrogram_diffusion import SpectrogramDiffusionPipeline from .stable_diffusion_variants import CycleDiffusionPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionModelEditingPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPix2PixZeroPipeline from .stochastic_karras_ve import KarrasVePipeline from .versatile_diffusion import VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline from .vq_diffusion import VQDiffusionPipeline try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_librosa_objects import * else: from .audio_diffusion import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * else: from .spectrogram_diffusion import MidiProcessor, SpectrogramDiffusionPipeline 
else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['modeling_roberta_series'] = ['RobertaSeriesModelWithTransformation'] _import_structure['pipeline_alt_diffusion'] = ['AltDiffusionPipeline'] _import_structure['pipeline_alt_diffusion_img2img'] = ['AltDiffusionImg2ImgPipeline'] _import_structure['pipeline_output'] = ['AltDiffusionPipelineOutput'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import * else: from .modeling_roberta_series import RobertaSeriesModelWithTransformation from .pipeline_alt_diffusion import AltDiffusionPipeline from .pipeline_alt_diffusion_img2img import AltDiffusionImg2ImgPipeline from .pipeline_output import AltDiffusionPipelineOutput else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class TransformationModelOutput(ModelOutput): projection_state: Optional[torch.Tensor] = None last_hidden_state: torch.Tensor = None hidden_states: Optional[Tuple[torch.Tensor]] = None attentions: Optional[Tuple[torch.Tensor]] = None class RobertaSeriesConfig(XLMRobertaConfig): def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn='cls', learn_encoder=False, use_attention_mask=True, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.project_dim = project_dim self.pooler_fn = pooler_fn self.learn_encoder = learn_encoder self.use_attention_mask = use_attention_mask class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel): _keys_to_ignore_on_load_unexpected = ['pooler', 'logit_scale'] _keys_to_ignore_on_load_missing = ['position_ids', 'predictions.decoder.bias'] base_model_prefix = 'roberta' config_class = RobertaSeriesConfig def __init__(self, config): super().__init__(config) self.roberta = XLMRobertaModel(config) self.transformation = nn.Linear(config.hidden_size, config.project_dim) self.has_pre_transformation = getattr(config, 'has_pre_transformation', False) if self.has_pre_transformation: self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim) self.pre_LN = 
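# Example (not part of the diffusers source): the guard-and-register pattern used by the two
# `__init__` modules above. When an optional dependency is missing, dummy placeholder objects are
# collected; otherwise the real submodules are recorded in `_import_structure` and exposed lazily
# through `_LazyModule`. The submodule and class names below are hypothetical.
from diffusers.utils import (
    OptionalDependencyNotAvailable,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)

_import_structure = {}
_dummy_objects = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from diffusers.utils import dummy_torch_and_transformers_objects

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["pipeline_my_pipeline"] = ["MyPipeline"]  # hypothetical submodule/class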
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_init() def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, output_hidden_states: Optional[bool]=None): """""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict) if self.has_pre_transformation: sequence_output2 = outputs['hidden_states'][-2] sequence_output2 = self.pre_LN(sequence_output2) projection_state2 = self.transformation_pre(sequence_output2) return TransformationModelOutput(projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions) else: projection_state = self.transformation(outputs.last_hidden_state) return TransformationModelOutput(projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: diffusers-main/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from packaging import version from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer from ....configuration_utils import FrozenDict from ....image_processor import PipelineImageInput, VaeImageProcessor from ....loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import KarrasDiffusionSchedulers from ....utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .modeling_roberta_series import RobertaSeriesModelWithTransformation from .pipeline_output import AltDiffusionPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AltDiffusionPipeline\n\n >>> pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion-m9", torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n\n >>> # "dark elf princess, highly detailed, d & d, fantasy, highly detailed, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and fuji choko and viktoria gavrilenko and hoang lap"\n >>> prompt = "黑暗精灵公主,非常详细,幻想,非常详细,数字绘画,概念艺术,敏锐的焦点,插图"\n >>> image = pipe(prompt).images[0]\n ```\n' def 
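# Example (not part of the diffusers source): the Alt Diffusion text encoder defined above wraps
# XLM-RoBERTa and returns `projection_state`, a linear projection of the hidden states that the
# pipeline uses as `prompt_embeds`. The checkpoint id comes from the example docstring; the prompt
# and max length are illustrative assumptions.
import torch
from transformers import XLMRobertaTokenizer
from diffusers.pipelines.deprecated.alt_diffusion import RobertaSeriesModelWithTransformation

tokenizer = XLMRobertaTokenizer.from_pretrained("BAAI/AltDiffusion-m9", subfolder="tokenizer")
text_encoder = RobertaSeriesModelWithTransformation.from_pretrained(
    "BAAI/AltDiffusion-m9", subfolder="text_encoder"
)

inputs = tokenizer(
    "dark elf princess, highly detailed, fantasy",
    padding="max_length",
    max_length=77,
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    out = text_encoder(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask)
print(out.projection_state.shape)  # (batch_size, sequence_length, config.project_dim)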
rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class AltDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: RobertaSeriesModelWithTransformation, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. 
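# Example (not part of the diffusers source): how the module-level helper `rescale_noise_cfg`
# defined above is applied after classifier-free guidance, matching the call in `__call__` below
# when `guidance_rescale > 0`. The tensors are random stand-ins for UNet outputs and the scales
# are illustrative.
import torch
from diffusers.pipelines.deprecated.alt_diffusion.pipeline_alt_diffusion import rescale_noise_cfg

noise_pred_uncond = torch.randn(2, 4, 64, 64)
noise_pred_text = torch.randn(2, 4, 64, 64)
guidance_scale = 7.5

noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# Rescale the guided prediction towards the std of the text-conditioned prediction to reduce
# over-exposure; guidance_rescale=0.0 would leave the plain CFG result unchanged.
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)
print(noise_pred.shape)  # torch.Size([2, 4, 64, 64])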
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) 
prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = 
image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or 
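# Example (not part of the diffusers source): the computation performed by
# `get_guidance_scale_embedding` above, written out with concrete numbers. `__call__` uses it to
# build `timestep_cond` from `guidance_scale - 1` when the UNet has `time_cond_proj_dim` set
# (guidance-distilled / LCM-style UNets). The embedding dimension here is an illustrative value.
import torch

w = torch.tensor([7.5 - 1.0])        # guidance_scale - 1, as passed in `__call__`
embedding_dim = 256                  # stands in for unet.config.time_cond_proj_dim
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = (w * 1000.0)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
print(emb.shape)  # torch.Size([1, 256])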
self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True (image_embeds, negative_image_embeds) = self.encode_image(ip_adapter_image, device, num_images_per_prompt, output_hidden_state) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None else None timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = 
callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer from ....configuration_utils import FrozenDict from ....image_processor import PipelineImageInput, VaeImageProcessor from ....loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import KarrasDiffusionSchedulers from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .modeling_roberta_series import RobertaSeriesModelWithTransformation from .pipeline_output import AltDiffusionPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import requests\n >>> import torch\n >>> from PIL import Image\n >>> from io import BytesIO\n\n >>> from diffusers import AltDiffusionImg2ImgPipeline\n\n >>> device = "cuda"\n >>> model_id_or_path = "BAAI/AltDiffusion-m9"\n >>> pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n\n >>> response = requests.get(url)\n >>> init_image = Image.open(BytesIO(response.content)).convert("RGB")\n >>> init_image = init_image.resize((768, 512))\n\n >>> # "A fantasy landscape, trending on artstation"\n >>> prompt = "幻想风景, artstation"\n\n >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images\n >>> images[0].save("幻想风景.png")\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 
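# Example (not part of the diffusers source): using the `callback_on_step_end` hook of
# AltDiffusionPipeline.__call__ above to inspect the denoising latents at every step (the
# deprecated `callback`/`callback_steps` arguments are the older path). The checkpoint id comes
# from the example docstring; the prompt is illustrative.
import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion-m9", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

def on_step_end(pipeline, step, timestep, callback_kwargs):
    # `callback_kwargs` contains the tensors requested via `callback_on_step_end_tensor_inputs`.
    print(step, timestep, callback_kwargs["latents"].shape)
    return callback_kwargs

image = pipe(
    "dark elf princess, highly detailed, digital painting",
    callback_on_step_end=on_step_end,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]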
'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def preprocess(image): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 8 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class AltDiffusionImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: RobertaSeriesModelWithTransformation, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has `clip_sample` set to True. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as leaving `clip_sample=True` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide by the conditions of the Alt Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker.
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
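# Legacy behaviour: `_encode_prompt` simply wraps `encode_prompt` and concatenates the
# returned (prompt_embeds, negative_prompt_embeds) tuple back into a single tensor ordered
# [negative, positive], which is the layout older callers of this deprecated helper expect.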
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}.
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). Initial images are now being duplicated to match the number of text prompts. Note that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.'
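# This branch tiles the encoded image latents along the batch dimension when the prompt
# count is an exact multiple of the image count; a non-integer ratio raises a ValueError in
# the elif below instead of silently broadcasting.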
deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, strength: float=0.8, num_inference_steps: Optional[int]=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: Optional[float]=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: int=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None
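# The optional LoRA `scale` is read from `cross_attention_kwargs` and forwarded to
# `encode_prompt` below, so the text-encoder LoRA layers are scaled with the same value that
# the UNet attention processors receive via `cross_attention_kwargs` during denoising.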
(prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True (image_embeds, negative_image_embeds) = self.encode_image(ip_adapter_image, device, num_images_per_prompt, output_hidden_state) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) image = self.image_processor.preprocess(image) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None else None timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents 
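# With `output_type='latent'` the raw latents are returned without VAE decoding, so the
# safety checker is skipped and no NSFW flags are produced for this branch.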
has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/deprecated/alt_diffusion/pipeline_output.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image from ....utils import BaseOutput @dataclass class AltDiffusionPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] # File: diffusers-main/src/diffusers/pipelines/deprecated/audio_diffusion/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'mel': ['Mel'], 'pipeline_audio_diffusion': ['AudioDiffusionPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .mel import Mel from .pipeline_audio_diffusion import AudioDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py import numpy as np from ....configuration_utils import ConfigMixin, register_to_config from ....schedulers.scheduling_utils import SchedulerMixin try: import librosa _librosa_can_be_imported = True _import_error = '' except Exception as e: _librosa_can_be_imported = False _import_error = f'Cannot import librosa because {e}. Make sure to correctly install librosa to be able to use it.'
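# librosa is an optional dependency: a failed import is only recorded here and re-raised from
# `Mel.__init__` below, so importing this module stays cheap when librosa is missing.
# Minimal usage sketch of the `Mel` helper defined below (the audio path is a placeholder):
#     mel = Mel(x_res=256, y_res=256)
#     mel.load_audio("example.wav")
#     image = mel.audio_slice_to_image(0)
#     audio = mel.image_to_audio(image)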
from PIL import Image class Mel(ConfigMixin, SchedulerMixin): config_name = 'mel_config.json' @register_to_config def __init__(self, x_res: int=256, y_res: int=256, sample_rate: int=22050, n_fft: int=2048, hop_length: int=512, top_db: int=80, n_iter: int=32): self.hop_length = hop_length self.sr = sample_rate self.n_fft = n_fft self.top_db = top_db self.n_iter = n_iter self.set_resolution(x_res, y_res) self.audio = None if not _librosa_can_be_imported: raise ValueError(_import_error) def set_resolution(self, x_res: int, y_res: int): self.x_res = x_res self.y_res = y_res self.n_mels = self.y_res self.slice_size = self.x_res * self.hop_length - 1 def load_audio(self, audio_file: str=None, raw_audio: np.ndarray=None): if audio_file is not None: (self.audio, _) = librosa.load(audio_file, mono=True, sr=self.sr) else: self.audio = raw_audio if len(self.audio) < self.x_res * self.hop_length: self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))]) def get_number_of_slices(self) -> int: return len(self.audio) // self.slice_size def get_audio_slice(self, slice: int=0) -> np.ndarray: return self.audio[self.slice_size * slice:self.slice_size * (slice + 1)] def get_sample_rate(self) -> int: return self.sr def audio_slice_to_image(self, slice: int) -> Image.Image: S = librosa.feature.melspectrogram(y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels) log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db) bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8) image = Image.fromarray(bytedata) return image def image_to_audio(self, image: Image.Image) -> np.ndarray: bytedata = np.frombuffer(image.tobytes(), dtype='uint8').reshape((image.height, image.width)) log_S = bytedata.astype('float') * self.top_db / 255 - self.top_db S = librosa.db_to_power(log_S) audio = librosa.feature.inverse.mel_to_audio(S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter) return audio # File: diffusers-main/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import DDIMScheduler, DDPMScheduler from ....utils.torch_utils import randn_tensor from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class AudioDiffusionPipeline(DiffusionPipeline): _optional_components = ['vqvae'] def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae) def get_default_steps(self) -> int: return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000 @torch.no_grad() def __call__(self, batch_size: int=1, audio_file: str=None, raw_audio: np.ndarray=None, slice: int=0, start_step: int=0, steps: int=None, generator: torch.Generator=None, mask_start_secs: float=0, mask_end_secs: float=0, step_generator: torch.Generator=None, eta: float=0, noise: torch.Tensor=None, encoding: torch.Tensor=None, return_dict=True) -> Union[Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]]]: steps = steps or self.get_default_steps() self.scheduler.set_timesteps(steps) step_generator = 
step_generator or generator if isinstance(self.unet.config.sample_size, int): self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: noise = randn_tensor((batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1]), generator=generator, device=self.device) images = noise mask = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(audio_file, raw_audio) input_image = self.mel.audio_slice_to_image(slice) input_image = np.frombuffer(input_image.tobytes(), dtype='uint8').reshape((input_image.height, input_image.width)) input_image = input_image / 255 * 2 - 1 input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device) if self.vqvae is not None: input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(generator=generator)[0] input_images = self.vqvae.config.scaling_factor * input_images if start_step > 0: images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1]) pixels_per_second = self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length mask_start = int(mask_start_secs * pixels_per_second) mask_end = int(mask_end_secs * pixels_per_second) mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])) for (step, t) in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])): if isinstance(self.unet, UNet2DConditionModel): model_output = self.unet(images, t, encoding)['sample'] else: model_output = self.unet(images, t)['sample'] if isinstance(self.scheduler, DDIMScheduler): images = self.scheduler.step(model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator)['prev_sample'] else: images = self.scheduler.step(model_output=model_output, timestep=t, sample=images, generator=step_generator)['prev_sample'] if mask is not None: if mask_start > 0: images[:, :, :, :mask_start] = mask[:, step, :, :mask_start] if mask_end > 0: images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:] if self.vqvae is not None: images = 1 / self.vqvae.config.scaling_factor * images images = self.vqvae.decode(images)['sample'] images = (images / 2 + 0.5).clamp(0, 1) images = images.cpu().permute(0, 2, 3, 1).numpy() images = (images * 255).round().astype('uint8') images = list((Image.fromarray(_[:, :, 0]) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_, mode='RGB').convert('L') for _ in images)) audios = [self.mel.image_to_audio(_) for _ in images] if not return_dict: return (images, (self.mel.get_sample_rate(), audios)) return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images)) @torch.no_grad() def encode(self, images: List[Image.Image], steps: int=50) -> np.ndarray: assert isinstance(self.scheduler, DDIMScheduler) self.scheduler.set_timesteps(steps) sample = np.array([np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images]) sample = sample / 255 * 2 - 1 sample = torch.Tensor(sample).to(self.device) for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))): prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps alpha_prod_t = self.scheduler.alphas_cumprod[t] alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod beta_prod_t = 
1 - alpha_prod_t model_output = self.unet(sample, t)['sample'] pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor: theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1)) return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta) # File: diffusers-main/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_latent_diffusion_uncond': ['LDMPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_latent_diffusion_uncond import LDMPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py import inspect from typing import List, Optional, Tuple, Union import torch from ....models import UNet2DModel, VQModel from ....schedulers import DDIMScheduler from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class LDMPipeline(DiffusionPipeline): def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): super().__init__() self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) @torch.no_grad() def __call__(self, batch_size: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, eta: float=0.0, num_inference_steps: int=50, output_type: Optional[str]='pil', return_dict: bool=True, **kwargs) -> Union[Tuple, ImagePipelineOutput]: latents = randn_tensor((batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator) latents = latents.to(self.device) latents = latents * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(num_inference_steps) accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_kwargs = {} if accepts_eta: extra_kwargs['eta'] = eta for t in self.progress_bar(self.scheduler.timesteps): latent_model_input = self.scheduler.scale_model_input(latents, t) noise_prediction = self.unet(latent_model_input, t).sample latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample latents = latents / self.vqvae.config.scaling_factor image = self.vqvae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/deprecated/pndm/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_pndm': ['PNDMPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_pndm import PNDMPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/deprecated/pndm/pipeline_pndm.py from typing import List, Optional, Tuple, Union import torch from ....models import UNet2DModel 
from ....schedulers import PNDMScheduler from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class PNDMPipeline(DiffusionPipeline): unet: UNet2DModel scheduler: PNDMScheduler def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler): super().__init__() scheduler = PNDMScheduler.from_config(scheduler.config) self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__(self, batch_size: int=1, num_inference_steps: int=50, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', return_dict: bool=True, **kwargs) -> Union[ImagePipelineOutput, Tuple]: image = randn_tensor((batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, device=self.device) self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): model_output = self.unet(image, t).sample image = self.scheduler.step(model_output, t, image).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/deprecated/repaint/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_repaint': ['RePaintPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_repaint import RePaintPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py from typing import List, Optional, Tuple, Union import numpy as np import PIL.Image import torch from ....models import UNet2DModel from ....schedulers import RePaintScheduler from ....utils import PIL_INTERPOLATION, deprecate, logging from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 8 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]): if isinstance(mask, torch.Tensor): return mask elif isinstance(mask, PIL.Image.Image): mask = [mask] if isinstance(mask[0], PIL.Image.Image): (w, h) = mask[0].size (w, h) = (x - x % 32 for x in (w, h)) mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask] mask = np.concatenate(mask, axis=0) mask = mask.astype(np.float32) / 255.0 mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) elif isinstance(mask[0], torch.Tensor): mask = torch.cat(mask, dim=0) return mask class RePaintPipeline(DiffusionPipeline): unet: UNet2DModel scheduler: RePaintScheduler model_cpu_offload_seq = 'unet' def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__(self, image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], num_inference_steps: int=250, eta: float=0.0, jump_length: int=10, jump_n_sample: int=10, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', return_dict: bool=True) -> Union[ImagePipelineOutput, Tuple]: original_image = image original_image = _preprocess_image(original_image) original_image = original_image.to(device=self._execution_device, dtype=self.unet.dtype) mask_image = _preprocess_mask(mask_image) mask_image = mask_image.to(device=self._execution_device, dtype=self.unet.dtype) batch_size = original_image.shape[0] if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') image_shape = original_image.shape image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype) self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self._execution_device) self.scheduler.eta = eta t_last = self.scheduler.timesteps[0] + 1 generator = generator[0] if isinstance(generator, list) else generator for (i, t) in enumerate(self.progress_bar(self.scheduler.timesteps)): if t < t_last: model_output = self.unet(image, t).sample image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample else: image = self.scheduler.undo_step(image, t_last, generator) t_last = t image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_score_sde_ve': ['ScoreSdeVePipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_score_sde_ve import ScoreSdeVePipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py from typing import List, Optional, Tuple, Union import torch from ....models import UNet2DModel from ....schedulers import ScoreSdeVeScheduler from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class ScoreSdeVePipeline(DiffusionPipeline): unet: UNet2DModel scheduler: ScoreSdeVeScheduler def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__(self, batch_size: int=1, num_inference_steps: int=2000, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', return_dict: bool=True, **kwargs) -> Union[ImagePipelineOutput, Tuple]: img_size = self.unet.config.sample_size shape = (batch_size, 3, img_size, img_size) model = self.unet sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma sample = sample.to(self.device) self.scheduler.set_timesteps(num_inference_steps) self.scheduler.set_sigmas(num_inference_steps) for (i, t) in enumerate(self.progress_bar(self.scheduler.timesteps)): sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device) for _ in range(self.scheduler.config.correct_steps): model_output = self.unet(sample, sigma_t).sample sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample model_output = model(sample, sigma_t).sample output = self.scheduler.step_pred(model_output, t, sample, generator=generator) (sample, sample_mean) = (output.prev_sample, output.prev_sample_mean) sample = sample_mean.clamp(0, 1) sample = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': sample = self.numpy_to_pil(sample) if not return_dict: return (sample,) return ImagePipelineOutput(images=sample) # File: diffusers-main/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, 
_LazyModule, is_note_seq_available, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, get_objects_from_module _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['continous_encoder'] = ['SpectrogramContEncoder'] _import_structure['notes_encoder'] = ['SpectrogramNotesEncoder'] _import_structure['pipeline_spectrogram_diffusion'] = ['SpectrogramContEncoder', 'SpectrogramDiffusionPipeline', 'T5FilmDecoder'] try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_transformers_and_torch_and_note_seq_objects _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) else: _import_structure['midi_utils'] = ['MidiProcessor'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline from .pipeline_spectrogram_diffusion import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import SpectrogramNotesEncoder from .pipeline_spectrogram_diffusion import T5FilmDecoder try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_transformers_and_torch_and_note_seq_objects import * else: from .midi_utils import MidiProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): @register_to_config def __init__(self, input_dims: int, targets_context_length: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool=False): super().__init__() self.input_proj = nn.Linear(input_dims, d_model, bias=False) self.position_encoding = nn.Embedding(targets_context_length, d_model) self.position_encoding.weight.requires_grad = False self.dropout_pre = nn.Dropout(p=dropout_rate) t5config = T5Config(d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, feed_forward_proj=feed_forward_proj, dropout_rate=dropout_rate, is_decoder=is_decoder, is_encoder_decoder=False) self.encoders = nn.ModuleList() for lyr_num in range(num_layers): lyr = T5Block(t5config) self.encoders.append(lyr) self.layer_norm = T5LayerNorm(d_model) self.dropout_post = nn.Dropout(p=dropout_rate) def forward(self, encoder_inputs, encoder_inputs_mask): x = 
self.input_proj(encoder_inputs) max_positions = encoder_inputs.shape[1] input_positions = torch.arange(max_positions, device=encoder_inputs.device) seq_lens = encoder_inputs_mask.sum(-1) input_positions = torch.roll(input_positions.unsqueeze(0), tuple(seq_lens.tolist()), dims=0) x += self.position_encoding(input_positions) x = self.dropout_pre(x) input_shape = encoder_inputs.size() extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) for lyr in self.encoders: x = lyr(x, extended_attention_mask)[0] x = self.layer_norm(x) return (self.dropout_post(x), encoder_inputs_mask) # File: diffusers-main/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py import dataclasses import math import os from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union import numpy as np import torch import torch.nn.functional as F from ....utils import is_note_seq_available from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH if is_note_seq_available(): import note_seq else: raise ImportError('Please install note-seq via `pip install note-seq`') INPUT_FEATURE_LENGTH = 2048 SAMPLE_RATE = 16000 HOP_SIZE = 320 FRAME_RATE = int(SAMPLE_RATE // HOP_SIZE) DEFAULT_STEPS_PER_SECOND = 100 DEFAULT_MAX_SHIFT_SECONDS = 10 DEFAULT_NUM_VELOCITY_BINS = 1 SLAKH_CLASS_PROGRAMS = {'Acoustic Piano': 0, 'Electric Piano': 4, 'Chromatic Percussion': 8, 'Organ': 16, 'Acoustic Guitar': 24, 'Clean Electric Guitar': 26, 'Distorted Electric Guitar': 29, 'Acoustic Bass': 32, 'Electric Bass': 33, 'Violin': 40, 'Viola': 41, 'Cello': 42, 'Contrabass': 43, 'Orchestral Harp': 46, 'Timpani': 47, 'String Ensemble': 48, 'Synth Strings': 50, 'Choir and Voice': 52, 'Orchestral Hit': 55, 'Trumpet': 56, 'Trombone': 57, 'Tuba': 58, 'French Horn': 60, 'Brass Section': 61, 'Soprano/Alto Sax': 64, 'Tenor Sax': 66, 'Baritone Sax': 67, 'Oboe': 68, 'English Horn': 69, 'Bassoon': 70, 'Clarinet': 71, 'Pipe': 73, 'Synth Lead': 80, 'Synth Pad': 88} @dataclasses.dataclass class NoteRepresentationConfig: onsets_only: bool include_ties: bool @dataclasses.dataclass class NoteEventData: pitch: int velocity: Optional[int] = None program: Optional[int] = None is_drum: Optional[bool] = None instrument: Optional[int] = None @dataclasses.dataclass class NoteEncodingState: active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict) @dataclasses.dataclass class EventRange: type: str min_value: int max_value: int @dataclasses.dataclass class Event: type: str value: int class Tokenizer: def __init__(self, regular_ids: int): self._num_special_tokens = 3 self._num_regular_tokens = regular_ids def encode(self, token_ids): encoded = [] for token_id in token_ids: if not 0 <= token_id < self._num_regular_tokens: raise ValueError(f'token_id {token_id} does not fall within valid range of [0, {self._num_regular_tokens})') encoded.append(token_id + self._num_special_tokens) encoded.append(1) encoded = encoded + [0] * (INPUT_FEATURE_LENGTH - len(encoded)) return encoded class Codec: def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges: List[EventRange]): self.steps_per_second = steps_per_second self._shift_range = EventRange(type='shift', min_value=0, max_value=max_shift_steps) self._event_ranges = [self._shift_range] + event_ranges assert len(self._event_ranges) == len({er.type for er in self._event_ranges}) @property def num_classes(self) -> int: return sum((er.max_value - er.min_value + 1 for er in self._event_ranges)) 
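# Codec packs events from every range into one contiguous integer index space: shift events
# occupy indices [0, max_shift_steps] and each following EventRange is offset by the sizes of
# the ranges before it, which is what encode_event / decode_event_index below rely on.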
def is_shift_event_index(self, index: int) -> bool: return self._shift_range.min_value <= index and index <= self._shift_range.max_value @property def max_shift_steps(self) -> int: return self._shift_range.max_value def encode_event(self, event: Event) -> int: offset = 0 for er in self._event_ranges: if event.type == er.type: if not er.min_value <= event.value <= er.max_value: raise ValueError(f'Event value {event.value} is not within valid range [{er.min_value}, {er.max_value}] for type {event.type}') return offset + event.value - er.min_value offset += er.max_value - er.min_value + 1 raise ValueError(f'Unknown event type: {event.type}') def event_type_range(self, event_type: str) -> Tuple[int, int]: offset = 0 for er in self._event_ranges: if event_type == er.type: return (offset, offset + (er.max_value - er.min_value)) offset += er.max_value - er.min_value + 1 raise ValueError(f'Unknown event type: {event_type}') def decode_event_index(self, index: int) -> Event: offset = 0 for er in self._event_ranges: if offset <= index <= offset + er.max_value - er.min_value: return Event(type=er.type, value=er.min_value + index - offset) offset += er.max_value - er.min_value + 1 raise ValueError(f'Unknown event index: {index}') @dataclasses.dataclass class ProgramGranularity: tokens_map_fn: Callable[[Sequence[int], Codec], Sequence[int]] program_map_fn: Callable[[int], int] def drop_programs(tokens, codec: Codec): (min_program_id, max_program_id) = codec.event_type_range('program') return tokens[(tokens < min_program_id) | (tokens > max_program_id)] def programs_to_midi_classes(tokens, codec): (min_program_id, max_program_id) = codec.event_type_range('program') is_program = (tokens >= min_program_id) & (tokens <= max_program_id) return np.where(is_program, min_program_id + 8 * ((tokens - min_program_id) // 8), tokens) PROGRAM_GRANULARITIES = {'flat': ProgramGranularity(tokens_map_fn=drop_programs, program_map_fn=lambda program: 0), 'midi_class': ProgramGranularity(tokens_map_fn=programs_to_midi_classes, program_map_fn=lambda program: 8 * (program // 8)), 'full': ProgramGranularity(tokens_map_fn=lambda tokens, codec: tokens, program_map_fn=lambda program: program)} def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1): signal_length = signal.shape[axis] if pad_end: frames_overlap = frame_length - frame_step rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap) pad_size = int(frame_length - rest_samples) if pad_size != 0: pad_axis = [0] * signal.ndim pad_axis[axis] = pad_size signal = F.pad(signal, pad_axis, 'constant', pad_value) frames = signal.unfold(axis, frame_length, frame_step) return frames def program_to_slakh_program(program): for slakh_program in sorted(SLAKH_CLASS_PROGRAMS.values(), reverse=True): if program >= slakh_program: return slakh_program def audio_to_frames(samples, hop_size: int, frame_rate: int) -> Tuple[Sequence[Sequence[int]], torch.Tensor]: frame_size = hop_size samples = np.pad(samples, [0, frame_size - len(samples) % frame_size], mode='constant') frames = frame(torch.Tensor(samples).unsqueeze(0), frame_length=frame_size, frame_step=frame_size, pad_end=False) num_frames = len(samples) // frame_size times = np.arange(num_frames) / frame_rate return (frames, times) def note_sequence_to_onsets_and_offsets_and_programs(ns: note_seq.NoteSequence) -> Tuple[Sequence[float], Sequence[NoteEventData]]: notes = sorted(ns.notes, key=lambda note: (note.is_drum, note.program, note.pitch)) times = [note.end_time for note in 
notes if not note.is_drum] + [note.start_time for note in notes] values = [NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False) for note in notes if not note.is_drum] + [NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum) for note in notes] return (times, values) def num_velocity_bins_from_codec(codec: Codec): (lo, hi) = codec.event_type_range('velocity') return hi - lo def segment(a, n): return [a[i:i + n] for i in range(0, len(a), n)] def velocity_to_bin(velocity, num_velocity_bins): if velocity == 0: return 0 else: return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY) def note_event_data_to_events(state: Optional[NoteEncodingState], value: NoteEventData, codec: Codec) -> Sequence[Event]: if value.velocity is None: return [Event('pitch', value.pitch)] else: num_velocity_bins = num_velocity_bins_from_codec(codec) velocity_bin = velocity_to_bin(value.velocity, num_velocity_bins) if value.program is None: if state is not None: state.active_pitches[value.pitch, 0] = velocity_bin return [Event('velocity', velocity_bin), Event('pitch', value.pitch)] elif value.is_drum: return [Event('velocity', velocity_bin), Event('drum', value.pitch)] else: if state is not None: state.active_pitches[value.pitch, value.program] = velocity_bin return [Event('program', value.program), Event('velocity', velocity_bin), Event('pitch', value.pitch)] def note_encoding_state_to_events(state: NoteEncodingState) -> Sequence[Event]: events = [] for (pitch, program) in sorted(state.active_pitches.keys(), key=lambda k: k[::-1]): if state.active_pitches[pitch, program]: events += [Event('program', program), Event('pitch', pitch)] events.append(Event('tie', 0)) return events def encode_and_index_events(state, event_times, event_values, codec, frame_times, encode_event_fn, encoding_state_to_events_fn=None): indices = np.argsort(event_times, kind='stable') event_steps = [round(event_times[i] * codec.steps_per_second) for i in indices] event_values = [event_values[i] for i in indices] events = [] state_events = [] event_start_indices = [] state_event_indices = [] cur_step = 0 cur_event_idx = 0 cur_state_event_idx = 0 def fill_event_start_indices_to_cur_step(): while len(event_start_indices) < len(frame_times) and frame_times[len(event_start_indices)] < cur_step / codec.steps_per_second: event_start_indices.append(cur_event_idx) state_event_indices.append(cur_state_event_idx) for (event_step, event_value) in zip(event_steps, event_values): while event_step > cur_step: events.append(codec.encode_event(Event(type='shift', value=1))) cur_step += 1 fill_event_start_indices_to_cur_step() cur_event_idx = len(events) cur_state_event_idx = len(state_events) if encoding_state_to_events_fn: for e in encoding_state_to_events_fn(state): state_events.append(codec.encode_event(e)) for e in encode_event_fn(state, event_value, codec): events.append(codec.encode_event(e)) while cur_step / codec.steps_per_second <= frame_times[-1]: events.append(codec.encode_event(Event(type='shift', value=1))) cur_step += 1 fill_event_start_indices_to_cur_step() cur_event_idx = len(events) event_end_indices = event_start_indices[1:] + [len(events)] events = np.array(events).astype(np.int32) state_events = np.array(state_events).astype(np.int32) event_start_indices = segment(np.array(event_start_indices).astype(np.int32), TARGET_FEATURE_LENGTH) event_end_indices = segment(np.array(event_end_indices).astype(np.int32), TARGET_FEATURE_LENGTH) state_event_indices = 
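# Added worked example (standalone, not in the original file) for velocity_to_bin above:
# MIDI velocity 0 (a note-off) always maps to bin 0, and positive velocities are spread
# over bins 1..num_velocity_bins via a ceiling division against MAX_MIDI_VELOCITY (127).
import math

MAX_MIDI_VELOCITY = 127  # the value note_seq.MAX_MIDI_VELOCITY provides

def _velocity_to_bin(velocity, num_velocity_bins):
    return 0 if velocity == 0 else math.ceil(num_velocity_bins * velocity / MAX_MIDI_VELOCITY)

assert _velocity_to_bin(0, 32) == 0     # note-off
assert _velocity_to_bin(64, 32) == 17   # ceil(32 * 64 / 127)
assert _velocity_to_bin(127, 32) == 32  # full velocity lands in the top bin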
segment(np.array(state_event_indices).astype(np.int32), TARGET_FEATURE_LENGTH) outputs = [] for (start_indices, end_indices, event_indices) in zip(event_start_indices, event_end_indices, state_event_indices): outputs.append({'inputs': events, 'event_start_indices': start_indices, 'event_end_indices': end_indices, 'state_events': state_events, 'state_event_indices': event_indices}) return outputs def extract_sequence_with_indices(features, state_events_end_token=None, feature_key='inputs'): features = features.copy() start_idx = features['event_start_indices'][0] end_idx = features['event_end_indices'][-1] features[feature_key] = features[feature_key][start_idx:end_idx] if state_events_end_token is not None: state_event_start_idx = features['state_event_indices'][0] state_event_end_idx = state_event_start_idx + 1 while features['state_events'][state_event_end_idx - 1] != state_events_end_token: state_event_end_idx += 1 features[feature_key] = np.concatenate([features['state_events'][state_event_start_idx:state_event_end_idx], features[feature_key]], axis=0) return features def map_midi_programs(feature, codec: Codec, granularity_type: str='full', feature_key: str='inputs') -> Mapping[str, Any]: granularity = PROGRAM_GRANULARITIES[granularity_type] feature[feature_key] = granularity.tokens_map_fn(feature[feature_key], codec) return feature def run_length_encode_shifts_fn(features, codec: Codec, feature_key: str='inputs', state_change_event_types: Sequence[str]=()) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]: state_change_event_ranges = [codec.event_type_range(event_type) for event_type in state_change_event_types] def run_length_encode_shifts(features: MutableMapping[str, Any]) -> Mapping[str, Any]: events = features[feature_key] shift_steps = 0 total_shift_steps = 0 output = np.array([], dtype=np.int32) current_state = np.zeros(len(state_change_event_ranges), dtype=np.int32) for event in events: if codec.is_shift_event_index(event): shift_steps += 1 total_shift_steps += 1 else: is_redundant = False for (i, (min_index, max_index)) in enumerate(state_change_event_ranges): if min_index <= event and event <= max_index: if current_state[i] == event: is_redundant = True current_state[i] = event if is_redundant: continue if shift_steps > 0: shift_steps = total_shift_steps while shift_steps > 0: output_steps = np.minimum(codec.max_shift_steps, shift_steps) output = np.concatenate([output, [output_steps]], axis=0) shift_steps -= output_steps output = np.concatenate([output, [event]], axis=0) features[feature_key] = output return features return run_length_encode_shifts(features) def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig): tie_token = codec.encode_event(Event('tie', 0)) state_events_end_token = tie_token if note_representation_config.include_ties else None features = extract_sequence_with_indices(features, state_events_end_token=state_events_end_token, feature_key='inputs') features = map_midi_programs(features, codec) features = run_length_encode_shifts_fn(features, codec, state_change_event_types=['velocity', 'program']) return features class MidiProcessor: def __init__(self): self.codec = Codec(max_shift_steps=DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND, steps_per_second=DEFAULT_STEPS_PER_SECOND, event_ranges=[EventRange('pitch', note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), EventRange('velocity', 0, DEFAULT_NUM_VELOCITY_BINS), EventRange('tie', 0, 0), EventRange('program', note_seq.MIN_MIDI_PROGRAM, 
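# Added illustration (not part of the original file) of run_length_encode_shifts above.
# Runs of single-step shift tokens are collapsed into one token whose value is the
# cumulative number of steps since the start of the sequence (split into chunks of at most
# codec.max_shift_steps), and repeated velocity/program state events are dropped:
#
#   input : [shift(1), shift(1), shift(1), velocity(5), pitch(60), shift(1), velocity(5), pitch(62)]
#   output: [shift(3), velocity(5), pitch(60), shift(4), pitch(62)]
#
# Note the second emitted shift carries 4 (total steps so far), not 1, and the second
# velocity(5) is removed because the velocity state did not change.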
note_seq.MAX_MIDI_PROGRAM), EventRange('drum', note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH)]) self.tokenizer = Tokenizer(self.codec.num_classes) self.note_representation_config = NoteRepresentationConfig(onsets_only=False, include_ties=True) def __call__(self, midi: Union[bytes, os.PathLike, str]): if not isinstance(midi, bytes): with open(midi, 'rb') as f: midi = f.read() ns = note_seq.midi_to_note_sequence(midi) ns_sus = note_seq.apply_sustain_control_changes(ns) for note in ns_sus.notes: if not note.is_drum: note.program = program_to_slakh_program(note.program) samples = np.zeros(int(ns_sus.total_time * SAMPLE_RATE)) (_, frame_times) = audio_to_frames(samples, HOP_SIZE, FRAME_RATE) (times, values) = note_sequence_to_onsets_and_offsets_and_programs(ns_sus) events = encode_and_index_events(state=NoteEncodingState(), event_times=times, event_values=values, frame_times=frame_times, codec=self.codec, encode_event_fn=note_event_data_to_events, encoding_state_to_events_fn=note_encoding_state_to_events) events = [note_representation_processor_chain(event, self.codec, self.note_representation_config) for event in events] input_tokens = [self.tokenizer.encode(event['inputs']) for event in events] return input_tokens # File: diffusers-main/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): @register_to_config def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool=False): super().__init__() self.token_embedder = nn.Embedding(vocab_size, d_model) self.position_encoding = nn.Embedding(max_length, d_model) self.position_encoding.weight.requires_grad = False self.dropout_pre = nn.Dropout(p=dropout_rate) t5config = T5Config(vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False) self.encoders = nn.ModuleList() for lyr_num in range(num_layers): lyr = T5Block(t5config) self.encoders.append(lyr) self.layer_norm = T5LayerNorm(d_model) self.dropout_post = nn.Dropout(p=dropout_rate) def forward(self, encoder_input_tokens, encoder_inputs_mask): x = self.token_embedder(encoder_input_tokens) seq_length = encoder_input_tokens.shape[1] inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device) x += self.position_encoding(inputs_positions) x = self.dropout_pre(x) input_shape = encoder_input_tokens.size() extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) for lyr in self.encoders: x = lyr(x, extended_attention_mask)[0] x = self.layer_norm(x) return (self.dropout_post(x), encoder_inputs_mask) # File: diffusers-main/src/diffusers/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ....models import T5FilmDecoder from ....schedulers import DDPMScheduler from ....utils import is_onnx_available, logging from ....utils.torch_utils import randn_tensor if is_onnx_available(): from ...onnx_utils 
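# Added usage sketch (not part of the original file): MidiProcessor converts a MIDI file
# into the list of token segments that SpectrogramDiffusionPipeline (defined below) expects
# as `input_tokens`. Requires the optional note_seq dependency; "song.mid" is a placeholder path.
processor = MidiProcessor()
input_tokens = processor("song.mid")   # one list of token ids per audio segment
print(len(input_tokens), len(input_tokens[0]))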
import OnnxRuntimeModel from ...pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continuous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder logger = logging.get_logger(__name__) TARGET_FEATURE_LENGTH = 256 class SpectrogramDiffusionPipeline(DiffusionPipeline): _optional_components = ['melgan'] def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: T5FilmDecoder, scheduler: DDPMScheduler, melgan: OnnxRuntimeModel if is_onnx_available() else Any) -> None: super().__init__() self.min_value = math.log(1e-05) self.max_value = 4.0 self.n_dims = 128 self.register_modules(notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan) def scale_features(self, features, output_range=(-1.0, 1.0), clip=False): (min_out, max_out) = output_range if clip: features = torch.clip(features, self.min_value, self.max_value) zero_one = (features - self.min_value) / (self.max_value - self.min_value) return zero_one * (max_out - min_out) + min_out def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False): (min_out, max_out) = input_range outputs = torch.clip(outputs, min_out, max_out) if clip else outputs zero_one = (outputs - min_out) / (max_out - min_out) return zero_one * (self.max_value - self.min_value) + self.min_value def encode(self, input_tokens, continuous_inputs, continuous_mask): tokens_mask = input_tokens > 0 (tokens_encoded, tokens_mask) = self.notes_encoder(encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask) (continuous_encoded, continuous_mask) = self.continuous_encoder(encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def decode(self, encodings_and_masks, input_tokens, noise_time): timesteps = noise_time if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(input_tokens.device) timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device) logits = self.decoder(encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps) return logits @torch.no_grad() def __call__(self, input_tokens: List[List[int]], generator: Optional[torch.Generator]=None, num_inference_steps: int=100, return_dict: bool=True, output_type: str='np', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1) -> Union[AudioPipelineOutput, Tuple]: if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') '' pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32) full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32) ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device) for (i, encoder_input_tokens) in enumerate(input_tokens): if i == 0: encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(device=self.device, dtype=self.decoder.dtype) encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device) else: encoder_continuous_mask = ones encoder_continuous_inputs = 
self.scale_features(encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True) encodings_and_masks = self.encode(input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask) x = randn_tensor(shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype) self.scheduler.set_timesteps(num_inference_steps) for (j, t) in enumerate(self.progress_bar(self.scheduler.timesteps)): output = self.decode(encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps) x = self.scheduler.step(output, t, x, generator=generator).prev_sample mel = self.scale_to_features(x, input_range=[-1.0, 1.0]) encoder_continuous_inputs = mel[:1] pred_mel = mel.cpu().float().numpy() full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1) if callback is not None and i % callback_steps == 0: callback(i, full_pred_mel) logger.info('Generated segment', i) if output_type == 'np' and (not is_onnx_available()): raise ValueError("Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.") elif output_type == 'np' and self.melgan is None: raise ValueError("Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.") if output_type == 'np': output = self.melgan(input_features=full_pred_mel.astype(np.float32)) else: output = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=output) # File: diffusers-main/src/diffusers/pipelines/deprecated/stable_diffusion_variants/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_cycle_diffusion'] = ['CycleDiffusionPipeline'] _import_structure['pipeline_stable_diffusion_inpaint_legacy'] = ['StableDiffusionInpaintPipelineLegacy'] _import_structure['pipeline_stable_diffusion_model_editing'] = ['StableDiffusionModelEditingPipeline'] _import_structure['pipeline_stable_diffusion_paradigms'] = ['StableDiffusionParadigmsPipeline'] _import_structure['pipeline_stable_diffusion_pix2pix_zero'] = ['StableDiffusionPix2PixZeroPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, 
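# Added usage sketch (not part of the original sources), combining MidiProcessor with the
# pipeline above. The import paths and checkpoint name are assumptions about how this
# deprecated pipeline is exposed; ONNX Runtime must be available for the MelGAN vocoder
# that backs output_type='np'.
import torch
from diffusers.pipelines.deprecated.spectrogram_diffusion import MidiProcessor, SpectrogramDiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion").to(device)
processor = MidiProcessor()
output = pipe(processor("song.mid"), num_inference_steps=100)  # "song.mid" is a placeholder
audio = output.audios[0]  # waveform rendered by the MelGAN vocoder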
module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ....configuration_utils import FrozenDict from ....image_processor import PipelineImageInput, VaeImageProcessor from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import DDIMScheduler from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) def preprocess(image): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 8 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def posterior_sample(scheduler, latents, timestep, clean_latents, generator, eta): prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps if prev_timestep <= 0: return clean_latents alpha_prod_t = scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod variance = scheduler._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** 0.5 e_t = (latents - alpha_prod_t ** 0.5 * clean_latents) / (1 - alpha_prod_t) ** 0.5 dir_xt = (1.0 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * e_t noise = std_dev_t * randn_tensor(clean_latents.shape, dtype=clean_latents.dtype, device=clean_latents.device, generator=generator) prev_latents = alpha_prod_t_prev ** 0.5 * clean_latents + dir_xt + noise return prev_latents def compute_noise(scheduler, prev_latents, latents, timestep, noise_pred, eta): prev_timestep = timestep - 
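# Added note (not part of the original file): posterior_sample above draws x_{t-1} from the
# DDIM posterior of the *source* latents given their clean encoding x_0,
#   x_{t-1} = sqrt(alpha_{t-1}) * x_0 + sqrt(1 - alpha_{t-1} - sigma_t^2) * eps_t + sigma_t * z,
# with eps_t recovered from (x_t, x_0) and sigma_t = eta * sqrt(variance). compute_noise
# (continued on the next line) solves the same update for z given the model's noise
# prediction, so CycleDiffusion can pass that z back into scheduler.step(...,
# variance_noise=noise) and keep the source and target trajectories paired.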
scheduler.config.num_train_timesteps // scheduler.num_inference_steps alpha_prod_t = scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 if scheduler.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) variance = scheduler._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** 0.5 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * noise_pred noise = (prev_latents - (alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction)) / (variance ** 0.5 * eta) return noise class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
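# Added note (not part of the original file): the deprecated wrapper below just forwards to
# encode_prompt(), which returns a (prompt_embeds, negative_prompt_embeds) tuple, and then
# restores the old single-tensor layout used for classifier-free guidance by concatenating
# the negative embeddings in front of the positive ones, e.g. (names are hypothetical):
#   cond, uncond = pipe.encode_prompt("a photo of a dog", device, 1, True)
#   prompt_embeds = torch.cat([uncond, cond])  # shape (2 * batch, seq_len, dim)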
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def check_inputs(self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): image = image.to(device=device, dtype=dtype) batch_size = image.shape[0] if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if isinstance(generator, list): init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. 
Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt * num_images_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) clean_latents = init_latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return (latents, clean_latents) @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], source_prompt: Union[str, List[str]], image: PipelineImageInput=None, strength: float=0.8, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=7.5, source_guidance_scale: Optional[float]=1, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None): self.check_inputs(prompt, strength, callback_steps) batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None prompt_embeds_tuple = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, prompt_embeds=prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) source_prompt_embeds_tuple = self.encode_prompt(source_prompt, device, num_images_per_prompt, do_classifier_free_guidance, None, clip_skip=clip_skip) if prompt_embeds_tuple[1] is not None: prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) else: prompt_embeds = prompt_embeds_tuple[0] if source_prompt_embeds_tuple[1] is not None: source_prompt_embeds = torch.cat([source_prompt_embeds_tuple[1], source_prompt_embeds_tuple[0]]) else: source_prompt_embeds = source_prompt_embeds_tuple[0] image = self.image_processor.preprocess(image) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) (latents, clean_latents) = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator) source_latents = latents extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) generator = extra_step_kwargs.pop('generator', None) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents source_latent_model_input = torch.cat([source_latents] * 2) if do_classifier_free_guidance 
else source_latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) source_latent_model_input = self.scheduler.scale_model_input(source_latent_model_input, t) if do_classifier_free_guidance: concat_latent_model_input = torch.stack([source_latent_model_input[0], latent_model_input[0], source_latent_model_input[1], latent_model_input[1]], dim=0) concat_prompt_embeds = torch.stack([source_prompt_embeds[0], prompt_embeds[0], source_prompt_embeds[1], prompt_embeds[1]], dim=0) else: concat_latent_model_input = torch.cat([source_latent_model_input, latent_model_input], dim=0) concat_prompt_embeds = torch.cat([source_prompt_embeds, prompt_embeds], dim=0) concat_noise_pred = self.unet(concat_latent_model_input, t, cross_attention_kwargs=cross_attention_kwargs, encoder_hidden_states=concat_prompt_embeds).sample if do_classifier_free_guidance: (source_noise_pred_uncond, noise_pred_uncond, source_noise_pred_text, noise_pred_text) = concat_noise_pred.chunk(4, dim=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) source_noise_pred = source_noise_pred_uncond + source_guidance_scale * (source_noise_pred_text - source_noise_pred_uncond) else: (source_noise_pred, noise_pred) = concat_noise_pred.chunk(2, dim=0) prev_source_latents = posterior_sample(self.scheduler, source_latents, t, clean_latents, generator=generator, **extra_step_kwargs) noise = compute_noise(self.scheduler, prev_source_latents, source_latents, t, source_noise_pred, **extra_step_kwargs) source_latents = prev_source_latents latents = self.scheduler.step(noise_pred, t, latents, variance_noise=noise, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTokenizer from ....configuration_utils import FrozenDict from ....schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ....utils import deprecate, logging from ...onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel from ...pipeline_utils import DiffusionPipeline from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) def preprocess(image): (w, h) = image.size (w, h) = (x - x % 32 for x in (w, h)) image = image.resize((w, h), resample=PIL.Image.LANCZOS) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) return 2.0 * image - 
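# Added usage sketch (not part of the original sources): CycleDiffusionPipeline edits an
# existing image by pairing a DDIM trajectory for the source prompt with one for the target
# prompt. The model id, image URL and parameter values below are illustrative assumptions.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/black_cat.png")  # placeholder URL
image = pipe(
    prompt="A black colored dog",         # what the edited image should show
    source_prompt="A black colored cat",  # what the input image shows
    image=init_image,
    strength=0.85,
    guidance_scale=3.0,
    source_guidance_scale=1.0,
    eta=0.1,
    num_inference_steps=100,
).images[0]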
1.0 def preprocess_mask(mask, scale_factor=8): mask = mask.convert('L') (w, h) = mask.size (w, h) = (x - x % 32 for x in (w, h)) mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL.Image.NEAREST) mask = np.array(mask).astype(np.float32) / 255.0 mask = np.tile(mask, (4, 1, 1)) mask = mask[None].transpose(0, 1, 2, 3) mask = 1 - mask return mask class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline): _optional_components = ['safety_checker', 'feature_extractor'] _is_onnx = True vae_encoder: OnnxRuntimeModel vae_decoder: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] safety_checker: OnnxRuntimeModel feature_extractor: CLIPImageProcessor def __init__(self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt: Union[str, List[str]], num_images_per_prompt: Optional[int], do_classifier_free_guidance: bool, negative_prompt: Optional[str], prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='np').input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='np') negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def check_inputs(self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def __call__(self, prompt: Union[str, List[str]], image: Union[np.ndarray, PIL.Image.Image]=None, mask_image: Union[np.ndarray, PIL.Image.Image]=None, strength: float=0.8, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[np.random.RandomState]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1): self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if generator is None: generator = np.random self.scheduler.set_timesteps(num_inference_steps) if isinstance(image, PIL.Image.Image): image = preprocess(image) do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) latents_dtype = prompt_embeds.dtype image = image.astype(latents_dtype) init_latents = self.vae_encoder(sample=image)[0] init_latents = 0.18215 * init_latents init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) init_latents_orig = init_latents if not isinstance(mask_image, np.ndarray): mask_image = preprocess_mask(mask_image, 8) mask_image = mask_image.astype(latents_dtype) mask = np.concatenate([mask_image] * num_images_per_prompt, axis=0) if not mask.shape == init_latents.shape: raise ValueError('The mask and image should be the same size!') offset = self.scheduler.config.get('steps_offset', 0) init_timestep = int(num_inference_steps * strength) + offset init_timestep = min(init_timestep, num_inference_steps) timesteps = self.scheduler.timesteps.numpy()[-init_timestep] timesteps = np.array([timesteps] * batch_size * num_images_per_prompt) noise = generator.randn(*init_latents.shape).astype(latents_dtype) init_latents = self.scheduler.add_noise(torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timesteps)) init_latents = init_latents.numpy() accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = 
eta latents = init_latents t_start = max(num_inference_steps - init_timestep + offset, 0) timesteps = self.scheduler.timesteps[t_start:].numpy() timestep_dtype = next((input.type for input in self.unet.model.get_inputs() if input.name == 'timestep'), 'tensor(float)') timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs).prev_sample latents = latents.numpy() init_latents_proper = self.scheduler.add_noise(torch.from_numpy(init_latents_orig), torch.from_numpy(noise), torch.from_numpy(np.array([t]))) init_latents_proper = init_latents_proper.numpy() latents = init_latents_proper * mask + latents * (1 - mask) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in range(latents.shape[0])]) image = np.clip(image / 2 + 0.5, 0, 1) image = image.transpose((0, 2, 3, 1)) if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='np').pixel_values.astype(image.dtype) (images, has_nsfw_concept) = ([], []) for i in range(image.shape[0]): (image_i, has_nsfw_concept_i) = self.safety_checker(clip_input=safety_checker_input[i:i + 1], images=image[i:i + 1]) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) else: has_nsfw_concept = None if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ....configuration_utils import FrozenDict from ....image_processor import VaeImageProcessor from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import KarrasDiffusionSchedulers from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline from ...stable_diffusion import StableDiffusionPipelineOutput from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) def preprocess_image(image, batch_size): (w, h) = image.size (w, h) = (x - x % 8 for x in (w, h)) image = 
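# Added note (not part of the original file): the legacy inpainting loop above keeps the
# region that should stay untouched consistent by re-noising the original latents to the
# current timestep and blending with the mask,
#   init_latents_proper = scheduler.add_noise(init_latents_orig, noise, t)
#   latents = init_latents_proper * mask + latents * (1 - mask)
# Because preprocess_mask inverts the user mask (mask = 1 - mask), white regions of the
# supplied mask image end up with mask == 0 and are the only pixels actually repainted.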
image.resize((w, h), resample=PIL_INTERPOLATION['lanczos']) image = np.array(image).astype(np.float32) / 255.0 image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) image = torch.from_numpy(image) return 2.0 * image - 1.0 def preprocess_mask(mask, batch_size, scale_factor=8): if not isinstance(mask, torch.Tensor): mask = mask.convert('L') (w, h) = mask.size (w, h) = (x - x % 8 for x in (w, h)) mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION['nearest']) mask = np.array(mask).astype(np.float32) / 255.0 mask = np.tile(mask, (4, 1, 1)) mask = np.vstack([mask[None]] * batch_size) mask = 1 - mask mask = torch.from_numpy(mask) return mask else: valid_mask_channel_sizes = [1, 3] if mask.shape[3] in valid_mask_channel_sizes: mask = mask.permute(0, 3, 1, 2) elif mask.shape[1] not in valid_mask_channel_sizes: raise ValueError(f'Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension, but received mask of shape {tuple(mask.shape)}') mask = mask.mean(dim=1, keepdim=True) (h, w) = mask.shape[-2:] (h, w) = (x - x % 8 for x in (h, w)) mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor)) return mask class StableDiffusionInpaintPipelineLegacy(DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['feature_extractor'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() deprecation_message = f'The class {self.__class__} is deprecated and will be removed in v1.0.0. You can achieve exactly the same functionality by loading your model into `StableDiffusionInpaintPipeline` instead. See https://github.com/huggingface/diffusers/pull/3533 for more information.' deprecate('legacy is outdated', '1.0.0', deprecation_message, standard_warn=False) if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
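# Deprecation shim: `_encode_prompt` delegates to `encode_prompt`, which returns a
# (prompt_embeds, negative_prompt_embeds) tuple, and then rebuilds the legacy concatenated
# layout [negative_prompt_embeds, prompt_embeds] that older classifier-free-guidance callers expect.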
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, num_images_per_prompt, dtype, device, generator): image = image.to(device=device, dtype=dtype) init_latent_dist = self.vae.encode(image).latent_dist init_latents = init_latent_dist.sample(generator=generator) init_latents = self.vae.config.scaling_factor * init_latents init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) init_latents_orig = init_latents noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return (latents, init_latents_orig, noise) @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, image: Union[torch.Tensor, PIL.Image.Image]=None, mask_image: Union[torch.Tensor, PIL.Image.Image]=None, strength: float=0.8, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, add_predicted_noise: Optional[bool]=False, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None): self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if not isinstance(image, torch.Tensor): image = preprocess_image(image, batch_size) mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) (latents, init_latents_orig, noise) = self.prepare_latents(image, latent_timestep, 
num_images_per_prompt, prompt_embeds.dtype, device, generator) mask = mask_image.to(device=device, dtype=latents.dtype) mask = torch.cat([mask] * num_images_per_prompt) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if add_predicted_noise: init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise_pred_uncond, torch.tensor([t])) else: init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) latents = init_latents_proper * mask + latents * (1 - mask) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) latents = init_latents_orig * mask + latents * (1 - mask) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py import copy import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ....image_processor import VaeImageProcessor from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import PNDMScheduler from ....schedulers.scheduling_utils import SchedulerMixin from ....utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) AUGS_CONST = ['A photo of ', 'An image of ', 'A picture of '] class StableDiffusionModelEditingPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, 
StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: SchedulerMixin, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True, with_to_k: bool=True, with_augs: list=AUGS_CONST): super().__init__() if isinstance(scheduler, PNDMScheduler): logger.error('PNDMScheduler for this pipeline is currently not supported.') if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.with_to_k = with_to_k self.with_augs = with_augs ca_layers = [] def append_ca(net_): if net_.__class__.__name__ == 'CrossAttention': ca_layers.append(net_) elif hasattr(net_, 'children'): for net__ in net_.children(): append_ca(net__) for net in self.unet.named_children(): if 'down' in net[0]: append_ca(net[1]) elif 'up' in net[0]: append_ca(net[1]) elif 'mid' in net[0]: append_ca(net[1]) self.ca_clip_layers = [l for l in ca_layers if l.to_v.in_features == 768] self.projection_matrices = [l.to_v for l in self.ca_clip_layers] self.og_matrices = [copy.deepcopy(l.to_v) for l in self.ca_clip_layers] if self.with_to_k: self.projection_matrices = self.projection_matrices + [l.to_k for l in self.ca_clip_layers] self.og_matrices = self.og_matrices + [copy.deepcopy(l.to_k) for l in self.ca_clip_layers] def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
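# Same deprecation shim as above: wrap `encode_prompt` and re-concatenate its tuple output
# as [negative_prompt_embeds, prompt_embeds] for callers that still expect a single tensor.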
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def edit_model(self, source_prompt: str, destination_prompt: str, lamb: float=0.1, restart_params: bool=True): if restart_params: num_ca_clip_layers = len(self.ca_clip_layers) for (idx_, l) in enumerate(self.ca_clip_layers): l.to_v = copy.deepcopy(self.og_matrices[idx_]) self.projection_matrices[idx_] = l.to_v if self.with_to_k: l.to_k = copy.deepcopy(self.og_matrices[num_ca_clip_layers + idx_]) self.projection_matrices[num_ca_clip_layers + idx_] = l.to_k old_texts = [source_prompt] new_texts = [destination_prompt] base = old_texts[0] if old_texts[0][0:1] != 'A' else 'a' + old_texts[0][1:] for aug in self.with_augs: old_texts.append(aug + base) base = new_texts[0] if new_texts[0][0:1] != 'A' else 'a' + new_texts[0][1:] for aug in self.with_augs: new_texts.append(aug + base) (old_embs, new_embs) = ([], []) for (old_text, new_text) in zip(old_texts, new_texts): text_input = self.tokenizer([old_text, new_text], padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] (old_emb, new_emb) = text_embeddings old_embs.append(old_emb) new_embs.append(new_emb) idxs_replaces = [] for (old_text, new_text) in zip(old_texts, new_texts): tokens_a = self.tokenizer(old_text).input_ids tokens_b = self.tokenizer(new_text).input_ids tokens_a = [self.tokenizer.encode('a ')[1] if self.tokenizer.decode(t) == 'an' else t for t in tokens_a] tokens_b = [self.tokenizer.encode('a ')[1] if self.tokenizer.decode(t) == 'an' else t for t in tokens_b] num_orig_tokens = len(tokens_a) idxs_replace = [] j = 0 for i in range(num_orig_tokens): curr_token = tokens_a[i] while tokens_b[j] != curr_token: j += 1 idxs_replace.append(j) j += 1 while j < 77: idxs_replace.append(j) j += 1 while len(idxs_replace) < 77: idxs_replace.append(76) idxs_replaces.append(idxs_replace) (contexts, valuess) = ([], []) for (old_emb, new_emb, idxs_replace) in zip(old_embs, new_embs, idxs_replaces): context = old_emb.detach() values = [] with torch.no_grad(): for layer in self.projection_matrices: values.append(layer(new_emb[idxs_replace]).detach()) contexts.append(context) valuess.append(values) for layer_num in range(len(self.projection_matrices)): mat1 = lamb * self.projection_matrices[layer_num].weight mat2 = lamb * torch.eye(self.projection_matrices[layer_num].weight.shape[1], device=self.projection_matrices[layer_num].weight.device) for (context, values) in zip(contexts, valuess): context_vector = context.reshape(context.shape[0], context.shape[1], 1) context_vector_T = context.reshape(context.shape[0], 1, context.shape[1]) value_vector = values[layer_num].reshape(values[layer_num].shape[0], values[layer_num].shape[1], 1) for_mat1 = (value_vector @ context_vector_T).sum(dim=0) for_mat2 = (context_vector @ context_vector_T).sum(dim=0) mat1 += for_mat1 mat2 += for_mat2 self.projection_matrices[layer_num].weight = torch.nn.Parameter(mat1 @ torch.inverse(mat2)) @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, 
generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: 
diffusers-main/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ....image_processor import VaeImageProcessor from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import KarrasDiffusionSchedulers from ....utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import DDPMParallelScheduler\n >>> from diffusers import StableDiffusionParadigmsPipeline\n\n >>> scheduler = DDPMParallelScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")\n\n >>> pipe = StableDiffusionParadigmsPipeline.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", scheduler=scheduler, torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> ngpu, batch_per_device = torch.cuda.device_count(), 5\n >>> pipe.wrapped_unet = torch.nn.DataParallel(pipe.unet, device_ids=[d for d in range(ngpu)])\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt, parallel=ngpu * batch_per_device, num_inference_steps=1000).images[0]\n ```\n' class StableDiffusionParadigmsPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.wrapped_unet = self.unet def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = 
self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, 
callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _cumsum(self, input, dim, debug=False): if debug: return torch.cumsum(input.cpu().float(), dim=dim).to(input.device) else: return torch.cumsum(input, dim=dim) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, parallel: int=10, tolerance: float=0.1, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, debug: bool=False, clip_skip: int=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) extra_step_kwargs.pop('generator', None) scheduler = self.scheduler parallel = min(parallel, len(scheduler.timesteps)) begin_idx = 0 end_idx = parallel latents_time_evolution_buffer = torch.stack([latents] * (len(scheduler.timesteps) + 1)) noise_array = torch.zeros_like(latents_time_evolution_buffer) for j in range(len(scheduler.timesteps)): base_noise = randn_tensor(shape=latents.shape, generator=generator, device=latents.device, dtype=prompt_embeds.dtype) noise = self.scheduler._get_variance(scheduler.timesteps[j]) ** 0.5 * base_noise noise_array[j] = noise.clone() inverse_variance_norm = 1.0 / torch.tensor([scheduler._get_variance(scheduler.timesteps[j]) for j in range(len(scheduler.timesteps))] + [0]).to(noise_array.device) latent_dim = noise_array[0, 0].numel() inverse_variance_norm = inverse_variance_norm[:, None] / latent_dim scaled_tolerance = tolerance ** 2 with self.progress_bar(total=num_inference_steps) as progress_bar: steps = 0 while begin_idx < len(scheduler.timesteps): parallel_len = end_idx - begin_idx block_prompt_embeds = torch.stack([prompt_embeds] * 
parallel_len) block_latents = latents_time_evolution_buffer[begin_idx:end_idx] block_t = scheduler.timesteps[begin_idx:end_idx, None].repeat(1, batch_size * num_images_per_prompt) t_vec = block_t if do_classifier_free_guidance: t_vec = t_vec.repeat(1, 2) latent_model_input = torch.cat([block_latents] * 2, dim=1) if do_classifier_free_guidance else block_latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t_vec) net = self.wrapped_unet if parallel_len > 3 else self.unet model_output = net(latent_model_input.flatten(0, 1), t_vec.flatten(0, 1), encoder_hidden_states=block_prompt_embeds.flatten(0, 1), cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] per_latent_shape = model_output.shape[1:] if do_classifier_free_guidance: model_output = model_output.reshape(parallel_len, 2, batch_size * num_images_per_prompt, *per_latent_shape) (noise_pred_uncond, noise_pred_text) = (model_output[:, 0], model_output[:, 1]) model_output = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) model_output = model_output.reshape(parallel_len * batch_size * num_images_per_prompt, *per_latent_shape) block_latents_denoise = scheduler.batch_step_no_noise(model_output=model_output, timesteps=block_t.flatten(0, 1), sample=block_latents.flatten(0, 1), **extra_step_kwargs).reshape(block_latents.shape) delta = block_latents_denoise - block_latents cumulative_delta = self._cumsum(delta, dim=0, debug=debug) cumulative_noise = self._cumsum(noise_array[begin_idx:end_idx], dim=0, debug=debug) if scheduler._is_ode_scheduler: cumulative_noise = 0 block_latents_new = latents_time_evolution_buffer[begin_idx][None,] + cumulative_delta + cumulative_noise cur_error = torch.linalg.norm((block_latents_new - latents_time_evolution_buffer[begin_idx + 1:end_idx + 1]).reshape(parallel_len, batch_size * num_images_per_prompt, -1), dim=-1).pow(2) error_ratio = cur_error * inverse_variance_norm[begin_idx + 1:end_idx + 1] error_ratio = torch.nn.functional.pad(error_ratio, (0, 0, 0, 1), value=1000000000.0) any_error_at_time = torch.max(error_ratio > scaled_tolerance, dim=1).values.int() ind = torch.argmax(any_error_at_time).item() new_begin_idx = begin_idx + min(1 + ind, parallel) new_end_idx = min(new_begin_idx + parallel, len(scheduler.timesteps)) latents_time_evolution_buffer[begin_idx + 1:end_idx + 1] = block_latents_new latents_time_evolution_buffer[end_idx:new_end_idx + 1] = latents_time_evolution_buffer[end_idx][None,] steps += 1 progress_bar.update(new_begin_idx - begin_idx) if callback is not None and steps % callback_steps == 0: callback(begin_idx, block_t[begin_idx], latents_time_evolution_buffer[begin_idx]) begin_idx = new_begin_idx end_idx = new_end_idx latents = latents_time_evolution_buffer[-1] if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: 
diffusers-main/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py import inspect from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import BlipForConditionalGeneration, BlipProcessor, CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ....image_processor import PipelineImageInput, VaeImageProcessor from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, UNet2DConditionModel from ....models.attention_processor import Attention from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import DDIMScheduler, DDPMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler from ....schedulers.scheduling_ddim_inverse import DDIMInverseScheduler from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, BaseOutput, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) @dataclass class Pix2PixInversionPipelineOutput(BaseOutput, TextualInversionLoaderMixin): latents: torch.Tensor images: Union[List[PIL.Image.Image], np.ndarray] EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import requests\n >>> import torch\n\n >>> from diffusers import DDIMScheduler, StableDiffusionPix2PixZeroPipeline\n\n\n >>> def download(embedding_url, local_filepath):\n ... r = requests.get(embedding_url)\n ... with open(local_filepath, "wb") as f:\n ... f.write(r.content)\n\n\n >>> model_ckpt = "CompVis/stable-diffusion-v1-4"\n >>> pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)\n >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.to("cuda")\n\n >>> prompt = "a high resolution painting of a cat in the style of van gough"\n >>> source_emb_url = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/cat.pt"\n >>> target_emb_url = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/dog.pt"\n\n >>> for url in [source_emb_url, target_emb_url]:\n ... download(url, url.split("/")[-1])\n\n >>> src_embeds = torch.load(source_emb_url.split("/")[-1])\n >>> target_embeds = torch.load(target_emb_url.split("/")[-1])\n >>> images = pipeline(\n ... prompt,\n ... source_embeds=src_embeds,\n ... target_embeds=target_embeds,\n ... num_inference_steps=50,\n ... cross_attention_guidance_amount=0.15,\n ... ).images\n\n >>> images[0].save("edited_image_dog.png")\n ```\n' EXAMPLE_INVERT_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from transformers import BlipForConditionalGeneration, BlipProcessor\n >>> from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionPix2PixZeroPipeline\n\n >>> import requests\n >>> from PIL import Image\n\n >>> captioner_id = "Salesforce/blip-image-captioning-base"\n >>> processor = BlipProcessor.from_pretrained(captioner_id)\n >>> model = BlipForConditionalGeneration.from_pretrained(\n ... captioner_id, torch_dtype=torch.float16, low_cpu_mem_usage=True\n ... 
)\n\n >>> sd_model_ckpt = "CompVis/stable-diffusion-v1-4"\n >>> pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained(\n ... sd_model_ckpt,\n ... caption_generator=model,\n ... caption_processor=processor,\n ... torch_dtype=torch.float16,\n ... safety_checker=None,\n ... )\n\n >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.enable_model_cpu_offload()\n\n >>> img_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/test_images/cats/cat_6.png"\n\n >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB").resize((512, 512))\n >>> # generate caption\n >>> caption = pipeline.generate_caption(raw_image)\n\n >>> # "a photography of a cat with flowers and dai dai daie - daie - daie kasaii"\n >>> inv_latents = pipeline.invert(caption, image=raw_image).latents\n >>> # we need to generate source and target embeds\n\n >>> source_prompts = ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"]\n\n >>> target_prompts = ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"]\n\n >>> source_embeds = pipeline.get_embeds(source_prompts)\n >>> target_embeds = pipeline.get_embeds(target_prompts)\n >>> # the latents can then be used to edit a real image\n >>> # when using Stable Diffusion 2 or other models that use v-prediction\n >>> # set `cross_attention_guidance_amount` to 0.01 or less to avoid input latent gradient explosion\n\n >>> image = pipeline(\n ... caption,\n ... source_embeds=source_embeds,\n ... target_embeds=target_embeds,\n ... num_inference_steps=50,\n ... cross_attention_guidance_amount=0.15,\n ... generator=generator,\n ... latents=inv_latents,\n ... negative_prompt=caption,\n ... ).images[0]\n >>> image.save("edited_image.png")\n ```\n' def preprocess(image): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 8 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image def prepare_unet(unet: UNet2DConditionModel): pix2pix_zero_attn_procs = {} for name in unet.attn_processors.keys(): module_name = name.replace('.processor', '') module = unet.get_submodule(module_name) if 'attn2' in name: pix2pix_zero_attn_procs[name] = Pix2PixZeroAttnProcessor(is_pix2pix_zero=True) module.requires_grad_(True) else: pix2pix_zero_attn_procs[name] = Pix2PixZeroAttnProcessor(is_pix2pix_zero=False) module.requires_grad_(False) unet.set_attn_processor(pix2pix_zero_attn_procs) return unet class Pix2PixZeroL2Loss: def __init__(self): self.loss = 0.0 def compute_loss(self, predictions, targets): self.loss += ((predictions - targets) ** 2).sum((1, 2)).mean(0) class Pix2PixZeroAttnProcessor: def __init__(self, is_pix2pix_zero=False): self.is_pix2pix_zero = is_pix2pix_zero if self.is_pix2pix_zero: self.reference_cross_attn_map = {} def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, timestep=None, loss=None): (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) if self.is_pix2pix_zero and timestep is not None: if loss is None: self.reference_cross_attn_map[timestep.item()] = attention_probs.detach().cpu() elif loss is not None: prev_attn_probs = self.reference_cross_attn_map.pop(timestep.item()) loss.compute_loss(attention_probs, prev_attn_probs.to(attention_probs.device)) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline, StableDiffusionMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'caption_generator', 'caption_processor', 'inverse_scheduler'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDPMScheduler, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler], feature_extractor: CLIPImageProcessor, safety_checker: StableDiffusionSafetyChecker, inverse_scheduler: DDIMInverseScheduler, caption_generator: BlipForConditionalGeneration, caption_processor: BlipProcessor, requires_safety_checker: 
bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, caption_processor=caption_processor, caption_generator=caption_generator, inverse_scheduler=inverse_scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.'
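# Note: `_encode_prompt` is kept only as a deprecated shim around `encode_prompt`, which returns a
# (prompt_embeds, negative_prompt_embeds) tuple; the shim concatenates that pair back into the legacy
# single-tensor layout [negative_prompt_embeds, prompt_embeds] that older callers expect.
# Minimal sketch of the equivalent call (assumes an already-loaded pipeline `pipe` and a `device`; names are illustrative):
#   pos_embeds, neg_embeds = pipe.encode_prompt("a photo of a cat", device, num_images_per_prompt=1, do_classifier_free_guidance=True)
#   legacy_embeds = torch.cat([neg_embeds, pos_embeds])  # same ordering `_encode_prompt` returns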
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, source_embeds, target_embeds, callback_steps, prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if source_embeds is None and target_embeds is None: raise ValueError('`source_embeds` and `target_embeds` cannot be undefined.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def generate_caption(self, images): text = 'a photography of' prev_device = self.caption_generator.device device = self._execution_device inputs = self.caption_processor(images, text, return_tensors='pt').to(device=device, dtype=self.caption_generator.dtype) self.caption_generator.to(device) outputs = self.caption_generator.generate(**inputs, max_new_tokens=128) self.caption_generator.to(prev_device) caption = self.caption_processor.batch_decode(outputs, skip_special_tokens=True)[0] return caption def construct_direction(self, embs_source: torch.Tensor, embs_target: torch.Tensor): return (embs_target.mean(0) - embs_source.mean(0)).unsqueeze(0) @torch.no_grad() def get_embeds(self, prompt: List[str], batch_size: int=16) -> torch.Tensor: num_prompts = len(prompt) embeds = [] for i in range(0, num_prompts, batch_size): prompt_slice = prompt[i:i + batch_size] input_ids = self.tokenizer(prompt_slice, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt').input_ids input_ids = input_ids.to(self.text_encoder.device) embeds.append(self.text_encoder(input_ids)[0]) return torch.cat(embeds, dim=0).mean(0)[None] def prepare_image_latents(self, image, batch_size, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if isinstance(generator, list): latents = [self.vae.encode(image[i:i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)] latents = torch.cat(latents, dim=0) else: latents = self.vae.encode(image).latent_dist.sample(generator) latents = self.vae.config.scaling_factor * latents if batch_size != latents.shape[0]: if batch_size % latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. 
Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_latents_per_image = batch_size // latents.shape[0] latents = torch.cat([latents] * additional_latents_per_image, dim=0) else: raise ValueError(f'Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts.') else: latents = torch.cat([latents], dim=0) return latents def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int): pred_type = self.inverse_scheduler.config.prediction_type alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t if pred_type == 'epsilon': return model_output elif pred_type == 'sample': return (sample - alpha_prod_t ** 0.5 * model_output) / beta_prod_t ** 0.5 elif pred_type == 'v_prediction': return alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`') def auto_corr_loss(self, hidden_states, generator=None): reg_loss = 0.0 for i in range(hidden_states.shape[0]): for j in range(hidden_states.shape[1]): noise = hidden_states[i:i + 1, j:j + 1, :, :] while True: roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item() reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2 reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2 if noise.shape[2] <= 8: break noise = F.avg_pool2d(noise, kernel_size=2) return reg_loss def kl_divergence(self, hidden_states): mean = hidden_states.mean() var = hidden_states.var() return var + mean ** 2 - 1 - torch.log(var + 1e-07) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, source_embeds: torch.Tensor=None, target_embeds: torch.Tensor=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, cross_attention_guidance_amount: float=0.1, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, source_embeds, target_embeds, callback_steps, prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if cross_attention_kwargs is None: cross_attention_kwargs = {} device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if 
do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) latents_init = latents.clone() extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) self.unet = prepare_unet(self.unet) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs={'timestep': t}).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) edit_direction = self.construct_direction(source_embeds, target_embeds).to(prompt_embeds.device) prompt_embeds_edit = prompt_embeds.clone() prompt_embeds_edit[1:2] += edit_direction self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps latents = latents_init num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) x_in = latent_model_input.detach().clone() x_in.requires_grad = True opt = torch.optim.SGD([x_in], lr=cross_attention_guidance_amount) with torch.enable_grad(): loss = Pix2PixZeroL2Loss() noise_pred = self.unet(x_in, t, encoder_hidden_states=prompt_embeds_edit.detach(), cross_attention_kwargs={'timestep': t, 'loss': loss}).sample loss.loss.backward(retain_graph=False) opt.step() noise_pred = self.unet(x_in.detach(), t, encoder_hidden_states=prompt_embeds_edit, cross_attention_kwargs={'timestep': None}).sample latents = x_in.detach().chunk(2)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = 
self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) @torch.no_grad() @replace_example_docstring(EXAMPLE_INVERT_DOC_STRING) def invert(self, prompt: Optional[str]=None, image: PipelineImageInput=None, num_inference_steps: int=50, guidance_scale: float=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, cross_attention_guidance_amount: float=0.1, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, lambda_auto_corr: float=20.0, lambda_kl: float=20.0, num_reg_steps: int=5, num_auto_corr_rolls: int=5): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if cross_attention_kwargs is None: cross_attention_kwargs = {} device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 image = self.image_processor.preprocess(image) latents = self.prepare_image_latents(image, batch_size, self.vae.dtype, device, generator) num_images_per_prompt = 1 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, prompt_embeds=prompt_embeds) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.inverse_scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.inverse_scheduler.timesteps self.unet = prepare_unet(self.unet) num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs={'timestep': t}).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) with torch.enable_grad(): for _ in range(num_reg_steps): if lambda_auto_corr > 0: for _ in range(num_auto_corr_rolls): var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) l_ac = self.auto_corr_loss(var_epsilon, generator=generator) l_ac.backward() grad = var.grad.detach() / num_auto_corr_rolls noise_pred = noise_pred - lambda_auto_corr * grad if lambda_kl > 0: var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) l_kld = self.kl_divergence(var_epsilon) l_kld.backward() grad = var.grad.detach() noise_pred = noise_pred - lambda_kl * grad noise_pred = noise_pred.detach() latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0): progress_bar.update() if callback is not 
None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) inverted_latents = latents.detach().clone() image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (inverted_latents, image) return Pix2PixInversionPipelineOutput(latents=inverted_latents, images=image) # File: diffusers-main/src/diffusers/pipelines/deprecated/stochastic_karras_ve/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_stochastic_karras_ve': ['KarrasVePipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_stochastic_karras_ve import KarrasVePipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py from typing import List, Optional, Tuple, Union import torch from ....models import UNet2DModel from ....schedulers import KarrasVeScheduler from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class KarrasVePipeline(DiffusionPipeline): unet: UNet2DModel scheduler: KarrasVeScheduler def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__(self, batch_size: int=1, num_inference_steps: int=50, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', return_dict: bool=True, **kwargs) -> Union[Tuple, ImagePipelineOutput]: img_size = self.unet.config.sample_size shape = (batch_size, 3, img_size, img_size) model = self.unet sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): sigma = self.scheduler.schedule[t] sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0 (sample_hat, sigma_hat) = self.scheduler.add_noise_to_input(sample, sigma, generator=generator) model_output = sigma_hat / 2 * model((sample_hat + 1) / 2, sigma_hat / 2).sample step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat) if sigma_prev != 0: model_output = sigma_prev / 2 * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample step_output = self.scheduler.step_correct(model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output['derivative']) sample = step_output.prev_sample sample = (sample / 2 + 0.5).clamp(0, 1) image = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from 
....utils.dummy_torch_and_transformers_objects import VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline _dummy_objects.update({'VersatileDiffusionDualGuidedPipeline': VersatileDiffusionDualGuidedPipeline, 'VersatileDiffusionImageVariationPipeline': VersatileDiffusionImageVariationPipeline, 'VersatileDiffusionPipeline': VersatileDiffusionPipeline, 'VersatileDiffusionTextToImagePipeline': VersatileDiffusionTextToImagePipeline}) else: _import_structure['modeling_text_unet'] = ['UNetFlatConditionModel'] _import_structure['pipeline_versatile_diffusion'] = ['VersatileDiffusionPipeline'] _import_structure['pipeline_versatile_diffusion_dual_guided'] = ['VersatileDiffusionDualGuidedPipeline'] _import_structure['pipeline_versatile_diffusion_image_variation'] = ['VersatileDiffusionImageVariationPipeline'] _import_structure['pipeline_versatile_diffusion_text_to_image'] = ['VersatileDiffusionTextToImagePipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline else: from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from diffusers.utils import deprecate from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin from ....models.activations import get_activation from ....models.attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnAddedKVProcessor2_0, AttnProcessor from ....models.embeddings import GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps from ....models.resnet import ResnetBlockCondNorm2D from ....models.transformers.dual_transformer_2d import DualTransformer2DModel from ....models.transformers.transformer_2d import Transformer2DModel from ....models.unets.unet_2d_condition import UNet2DConditionOutput from ....utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import apply_freeu logger = logging.get_logger(__name__) def get_down_block(down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, num_attention_heads, transformer_layers_per_block, attention_type, attention_head_dim, 
resnet_groups=None, cross_attention_dim=None, downsample_padding=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift='default', resnet_skip_time_act=False, resnet_out_scale_factor=1.0, cross_attention_norm=None, dropout=0.0): down_block_type = down_block_type[7:] if down_block_type.startswith('UNetRes') else down_block_type if down_block_type == 'DownBlockFlat': return DownBlockFlat(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift) elif down_block_type == 'CrossAttnDownBlockFlat': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlockFlat') return CrossAttnDownBlockFlat(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, resnet_time_scale_shift=resnet_time_scale_shift) raise ValueError(f'{down_block_type} is not supported.') def get_up_block(up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, num_attention_heads, transformer_layers_per_block, resolution_idx, attention_type, attention_head_dim, resnet_groups=None, cross_attention_dim=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift='default', resnet_skip_time_act=False, resnet_out_scale_factor=1.0, cross_attention_norm=None, dropout=0.0): up_block_type = up_block_type[7:] if up_block_type.startswith('UNetRes') else up_block_type if up_block_type == 'UpBlockFlat': return UpBlockFlat(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift) elif up_block_type == 'CrossAttnUpBlockFlat': if cross_attention_dim is None: raise ValueError('cross_attention_dim must be specified for CrossAttnUpBlockFlat') return CrossAttnUpBlockFlat(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, resnet_time_scale_shift=resnet_time_scale_shift) raise ValueError(f'{up_block_type} is not supported.') class FourierEmbedder(nn.Module): def __init__(self, num_freqs=64, temperature=100): super().__init__() self.num_freqs = num_freqs self.temperature = temperature freq_bands = temperature ** 
(torch.arange(num_freqs) / num_freqs) freq_bands = freq_bands[None, None, None] self.register_buffer('freq_bands', freq_bands, persistent=False) def __call__(self, x): x = self.freq_bands * x.unsqueeze(-1) return torch.stack((x.sin(), x.cos()), dim=-1).permute(0, 1, 3, 4, 2).reshape(*x.shape[:2], -1) class GLIGENTextBoundingboxProjection(nn.Module): def __init__(self, positive_len, out_dim, feature_type, fourier_freqs=8): super().__init__() self.positive_len = positive_len self.out_dim = out_dim self.fourier_embedder = FourierEmbedder(num_freqs=fourier_freqs) self.position_dim = fourier_freqs * 2 * 4 if isinstance(out_dim, tuple): out_dim = out_dim[0] if feature_type == 'text-only': self.linears = nn.Sequential(nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim)) self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) elif feature_type == 'text-image': self.linears_text = nn.Sequential(nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim)) self.linears_image = nn.Sequential(nn.Linear(self.positive_len + self.position_dim, 512), nn.SiLU(), nn.Linear(512, 512), nn.SiLU(), nn.Linear(512, out_dim)) self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim])) def forward(self, boxes, masks, positive_embeddings=None, phrases_masks=None, image_masks=None, phrases_embeddings=None, image_embeddings=None): masks = masks.unsqueeze(-1) xyxy_embedding = self.fourier_embedder(boxes) xyxy_null = self.null_position_feature.view(1, 1, -1) xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null if positive_embeddings is not None: positive_null = self.null_positive_feature.view(1, 1, -1) positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1)) else: phrases_masks = phrases_masks.unsqueeze(-1) image_masks = image_masks.unsqueeze(-1) text_null = self.null_text_feature.view(1, 1, -1) image_null = self.null_image_feature.view(1, 1, -1) phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1)) objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1)) objs = torch.cat([objs_text, objs_image], dim=1) return objs class UNetFlatConditionModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True _no_split_modules = ['BasicTransformerBlock', 'ResnetBlockFlat', 'CrossAttnUpBlockFlat'] @register_to_config def __init__(self, sample_size: Optional[int]=None, in_channels: int=4, out_channels: int=4, center_input_sample: bool=False, flip_sin_to_cos: bool=True, freq_shift: int=0, down_block_types: Tuple[str]=('CrossAttnDownBlockFlat', 'CrossAttnDownBlockFlat', 'CrossAttnDownBlockFlat', 'DownBlockFlat'), mid_block_type: Optional[str]='UNetMidBlockFlatCrossAttn', up_block_types: Tuple[str]=('UpBlockFlat', 'CrossAttnUpBlockFlat', 'CrossAttnUpBlockFlat', 'CrossAttnUpBlockFlat'), only_cross_attention: Union[bool, Tuple[bool]]=False, block_out_channels: Tuple[int]=(320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]]=2, downsample_padding: int=1, 
mid_block_scale_factor: float=1, dropout: float=0.0, act_fn: str='silu', norm_num_groups: Optional[int]=32, norm_eps: float=1e-05, cross_attention_dim: Union[int, Tuple[int]]=1280, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]]=1, reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]]=None, encoder_hid_dim: Optional[int]=None, encoder_hid_dim_type: Optional[str]=None, attention_head_dim: Union[int, Tuple[int]]=8, num_attention_heads: Optional[Union[int, Tuple[int]]]=None, dual_cross_attention: bool=False, use_linear_projection: bool=False, class_embed_type: Optional[str]=None, addition_embed_type: Optional[str]=None, addition_time_embed_dim: Optional[int]=None, num_class_embeds: Optional[int]=None, upcast_attention: bool=False, resnet_time_scale_shift: str='default', resnet_skip_time_act: bool=False, resnet_out_scale_factor: int=1.0, time_embedding_type: str='positional', time_embedding_dim: Optional[int]=None, time_embedding_act_fn: Optional[str]=None, timestep_post_act: Optional[str]=None, time_cond_proj_dim: Optional[int]=None, conv_in_kernel: int=3, conv_out_kernel: int=3, projection_class_embeddings_input_dim: Optional[int]=None, attention_type: str='default', class_embeddings_concat: bool=False, mid_block_only_cross_attention: Optional[bool]=None, cross_attention_norm: Optional[str]=None, addition_embed_type_num_heads=64): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError('At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.') num_attention_heads = num_attention_heads or attention_head_dim if len(down_block_types) != len(up_block_types): raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.') if len(block_out_channels) != len(down_block_types): raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.') if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError(f'Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.') if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError(f'Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.') if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError(f'Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.') if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError(f'Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. 
`down_block_types`: {down_block_types}.') if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError(f'Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}.') if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: for layer_number_per_block in transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = LinearMultiDim(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) if time_embedding_type == 'fourier': time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f'`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.') self.time_proj = GaussianFourierProjection(time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos) timestep_input_dim = time_embed_dim elif time_embedding_type == 'positional': time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError(f'{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`.') self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = 'text_proj' self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError(f'`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}.') if encoder_hid_dim_type == 'text_proj': self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == 'text_image_proj': self.encoder_hid_proj = TextImageProjection(text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim) elif encoder_hid_dim_type == 'image_proj': self.encoder_hid_proj = ImageProjection(image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim) elif encoder_hid_dim_type is not None: raise ValueError(f"`encoder_hid_dim_type`: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj' or 'image_proj'.") else: self.encoder_hid_proj = None if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == 'timestep': self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == 'identity': self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == 'projection': if projection_class_embeddings_input_dim is None: raise ValueError("`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set") self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == 'simple_projection': if projection_class_embeddings_input_dim is None: raise 
ValueError("`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set") self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == 'text': if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding(text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads) elif addition_embed_type == 'text_image': self.add_embedding = TextImageTimeEmbedding(text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == 'text_time': self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == 'image': self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == 'image_hint': self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim output_channel = block_out_channels[0] for (i, down_block_type) in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block(down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, 
resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout) self.down_blocks.append(down_block) if mid_block_type == 'UNetMidBlockFlatCrossAttn': self.mid_block = UNetMidBlockFlatCrossAttn(transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type) elif mid_block_type == 'UNetMidBlockFlatSimpleCrossAttn': self.mid_block = UNetMidBlockFlatSimpleCrossAttn(in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm) elif mid_block_type == 'UNetMidBlockFlat': self.mid_block = UNetMidBlockFlat(in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, num_layers=0, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f'unknown mid_block_type : {mid_block_type}') self.num_upsamplers = 0 reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) if reverse_transformer_layers_per_block is None else reverse_transformer_layers_per_block only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for (i, up_block_type) in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block(up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resolution_idx=i, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, 
only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout) self.up_blocks.append(up_block) prev_output_channel = output_channel if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = LinearMultiDim(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding) if attention_type in ['gated', 'gated-text-image']: positive_len = 768 if isinstance(cross_attention_dim, int): positive_len = cross_attention_dim elif isinstance(cross_attention_dim, (list, tuple)): positive_len = cross_attention_dim[0] feature_type = 'text-only' if attention_type == 'gated' else 'text-image' self.position_net = GLIGENTextBoundingboxProjection(positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. 
Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def set_attention_slice(self, slice_size): sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, 'set_attention_slice'): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == 'auto': slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == 'max': slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError(f'You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.') for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f'size {size} has to be smaller or equal to {dim}.') def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, 'set_attention_slice'): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, 'gradient_checkpointing'): module.gradient_checkpointing = value def enable_freeu(self, s1, s2, b1, b2): for (i, upsample_block) in enumerate(self.up_blocks): setattr(upsample_block, 's1', s1) setattr(upsample_block, 's2', s2) setattr(upsample_block, 'b1', b1) setattr(upsample_block, 'b2', b2) def disable_freeu(self): freeu_keys = {'s1', 's2', 'b1', 'b2'} for (i, upsample_block) in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def fuse_qkv_projections(self): self.original_attn_processors = None for (_, attn_processor) in self.attn_processors.items(): if 'Added' in str(attn_processor.__class__.__name__): raise ValueError('`fuse_qkv_projections()` is not supported for models having added KV projections.') self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) def unfuse_qkv_projections(self): if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def unload_lora(self): deprecate('unload_lora', '0.28.0', 'Calling `unload_lora()` is deprecated and will be removed in a future version. 
Please install `peft` and then call `disable_adapters()`.') for module in self.modules(): if hasattr(module, 'set_lora_layer'): module.set_lora_layer(None) def forward(self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]]=None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]]=None, mid_block_additional_residual: Optional[torch.Tensor]=None, down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]]=None, encoder_attention_mask: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[UNet2DConditionOutput, Tuple]: default_overall_up_factor = 2 ** self.num_upsamplers forward_upsample_size = False upsample_size = None for dim in sample.shape[-2:]: if dim % default_overall_up_factor != 0: forward_upsample_size = True break if attention_mask is not None: attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) if self.config.center_input_sample: sample = 2 * sample - 1.0 timesteps = timestep if not torch.is_tensor(timesteps): is_mps = sample.device.type == 'mps' if isinstance(timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError('class_labels should be provided when num_class_embeds > 0') if self.config.class_embed_type == 'timestep': class_labels = self.time_proj(class_labels) class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb if self.config.addition_embed_type == 'text': aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == 'text_image': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`") image_embs = added_cond_kwargs.get('image_embeds') text_embs = added_cond_kwargs.get('text_embeds', encoder_hidden_states) aug_emb = self.add_embedding(text_embs, image_embs) elif self.config.addition_embed_type == 'text_time': if 'text_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`") text_embeds = added_cond_kwargs.get('text_embeds') if 'time_ids' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in 
`added_cond_kwargs`") time_ids = added_cond_kwargs.get('time_ids') time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) elif self.config.addition_embed_type == 'image': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`") image_embs = added_cond_kwargs.get('image_embeds') aug_emb = self.add_embedding(image_embs) elif self.config.addition_embed_type == 'image_hint': if 'image_embeds' not in added_cond_kwargs or 'hint' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`") image_embs = added_cond_kwargs.get('image_embeds') hint = added_cond_kwargs.get('hint') (aug_emb, hint) = self.add_embedding(image_embs, hint) sample = torch.cat([sample, hint], dim=1) emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'text_proj': encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'text_image_proj': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`") image_embeds = added_cond_kwargs.get('image_embeds') encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'image_proj': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`") image_embeds = added_cond_kwargs.get('image_embeds') encoder_hidden_states = self.encoder_hid_proj(image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == 'ip_image_proj': if 'image_embeds' not in added_cond_kwargs: raise ValueError(f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`") image_embeds = added_cond_kwargs.get('image_embeds') image_embeds = self.encoder_hid_proj(image_embeds) encoder_hidden_states = (encoder_hidden_states, image_embeds) sample = self.conv_in(sample) if cross_attention_kwargs is not None and cross_attention_kwargs.get('gligen', None) is not None: cross_attention_kwargs = cross_attention_kwargs.copy() gligen_args = cross_attention_kwargs.pop('gligen') cross_attention_kwargs['gligen'] = {'objs': self.position_net(**gligen_args)} lora_scale = cross_attention_kwargs.get('scale', 1.0) if cross_attention_kwargs is not None else 1.0 if USE_PEFT_BACKEND: scale_lora_layers(self, lora_scale) is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None is_adapter = down_intrablock_additional_residuals is not None if not is_adapter and 
mid_block_additional_residual is None and (down_block_additional_residuals is not None): deprecate('T2I should not use down_block_additional_residuals', '1.3.0', 'Passing intrablock residual connections with `down_block_additional_residuals` is deprecated and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ', standard_warn=False) down_intrablock_additional_residuals = down_block_additional_residuals is_adapter = True down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention: additional_residuals = {} if is_adapter and len(down_intrablock_additional_residuals) > 0: additional_residuals['additional_residuals'] = down_intrablock_additional_residuals.pop(0) (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, **additional_residuals) else: (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb) if is_adapter and len(down_intrablock_additional_residuals) > 0: sample += down_intrablock_additional_residuals.pop(0) down_block_res_samples += res_samples if is_controlnet: new_down_block_res_samples = () for (down_block_res_sample, down_block_additional_residual) in zip(down_block_res_samples, down_block_additional_residuals): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples if self.mid_block is not None: if hasattr(self.mid_block, 'has_cross_attention') and self.mid_block.has_cross_attention: sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask) else: sample = self.mid_block(sample, emb) if is_adapter and len(down_intrablock_additional_residuals) > 0 and (sample.shape == down_intrablock_additional_residuals[0].shape): sample += down_intrablock_additional_residuals.pop(0) if is_controlnet: sample = sample + mid_block_additional_residual for (i, upsample_block) in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets):] down_block_res_samples = down_block_res_samples[:-len(upsample_block.resnets)] if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask) else: sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, scale=lora_scale) if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if USE_PEFT_BACKEND: unscale_lora_layers(self, lora_scale) if not return_dict: return (sample,) return UNet2DConditionOutput(sample=sample) 
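# A minimal usage sketch for the attention helpers defined on the flat UNet above. The methods
# (`attn_processors`, `set_default_attn_processor`, `set_attention_slice`) come from this module;
# the Hub repo id 'shi-labs/versatile-diffusion' and its 'text_unet' subfolder are assumptions about
# the published checkpoint layout, not something this file guarantees.
if __name__ == '__main__':
    # Load the flat text UNet weights directly through the ModelMixin loader.
    text_unet = UNetFlatConditionModel.from_pretrained('shi-labs/versatile-diffusion', subfolder='text_unet')
    # `attn_processors` maps every attention layer name to its processor instance.
    print(f'{len(text_unet.attn_processors)} attention processors registered')
    # Reset every layer to the default processor (AttnProcessor or AttnAddedKVProcessor).
    text_unet.set_default_attn_processor()
    # 'auto' computes attention in slices of half of each sliceable head dimension.
    text_unet.set_attention_slice('auto')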
class LinearMultiDim(nn.Linear): def __init__(self, in_features, out_features=None, second_dim=4, *args, **kwargs): in_features = [in_features, second_dim, 1] if isinstance(in_features, int) else list(in_features) if out_features is None: out_features = in_features out_features = [out_features, second_dim, 1] if isinstance(out_features, int) else list(out_features) self.in_features_multidim = in_features self.out_features_multidim = out_features super().__init__(np.array(in_features).prod(), np.array(out_features).prod()) def forward(self, input_tensor, *args, **kwargs): shape = input_tensor.shape n_dim = len(self.in_features_multidim) input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_features) output_tensor = super().forward(input_tensor) output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_features_multidim) return output_tensor class ResnetBlockFlat(nn.Module): def __init__(self, *, in_channels, out_channels=None, dropout=0.0, temb_channels=512, groups=32, groups_out=None, pre_norm=True, eps=1e-06, time_embedding_norm='default', use_in_shortcut=None, second_dim=4, **kwargs): super().__init__() self.pre_norm = pre_norm self.pre_norm = True in_channels = [in_channels, second_dim, 1] if isinstance(in_channels, int) else list(in_channels) self.in_channels_prod = np.array(in_channels).prod() self.channels_multidim = in_channels if out_channels is not None: out_channels = [out_channels, second_dim, 1] if isinstance(out_channels, int) else list(out_channels) out_channels_prod = np.array(out_channels).prod() self.out_channels_multidim = out_channels else: out_channels_prod = self.in_channels_prod self.out_channels_multidim = self.channels_multidim self.time_embedding_norm = time_embedding_norm if groups_out is None: groups_out = groups self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=self.in_channels_prod, eps=eps, affine=True) self.conv1 = torch.nn.Conv2d(self.in_channels_prod, out_channels_prod, kernel_size=1, padding=0) if temb_channels is not None: self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels_prod) else: self.time_emb_proj = None self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels_prod, eps=eps, affine=True) self.dropout = torch.nn.Dropout(dropout) self.conv2 = torch.nn.Conv2d(out_channels_prod, out_channels_prod, kernel_size=1, padding=0) self.nonlinearity = nn.SiLU() self.use_in_shortcut = self.in_channels_prod != out_channels_prod if use_in_shortcut is None else use_in_shortcut self.conv_shortcut = None if self.use_in_shortcut: self.conv_shortcut = torch.nn.Conv2d(self.in_channels_prod, out_channels_prod, kernel_size=1, stride=1, padding=0) def forward(self, input_tensor, temb): shape = input_tensor.shape n_dim = len(self.channels_multidim) input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_channels_prod, 1, 1) input_tensor = input_tensor.view(-1, self.in_channels_prod, 1, 1) hidden_states = input_tensor hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if temb is not None: temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None] hidden_states = hidden_states + temb hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: input_tensor = self.conv_shortcut(input_tensor) output_tensor = input_tensor + hidden_states output_tensor = 
output_tensor.view(*shape[0:-n_dim], -1) output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_channels_multidim) return output_tensor class DownBlockFlat(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_downsample: bool=True, downsample_padding: int=1): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlockFlat(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: output_states = () for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class CrossAttnDownBlockFlat(nn.Module): def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, downsample_padding: int=1, add_downsample: bool=True, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, attention_type: str='default'): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append(ResnetBlockFlat(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) if not dual_cross_attention: attentions.append(Transformer2DModel(num_attention_heads, out_channels // 
num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None, additional_residuals: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: output_states = () blocks = list(zip(self.resnets, self.attentions)) for (i, (resnet, attn)) in enumerate(blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] if i == len(blocks) - 1 and additional_residuals is not None: hidden_states = hidden_states + additional_residuals output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return (hidden_states, output_states) class UpBlockFlat(nn.Module): def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_upsample: bool=True): super().__init__() resnets = [] for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlockFlat(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, 
non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, upsample_size: Optional[int]=None, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get('scale', None) is not None: deprecation_message = 'The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`.' deprecate('scale', '1.0.0', deprecation_message) is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and getattr(self, 'b2', None) for resnet in self.resnets: res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] if is_freeu_enabled: (hidden_states, res_hidden_states) = apply_freeu(self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False) else: hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class CrossAttnUpBlockFlat(nn.Module): def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, num_attention_heads: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, add_upsample: bool=True, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, attention_type: str='default'): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): res_skip_channels = in_channels if i == num_layers - 1 else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append(ResnetBlockFlat(in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) if not dual_cross_attention: 
attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, upsample_size: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') is_freeu_enabled = getattr(self, 's1', None) and getattr(self, 's2', None) and getattr(self, 'b1', None) and getattr(self, 'b2', None) for (resnet, attn) in zip(self.resnets, self.attentions): res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] if is_freeu_enabled: (hidden_states, res_hidden_states) = apply_freeu(self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UNetMidBlockFlat(nn.Module): def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, attn_groups: Optional[int]=None, resnet_pre_norm: bool=True, add_attention: bool=True, attention_head_dim: int=1, output_scale_factor: 
float=1.0): super().__init__() resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.add_attention = add_attention if attn_groups is None: attn_groups = resnet_groups if resnet_time_scale_shift == 'default' else None if resnet_time_scale_shift == 'spatial': resnets = [ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm='spatial', non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor)] else: resnets = [ResnetBlockFlat(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)] attentions = [] if attention_head_dim is None: logger.warning(f'It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}.') attention_head_dim = in_channels for _ in range(num_layers): if self.add_attention: attentions.append(Attention(in_channels, heads=in_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=attn_groups, spatial_norm_dim=temb_channels if resnet_time_scale_shift == 'spatial' else None, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True)) else: attentions.append(None) if resnet_time_scale_shift == 'spatial': resnets.append(ResnetBlockCondNorm2D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm='spatial', non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor)) else: resnets.append(ResnetBlockFlat(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): if attn is not None: hidden_states = attn(hidden_states, temb=temb) hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlockFlatCrossAttn(nn.Module): def __init__(self, in_channels: int, temb_channels: int, out_channels: Optional[int]=None, dropout: float=0.0, num_layers: int=1, transformer_layers_per_block: Union[int, Tuple[int]]=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_groups_out: Optional[int]=None, resnet_pre_norm: bool=True, num_attention_heads: int=1, output_scale_factor: float=1.0, cross_attention_dim: int=1280, dual_cross_attention: bool=False, use_linear_projection: bool=False, upcast_attention: bool=False, attention_type: str='default'): super().__init__() out_channels = out_channels or in_channels self.in_channels = in_channels self.out_channels = out_channels self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) if 
isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers resnet_groups_out = resnet_groups_out or resnet_groups resnets = [ResnetBlockFlat(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, groups_out=resnet_groups_out, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)] attentions = [] for i in range(num_layers): if not dual_cross_attention: attentions.append(Transformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups_out, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type)) else: attentions.append(DualTransformer2DModel(num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups)) resnets.append(ResnetBlockFlat(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups_out, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.') hidden_states = self.resnets[0](hidden_states, temb) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {} hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs) else: hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False)[0] hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlockFlatSimpleCrossAttn(nn.Module): def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0, cross_attention_dim: int=1280, skip_time_act: bool=False, only_cross_attention: bool=False, cross_attention_norm: Optional[str]=None): super().__init__() self.has_cross_attention = True self.attention_head_dim = attention_head_dim resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.num_heads = in_channels // self.attention_head_dim resnets = [ResnetBlockFlat(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act)] attentions = [] for _ in range(num_layers): processor = AttnAddedKVProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') else AttnAddedKVProcessor() attentions.append(Attention(query_dim=in_channels, cross_attention_dim=in_channels, heads=self.num_heads, dim_head=self.attention_head_dim, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor)) resnets.append(ResnetBlockFlat(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, encoder_attention_mask: Optional[torch.Tensor]=None) -> torch.Tensor: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if 
cross_attention_kwargs.get('scale', None) is not None: logger.warning('Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.') if attention_mask is None: mask = None if encoder_hidden_states is None else encoder_attention_mask else: mask = attention_mask hidden_states = self.resnets[0](hidden_states, temb) for (attn, resnet) in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs) hidden_states = resnet(hidden_states, temb) return hidden_states # File: diffusers-main/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py import inspect from typing import Callable, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModel from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import logging from ...pipeline_utils import DiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline logger = logging.get_logger(__name__) class VersatileDiffusionPipeline(DiffusionPipeline): tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: CLIPTextModel image_encoder: CLIPVisionModel image_unet: UNet2DConditionModel text_unet: UNet2DConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers def __init__(self, tokenizer: CLIPTokenizer, image_feature_extractor: CLIPImageProcessor, text_encoder: CLIPTextModel, image_encoder: CLIPVisionModel, image_unet: UNet2DConditionModel, text_unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers): super().__init__() self.register_modules(tokenizer=tokenizer, image_feature_extractor=image_feature_extractor, text_encoder=text_encoder, image_encoder=image_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) @torch.no_grad() def image_variation(self, image: Union[torch.Tensor, PIL.Image.Image], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1): expected_components = inspect.signature(VersatileDiffusionImageVariationPipeline.__init__).parameters.keys() components = {name: component for (name, component) in self.components.items() if name in expected_components} return VersatileDiffusionImageVariationPipeline(**components)(image=image, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps) @torch.no_grad() def text_to_image(self, prompt: Union[str, 
List[str]], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1): expected_components = inspect.signature(VersatileDiffusionTextToImagePipeline.__init__).parameters.keys() components = {name: component for (name, component) in self.components.items() if name in expected_components} temp_pipeline = VersatileDiffusionTextToImagePipeline(**components) output = temp_pipeline(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps) temp_pipeline._swap_unet_attention_blocks() return output @torch.no_grad() def dual_guided(self, prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], image: Union[str, List[str]], text_to_image_strength: float=0.5, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1): expected_components = inspect.signature(VersatileDiffusionDualGuidedPipeline.__init__).parameters.keys() components = {name: component for (name, component) in self.components.items() if name in expected_components} temp_pipeline = VersatileDiffusionDualGuidedPipeline(**components) output = temp_pipeline(prompt=prompt, image=image, text_to_image_strength=text_to_image_strength, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps) temp_pipeline._revert_dual_attention() return output # File: diffusers-main/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py import inspect from typing import Callable, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ....image_processor import VaeImageProcessor from ....models import AutoencoderKL, DualTransformer2DModel, Transformer2DModel, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import deprecate, logging from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_text_unet import UNetFlatConditionModel logger = logging.get_logger(__name__) class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline): model_cpu_offload_seq = 'bert->unet->vqvae' tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: 
CLIPTextModelWithProjection image_encoder: CLIPVisionModelWithProjection image_unet: UNet2DConditionModel text_unet: UNetFlatConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers _optional_components = ['text_unet'] def __init__(self, tokenizer: CLIPTokenizer, image_feature_extractor: CLIPImageProcessor, text_encoder: CLIPTextModelWithProjection, image_encoder: CLIPVisionModelWithProjection, image_unet: UNet2DConditionModel, text_unet: UNetFlatConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers): super().__init__() self.register_modules(tokenizer=tokenizer, image_feature_extractor=image_feature_extractor, text_encoder=text_encoder, image_encoder=image_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) if self.text_unet is not None and ('dual_cross_attention' not in self.image_unet.config or not self.image_unet.config.dual_cross_attention): self._convert_to_dual_attention() def remove_unused_weights(self): self.register_modules(text_unet=None) def _convert_to_dual_attention(self): for (name, module) in self.image_unet.named_modules(): if isinstance(module, Transformer2DModel): (parent_name, index) = name.rsplit('.', 1) index = int(index) image_transformer = self.image_unet.get_submodule(parent_name)[index] text_transformer = self.text_unet.get_submodule(parent_name)[index] config = image_transformer.config dual_transformer = DualTransformer2DModel(num_attention_heads=config.num_attention_heads, attention_head_dim=config.attention_head_dim, in_channels=config.in_channels, num_layers=config.num_layers, dropout=config.dropout, norm_num_groups=config.norm_num_groups, cross_attention_dim=config.cross_attention_dim, attention_bias=config.attention_bias, sample_size=config.sample_size, num_vector_embeds=config.num_vector_embeds, activation_fn=config.activation_fn, num_embeds_ada_norm=config.num_embeds_ada_norm) dual_transformer.transformers[0] = image_transformer dual_transformer.transformers[1] = text_transformer self.image_unet.get_submodule(parent_name)[index] = dual_transformer self.image_unet.register_to_config(dual_cross_attention=True) def _revert_dual_attention(self): for (name, module) in self.image_unet.named_modules(): if isinstance(module, DualTransformer2DModel): (parent_name, index) = name.rsplit('.', 1) index = int(index) self.image_unet.get_submodule(parent_name)[index] = module.transformers[0] self.image_unet.register_to_config(dual_cross_attention=False) def _encode_text_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): def normalize_embeddings(encoder_output): embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) embeds_pooled = encoder_output.text_embeds embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) return embeds batch_size = len(prompt) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='pt').input_ids if not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to 
{self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = normalize_embeddings(prompt_embeds) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: uncond_tokens = [''] * batch_size max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def _encode_image_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): def normalize_embeddings(encoder_output): embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) embeds = self.image_encoder.visual_projection(embeds) embeds_pooled = embeds[:, 0:1] embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) return embeds batch_size = len(prompt) if isinstance(prompt, list) else 1 image_input = self.image_feature_extractor(images=prompt, return_tensors='pt') pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype) image_embeddings = self.image_encoder(pixel_values) image_embeddings = normalize_embeddings(image_embeddings) (bs_embed, seq_len, _) = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors='pt') pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype) negative_prompt_embeds = self.image_encoder(pixel_values) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, height, width, callback_steps): if not isinstance(prompt, str) and (not isinstance(prompt, PIL.Image.Image)) and (not isinstance(prompt, list)): raise ValueError(f'`prompt` has to be of type `str` `PIL.Image` or `list` but is {type(prompt)}') if not isinstance(image, str) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `str` `PIL.Image` or `list` but is {type(image)}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def set_transformer_params(self, mix_ratio: float=0.5, condition_types: Tuple=('text', 'image')): for (name, module) in self.image_unet.named_modules(): if isinstance(module, DualTransformer2DModel): module.mix_ratio = mix_ratio for (i, type) in enumerate(condition_types): if type == 'text': module.condition_lengths[i] = self.text_encoder.config.max_position_embeddings module.transformer_index_for_condition[i] = 1 else: module.condition_lengths[i] = 257 module.transformer_index_for_condition[i] = 0 @torch.no_grad() def __call__(self, prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], image: Union[str, List[str]], text_to_image_strength: float=0.5, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, **kwargs): height = height or self.image_unet.config.sample_size * self.vae_scale_factor width = width or self.image_unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, image, height, width, callback_steps) prompt = [prompt] if not isinstance(prompt, list) else prompt image = [image] if not isinstance(image, list) else image batch_size = len(prompt) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_text_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance) image_embeddings = self._encode_image_prompt(image, device, num_images_per_prompt, do_classifier_free_guidance) dual_prompt_embeddings = torch.cat([prompt_embeds, image_embeddings], dim=1) prompt_types = ('text', 'image') self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.image_unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, dual_prompt_embeddings.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) self.set_transformer_params(text_to_image_strength, prompt_types) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=dual_prompt_embeddings).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) if not 
return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ....image_processor import VaeImageProcessor from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import deprecate, logging from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) class VersatileDiffusionImageVariationPipeline(DiffusionPipeline): model_cpu_offload_seq = 'bert->unet->vqvae' image_feature_extractor: CLIPImageProcessor image_encoder: CLIPVisionModelWithProjection image_unet: UNet2DConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers def __init__(self, image_feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection, image_unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers): super().__init__() self.register_modules(image_feature_extractor=image_feature_extractor, image_encoder=image_encoder, image_unet=image_unet, vae=vae, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): def normalize_embeddings(encoder_output): embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) embeds = self.image_encoder.visual_projection(embeds) embeds_pooled = embeds[:, 0:1] embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) return embeds if isinstance(prompt, torch.Tensor) and len(prompt.shape) == 4: prompt = list(prompt) batch_size = len(prompt) if isinstance(prompt, list) else 1 image_input = self.image_feature_extractor(images=prompt, return_tensors='pt') pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype) image_embeddings = self.image_encoder(pixel_values) image_embeddings = normalize_embeddings(image_embeddings) (bs_embed, seq_len, _) = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: uncond_images: List[str] if negative_prompt is None: uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, PIL.Image.Image): uncond_images = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
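# A minimal usage sketch for the VersatileDiffusionDualGuidedPipeline defined above.
# The checkpoint id, local image path and prompt are illustrative assumptions; note that
# `prompt` carries the text condition and `image` the PIL image condition, with
# text_to_image_strength blending the two:
import PIL.Image
import torch
from diffusers import VersatileDiffusionDualGuidedPipeline

pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe.remove_unused_weights()  # text_unet is no longer needed once dual attention is set up
pipe = pipe.to("cuda")

init_image = PIL.Image.open("car.png").convert("RGB")  # any RGB reference image
result = pipe(
    prompt="a red car in the sunshine",
    image=init_image,
    text_to_image_strength=0.75,
    generator=torch.Generator("cuda").manual_seed(0),
)
result.images[0].save("dual_guided.png")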
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_images = negative_prompt uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors='pt') pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype) negative_prompt_embeds = self.image_encoder(pixel_values) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, image, height, width, callback_steps): if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is {type(image)}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, **kwargs): height = height or self.image_unet.config.sample_size * self.vae_scale_factor width = width or self.image_unet.config.sample_size * self.vae_scale_factor self.check_inputs(image, height, width, callback_steps) batch_size = 1 if isinstance(image, PIL.Image.Image) else len(image) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 image_embeddings = self._encode_prompt(image, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.image_unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py import inspect from typing import Callable, List, Optional, Union import torch import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer from ....image_processor import VaeImageProcessor from ....models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import deprecate, logging from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_text_unet import UNetFlatConditionModel logger = logging.get_logger(__name__) class 
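# A minimal usage sketch for the VersatileDiffusionImageVariationPipeline defined above;
# the checkpoint id and the synthetic input image are illustrative assumptions:
import PIL.Image
import torch
from diffusers import VersatileDiffusionImageVariationPipeline

pipe = VersatileDiffusionImageVariationPipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
).to("cuda")

reference = PIL.Image.new("RGB", (512, 512), color="gray")  # stand-in for a real photo
variations = pipe(image=reference, num_images_per_prompt=2, guidance_scale=7.5).images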
VersatileDiffusionTextToImagePipeline(DiffusionPipeline): model_cpu_offload_seq = 'bert->unet->vqvae' tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: CLIPTextModelWithProjection image_unet: UNet2DConditionModel text_unet: UNetFlatConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers _optional_components = ['text_unet'] def __init__(self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, image_unet: UNet2DConditionModel, text_unet: UNetFlatConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) if self.text_unet is not None: self._swap_unet_attention_blocks() def _swap_unet_attention_blocks(self): for (name, module) in self.image_unet.named_modules(): if isinstance(module, Transformer2DModel): (parent_name, index) = name.rsplit('.', 1) index = int(index) (self.image_unet.get_submodule(parent_name)[index], self.text_unet.get_submodule(parent_name)[index]) = (self.text_unet.get_submodule(parent_name)[index], self.image_unet.get_submodule(parent_name)[index]) def remove_unused_weights(self): self.register_modules(text_unet=None) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): def normalize_embeddings(encoder_output): embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) embeds_pooled = encoder_output.text_embeds embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) return embeds batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='pt').input_ids if not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = normalize_embeddings(prompt_embeds) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size 
{batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, **kwargs): height = height or self.image_unet.config.sample_size * self.vae_scale_factor width = width or self.image_unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps) batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.image_unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: 
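# A minimal usage sketch for the VersatileDiffusionTextToImagePipeline defined above;
# the checkpoint id and prompt are illustrative assumptions:
import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe.remove_unused_weights()  # text_unet only serves to swap attention blocks at init time
pipe = pipe.to("cuda")

image = pipe(
    "an astronaut riding a horse on mars",
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]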
diffusers-main/src/diffusers/pipelines/deprecated/vq_diffusion/__init__.py from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline _dummy_objects.update({'LearnedClassifierFreeSamplingEmbeddings': LearnedClassifierFreeSamplingEmbeddings, 'VQDiffusionPipeline': VQDiffusionPipeline}) else: _import_structure['pipeline_vq_diffusion'] = ['LearnedClassifierFreeSamplingEmbeddings', 'VQDiffusionPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline else: from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin, Transformer2DModel, VQModel from ....schedulers import VQDiffusionScheduler from ....utils import logging from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin): @register_to_config def __init__(self, learnable: bool, hidden_size: Optional[int]=None, length: Optional[int]=None): super().__init__() self.learnable = learnable if self.learnable: assert hidden_size is not None, 'learnable=True requires `hidden_size` to be set' assert length is not None, 'learnable=True requires `length` to be set' embeddings = torch.zeros(length, hidden_size) else: embeddings = None self.embeddings = torch.nn.Parameter(embeddings) class VQDiffusionPipeline(DiffusionPipeline): vqvae: VQModel text_encoder: CLIPTextModel tokenizer: CLIPTokenizer transformer: Transformer2DModel learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings scheduler: VQDiffusionScheduler def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings): super().__init__() self.register_modules(vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings) def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt') text_input_ids = 
text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length:]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0] prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1) else: uncond_tokens = [''] * batch_size max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0] negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int=100, guidance_scale: float=5.0, truncation_rate: float=1.0, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1) -> Union[ImagePipelineOutput, Tuple]: if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance) if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') latents_shape = (batch_size, self.transformer.num_latent_pixels) if latents is None: mask_class = self.transformer.num_vector_embeds - 1 latents = torch.full(latents_shape, mask_class).to(self.device) else: if latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError(f'Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0, {self.transformer.num_vector_embeds - 1} (inclusive).') latents = latents.to(self.device) self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps_tensor = self.scheduler.timesteps.to(self.device) sample = latents for (i, t) in enumerate(self.progress_bar(timesteps_tensor)): latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample if do_classifier_free_guidance: (model_output_uncond, model_output_text) = model_output.chunk(2) model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(model_output, dim=1, keepdim=True) model_output = self.truncate(model_output, truncation_rate) model_output = model_output.clamp(-70) sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample if callback is not None and i % callback_steps == 0: callback(i, t, sample) embedding_channels = self.vqvae.config.vq_embed_dim embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape) image = self.vqvae.decode(embeddings, force_not_quantize=True).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) def truncate(self, log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor: (sorted_log_p_x_0, indices) = torch.sort(log_p_x_0, 1, descending=True) sorted_p_x_0 = torch.exp(sorted_log_p_x_0) keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate all_true = torch.full_like(keep_mask[:, 0:1, :], True) keep_mask = torch.cat((all_true, keep_mask), dim=1) keep_mask = keep_mask[:, :-1, :] keep_mask = keep_mask.gather(1, indices.argsort(1)) rv = log_p_x_0.clone() rv[~keep_mask] = -torch.inf return rv # File: diffusers-main/src/diffusers/pipelines/dit/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {'pipeline_dit': ['DiTPipeline']} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_dit import DiTPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: diffusers-main/src/diffusers/pipelines/dit/pipeline_dit.py from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, DiTTransformer2DModel from ...schedulers import KarrasDiffusionSchedulers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class DiTPipeline(DiffusionPipeline): model_cpu_offload_seq = 'transformer->vae' def __init__(self, transformer: DiTTransformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]]=None): super().__init__() self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler) self.labels = {} if id2label is not None: for (key, value) in id2label.items(): for label in value.split(','): self.labels[label.lstrip().rstrip()] = int(key) self.labels = dict(sorted(self.labels.items())) def get_label_ids(self, label: Union[str, List[str]]) -> List[int]: if not isinstance(label, list): label = list(label) for l in label: if l not in 
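# A minimal usage sketch for the VQDiffusionPipeline defined above; the checkpoint id,
# prompt and truncation_rate value are illustrative assumptions:
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
image = pipe(
    "teddy bear playing in the pool",
    num_inference_steps=100,
    truncation_rate=0.86,  # keep only the most probable codebook entries at each step
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]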
self.labels: raise ValueError(f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.') return [self.labels[l] for l in label] @torch.no_grad() def __call__(self, class_labels: List[int], guidance_scale: float=4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, num_inference_steps: int=50, output_type: Optional[str]='pil', return_dict: bool=True) -> Union[ImagePipelineOutput, Tuple]: batch_size = len(class_labels) latent_size = self.transformer.config.sample_size latent_channels = self.transformer.config.in_channels latents = randn_tensor(shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self._execution_device, dtype=self.transformer.dtype) latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents class_labels = torch.tensor(class_labels, device=self._execution_device).reshape(-1) class_null = torch.tensor([1000] * batch_size, device=self._execution_device) class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale > 1: half = latent_model_input[:len(latent_model_input) // 2] latent_model_input = torch.cat([half, half], dim=0) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) timesteps = t if not torch.is_tensor(timesteps): is_mps = latent_model_input.device.type == 'mps' if isinstance(timesteps, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(latent_model_input.device) timesteps = timesteps.expand(latent_model_input.shape[0]) noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample if guidance_scale > 1: (eps, rest) = (noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]) (cond_eps, uncond_eps) = torch.split(eps, len(eps) // 2, dim=0) half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps) eps = torch.cat([half_eps, half_eps], dim=0) noise_pred = torch.cat([eps, rest], dim=1) if self.transformer.config.out_channels // 2 == latent_channels: (model_output, _) = torch.split(noise_pred, latent_channels, dim=1) else: model_output = noise_pred latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample if guidance_scale > 1: (latents, _) = latent_model_input.chunk(2, dim=0) else: latents = latent_model_input latents = 1 / self.vae.config.scaling_factor * latents samples = self.vae.decode(latents).sample samples = (samples / 2 + 0.5).clamp(0, 1) samples = samples.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': samples = self.numpy_to_pil(samples) self.maybe_free_model_hooks() if not return_dict: return (samples,) return ImagePipelineOutput(images=samples) # File: diffusers-main/src/diffusers/pipelines/flux/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _additional_imports = {} _import_structure = {'pipeline_output': ['FluxPipelineOutput']} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except 
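# A minimal usage sketch for the DiTPipeline defined above; the checkpoint id and the
# scheduler swap mirror a commonly documented setup and should be treated as assumptions:
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # ImageNet label names -> class ids
images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images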
OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_flux'] = ['FluxPipeline'] _import_structure['pipeline_flux_controlnet'] = ['FluxControlNetPipeline'] _import_structure['pipeline_flux_img2img'] = ['FluxImg2ImgPipeline'] _import_structure['pipeline_flux_inpaint'] = ['FluxInpaintPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_flux import FluxPipeline from .pipeline_flux_controlnet import FluxControlNetPipeline from .pipeline_flux_img2img import FluxImg2ImgPipeline from .pipeline_flux_inpaint import FluxInpaintPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for (name, value) in _additional_imports.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/flux/pipeline_flux.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor import VaeImageProcessor from ...loaders import FluxLoraLoaderMixin, FromSingleFileMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import FluxPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import FluxPipeline\n\n >>> pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)\n >>> pipe.to("cuda")\n >>> prompt = "A cat holding a sign that says hello world"\n >>> # Depending on the variant being used, the pipeline call will slightly vary.\n >>> # Refer to the pipeline documentation for more details.\n >>> image = pipe(prompt, num_inference_steps=4, guidance_scale=0.0).images[0]\n >>> image.save("flux.png")\n ```\n' def calculate_shift(image_seq_len, base_seq_len: int=256, max_seq_len: int=4096, base_shift: float=0.5, max_shift: float=1.16): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. 
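# calculate_shift() above is a straight linear interpolation from the packed image sequence
# length to the timestep-shift value mu. A quick worked check using its default constants
# (the 512px/1024px token counts assume the usual Flux 2x2 latent-patch packing):
m = (1.16 - 0.5) / (4096 - 256)   # slope, ~1.72e-4 per image token
b = 0.5 - m * 256                 # intercept so that 256 tokens -> base_shift = 0.5
assert abs((32 * 32) * m + b - 0.632) < 1e-9   # 512x512 image -> 1024 tokens -> mu ~= 0.632
assert abs((64 * 64) * m + b - 1.16) < 1e-9    # 1024x1024 image -> 4096 tokens -> mu = max_shift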
Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class FluxPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds'] def __init__(self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** len(self.vae.config.block_out_channels) if hasattr(self, 'vae') and self.vae is not None else 16 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.tokenizer_max_length = self.tokenizer.model_max_length if hasattr(self, 'tokenizer') and self.tokenizer is not None else 77 self.default_sample_size = 64 def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=512, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer_2(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, return_length=False, return_overflowing_tokens=False, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_2(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] dtype = self.text_encoder_2.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = 
prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_overflowing_tokens=False, return_length=False, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) prompt_embeds = prompt_embeds.pooler_output prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) return prompt_embeds def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, max_sequence_length: int=512, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 pooled_prompt_embeds = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt) prompt_embeds = self._get_t5_prompt_embeds(prompt=prompt_2, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) if self.text_encoder is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) return (prompt_embeds, pooled_prompt_embeds, text_ids) def check_inputs(self, prompt, prompt_2, height, width, prompt_embeds=None, pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise 
ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') @staticmethod def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height // 2, width // 2, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] (latent_image_id_height, latent_image_id_width, latent_image_id_channels) = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape(latent_image_id_height * latent_image_id_width, latent_image_id_channels) return latent_image_ids.to(device=device, dtype=dtype) @staticmethod def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, height // 2 * (width // 2), num_channels_latents * 4) return latents @staticmethod def _unpack_latents(latents, height, width, vae_scale_factor): (batch_size, num_patches, channels) = latents.shape height = height // vae_scale_factor width = width // vae_scale_factor latents = latents.view(batch_size, height, width, channels // 4, 2, 2) latents = latents.permute(0, 3, 1, 4, 2, 5) latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) return latents def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_vae_tiling(self): self.vae.enable_tiling() def disable_vae_tiling(self): self.vae.disable_tiling() def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): height = 2 * (int(height) // self.vae_scale_factor) width = 2 * (int(width) // self.vae_scale_factor) shape = (batch_size, num_channels_latents, height, width) if latents is not None: latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) return 
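# _pack_latents()/_unpack_latents() above convert between the (B, C, H, W) latent grid and
# the token sequence the Flux transformer consumes, by folding 2x2 latent patches into the
# channel dimension. A self-contained round-trip check that mirrors those two static
# methods (the 1024x1024 / 16-channel numbers are illustrative):
import torch

def pack_latents(x: torch.Tensor) -> torch.Tensor:
    b, c, h, w = x.shape
    x = x.view(b, c, h // 2, 2, w // 2, 2).permute(0, 2, 4, 1, 3, 5)
    return x.reshape(b, (h // 2) * (w // 2), c * 4)          # one token per 2x2 latent patch

def unpack_latents(x: torch.Tensor, h: int, w: int) -> torch.Tensor:
    b, _, channels = x.shape
    x = x.view(b, h // 2, w // 2, channels // 4, 2, 2).permute(0, 3, 1, 4, 2, 5)
    return x.reshape(b, channels // 4, h, w)                 # back to the latent grid

latents = torch.randn(1, 16, 128, 128)                        # latent grid of a 1024x1024 image
tokens = pack_latents(latents)
assert tokens.shape == (1, 64 * 64, 16 * 4)                   # 4096 tokens, 64 channels each
assert torch.equal(unpack_latents(tokens, 128, 128), latents)  # packing is a lossless reshape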
(latents.to(device=device, dtype=dtype), latent_image_ids) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) return (latents, latent_image_ids) @property def guidance_scale(self): return self._guidance_scale @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=28, timesteps: List[int]=None, guidance_scale: float=3.5, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, joint_attention_kwargs: Optional[Dict[str, Any]]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=512): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, height, width, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.joint_attention_kwargs.get('scale', None) if self.joint_attention_kwargs is not None else None (prompt_embeds, pooled_prompt_embeds, text_ids) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale) num_channels_latents = self.transformer.config.in_channels // 4 (latents, latent_image_ids) = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) image_seq_len = latents.shape[1] mu = calculate_shift(image_seq_len, self.scheduler.config.base_image_seq_len, self.scheduler.config.max_image_seq_len, self.scheduler.config.base_shift, self.scheduler.config.max_shift) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas, mu=mu) num_warmup_steps = max(len(timesteps) - num_inference_steps * 
self.scheduler.order, 0) self._num_timesteps = len(timesteps) if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) else: guidance = None with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue timestep = t.expand(latents.shape[0]).to(latents.dtype) noise_pred = self.transformer(hidden_states=latents, timestep=timestep / 1000, guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False)[0] latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return FluxPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/flux/pipeline_flux_controlnet.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FluxLoraLoaderMixin, FromSingleFileMixin from ...models.autoencoders import AutoencoderKL from ...models.controlnet_flux import FluxControlNetModel, FluxMultiControlNetModel from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import FluxPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers.utils import load_image\n >>> from diffusers import FluxControlNetPipeline\n >>> from diffusers import FluxControlNetModel\n\n >>> controlnet_model = "InstantX/FLUX.1-dev-controlnet-canny"\n >>> controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)\n >>> pipe = FluxControlNetPipeline.from_pretrained(\n ... base_model, controlnet=controlnet, torch_dtype=torch.bfloat16\n ... 
)\n >>> pipe.to("cuda")\n >>> control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")\n >>> prompt = "A girl in city, 25 years old, cool, futuristic"\n >>> image = pipe(\n ... prompt,\n ... control_image=control_image,\n ... controlnet_conditioning_scale=0.6,\n ... num_inference_steps=28,\n ... guidance_scale=3.5,\n ... ).images[0]\n >>> image.save("flux.png")\n ```\n' def calculate_shift(image_seq_len, base_seq_len: int=256, max_seq_len: int=4096, base_shift: float=0.5, max_shift: float=1.16): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class FluxControlNetPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds'] def __init__(self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel, controlnet: Union[FluxControlNetModel, List[FluxControlNetModel], Tuple[FluxControlNetModel], FluxMultiControlNetModel]): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler, controlnet=controlnet) self.vae_scale_factor = 2 ** len(self.vae.config.block_out_channels) if hasattr(self, 'vae') and self.vae is not None else 16 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.tokenizer_max_length = self.tokenizer.model_max_length if hasattr(self, 'tokenizer') and self.tokenizer is not None else 77 self.default_sample_size = 64 def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=512, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer_2(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, return_length=False, return_overflowing_tokens=False, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_2(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] dtype = self.text_encoder_2.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_overflowing_tokens=False, return_length=False, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids 
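# The re-tokenization with padding='longest' above is only used to detect truncation: if the
# untruncated sequence is longer than the fixed-length input and the ids differ, the text that
# falls beyond the CLIP limit (tokenizer_max_length, 77 here by default) is decoded and surfaced
# in the warning below. A rough sketch of that comparison, assuming a loaded `pipe` (hypothetical
# variable) that exposes the same CLIP tokenizer:
#   >>> ids = pipe.tokenizer(prompt, padding='max_length', max_length=77, truncation=True, return_tensors='pt').input_ids
#   >>> full = pipe.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids
#   >>> was_truncated = full.shape[-1] >= ids.shape[-1] and not torch.equal(ids, full)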
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) prompt_embeds = prompt_embeds.pooler_output prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) return prompt_embeds def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, max_sequence_length: int=512, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 pooled_prompt_embeds = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt) prompt_embeds = self._get_t5_prompt_embeds(prompt=prompt_2, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) if self.text_encoder is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) return (prompt_embeds, pooled_prompt_embeds, text_ids) def check_inputs(self, prompt, prompt_2, height, width, prompt_embeds=None, pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') @staticmethod def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height // 2, width // 2, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] (latent_image_id_height, latent_image_id_width, latent_image_id_channels) = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape(latent_image_id_height * latent_image_id_width, latent_image_id_channels) return latent_image_ids.to(device=device, dtype=dtype) @staticmethod def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, height // 2 * (width // 2), num_channels_latents * 4) return latents @staticmethod def _unpack_latents(latents, height, width, vae_scale_factor): (batch_size, num_patches, channels) = latents.shape height = height // vae_scale_factor width = width // vae_scale_factor latents = latents.view(batch_size, height, width, channels // 4, 2, 2) latents = latents.permute(0, 3, 1, 4, 2, 5) latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) return latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): height = 2 * (int(height) // self.vae_scale_factor) width = 2 * (int(width) // self.vae_scale_factor) shape = (batch_size, num_channels_latents, height, width) if latents is not None: latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) return (latents.to(device=device, dtype=dtype), latent_image_ids) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) return (latents, latent_image_ids) def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): if isinstance(image, torch.Tensor): pass else: image = self.image_processor.preprocess(image, height=height, width=width) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image @property def guidance_scale(self): return self._guidance_scale @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=28, timesteps: List[int]=None, guidance_scale: float=7.0, control_image: PipelineImageInput=None, control_mode: Optional[Union[int, List[int]]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, joint_attention_kwargs: Optional[Dict[str, Any]]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=512): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, height, width, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device dtype = self.transformer.dtype lora_scale = self.joint_attention_kwargs.get('scale', None) if self.joint_attention_kwargs is not None else None (prompt_embeds, pooled_prompt_embeds, text_ids) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale) num_channels_latents = self.transformer.config.in_channels // 4 if isinstance(self.controlnet, FluxControlNetModel): control_image = self.prepare_image(image=control_image, width=width, height=height, 
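# Single-ControlNet branch: prepare_image resizes/normalizes the conditioning image, which is then
# VAE-encoded, shifted/scaled with the VAE config values, and packed by _pack_latents into the same
# patch-sequence layout as the latents. For example, a (B, 16, 128, 128) latent (the 1024x1024
# default with vae_scale_factor 16) packs to (B, 64 * 64, 64): each 2x2 spatial patch is folded
# into the channel dimension, so the ControlNet sees tokens aligned with the image tokens.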
batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=dtype) (height, width) = control_image.shape[-2:] control_image = self.vae.encode(control_image).latent_dist.sample() control_image = (control_image - self.vae.config.shift_factor) * self.vae.config.scaling_factor (height_control_image, width_control_image) = control_image.shape[2:] control_image = self._pack_latents(control_image, batch_size * num_images_per_prompt, num_channels_latents, height_control_image, width_control_image) if control_mode is not None: control_mode = torch.tensor(control_mode).to(device, dtype=torch.long) control_mode = control_mode.reshape([-1, 1]) elif isinstance(self.controlnet, FluxMultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_image(image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=dtype) (height, width) = control_image_.shape[-2:] control_image_ = self.vae.encode(control_image_).latent_dist.sample() control_image_ = (control_image_ - self.vae.config.shift_factor) * self.vae.config.scaling_factor (height_control_image, width_control_image) = control_image_.shape[2:] control_image_ = self._pack_latents(control_image_, batch_size * num_images_per_prompt, num_channels_latents, height_control_image, width_control_image) control_images.append(control_image_) control_image = control_images control_mode_ = [] if isinstance(control_mode, list): for cmode in control_mode: if cmode is None: control_mode_.append(-1) else: control_mode_.append(cmode) control_mode = torch.tensor(control_mode_).to(device, dtype=torch.long) control_mode = control_mode.reshape([-1, 1]) num_channels_latents = self.transformer.config.in_channels // 4 (latents, latent_image_ids) = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) image_seq_len = latents.shape[1] mu = calculate_shift(image_seq_len, self.scheduler.config.base_image_seq_len, self.scheduler.config.max_image_seq_len, self.scheduler.config.base_shift, self.scheduler.config.max_shift) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas, mu=mu) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue timestep = t.expand(latents.shape[0]).to(latents.dtype) if self.transformer.config.guidance_embeds: guidance = torch.tensor([guidance_scale], device=device) guidance = guidance.expand(latents.shape[0]) else: guidance = None (controlnet_block_samples, controlnet_single_block_samples) = self.controlnet(hidden_states=latents, controlnet_cond=control_image, controlnet_mode=control_mode, conditioning_scale=controlnet_conditioning_scale, timestep=timestep / 1000, guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False) noise_pred = self.transformer(hidden_states=latents, timestep=timestep / 1000, guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, 
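# Denoising step: the ControlNet is run first and returns residuals for the double- and
# single-stream blocks (controlnet_block_samples / controlnet_single_block_samples), which are
# passed into the transformer call assembled here. The timestep is divided by 1000 to map the
# scheduler's [0, 1000] timesteps into the [0, 1] range the transformer expects, and `guidance`
# is only populated when the transformer was trained with guidance embeddings
# (guidance_embeds=True, as in the guidance-distilled FLUX.1-dev checkpoint).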
controlnet_block_samples=controlnet_block_samples, controlnet_single_block_samples=controlnet_single_block_samples, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False)[0] latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return FluxPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/flux/pipeline_flux_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FluxLoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import FluxPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n\n >>> from diffusers import FluxImg2ImgPipeline\n >>> from diffusers.utils import load_image\n\n >>> device = "cuda"\n >>> pipe = FluxImg2ImgPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)\n >>> pipe = pipe.to(device)\n\n >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n >>> init_image = load_image(url).resize((1024, 1024))\n\n >>> prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"\n\n >>> images = pipe(\n ... prompt=prompt, image=init_image, num_inference_steps=4, strength=0.95, guidance_scale=0.0\n ... 
).images[0]\n ```\n' def calculate_shift(image_seq_len, base_seq_len: int=256, max_seq_len: int=4096, base_shift: float=0.5, max_shift: float=1.16): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class FluxImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds'] def __init__(self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** len(self.vae.config.block_out_channels) if hasattr(self, 'vae') and self.vae is not None else 16 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.tokenizer_max_length = self.tokenizer.model_max_length if hasattr(self, 'tokenizer') and self.tokenizer is not None else 77 self.default_sample_size = 64 def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=512, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer_2(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, return_length=False, return_overflowing_tokens=False, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_2(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] dtype = self.text_encoder_2.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_overflowing_tokens=False, return_length=False, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = 
self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) prompt_embeds = prompt_embeds.pooler_output prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) return prompt_embeds def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, max_sequence_length: int=512, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 pooled_prompt_embeds = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt) prompt_embeds = self._get_t5_prompt_embeds(prompt=prompt_2, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) if self.text_encoder is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) return (prompt_embeds, pooled_prompt_embeds, text_ids) def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor return image_latents def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def check_inputs(self, prompt, prompt_2, strength, height, width, prompt_embeds=None, pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if height % 8 != 0 or width % 8 != 0: raise 
ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') @staticmethod def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height // 2, width // 2, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] (latent_image_id_height, latent_image_id_width, latent_image_id_channels) = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape(latent_image_id_height * latent_image_id_width, latent_image_id_channels) return latent_image_ids.to(device=device, dtype=dtype) @staticmethod def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, height // 2 * (width // 2), num_channels_latents * 4) return latents @staticmethod def _unpack_latents(latents, height, width, vae_scale_factor): (batch_size, num_patches, channels) = latents.shape height = height // vae_scale_factor width = width // vae_scale_factor latents = latents.view(batch_size, height, width, channels // 4, 2, 2) latents = latents.permute(0, 3, 1, 4, 2, 5) latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) return latents def prepare_latents(self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') height = 2 * (int(height) // self.vae_scale_factor) width = 2 * (int(width) // self.vae_scale_factor) shape = (batch_size, num_channels_latents, height, width) latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) if latents is not None: return (latents.to(device=device, dtype=dtype), latent_image_ids) image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts.') else: image_latents = torch.cat([image_latents], dim=0) noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.scale_noise(image_latents, timestep, noise) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) return (latents, latent_image_ids) @property def guidance_scale(self): return self._guidance_scale @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, strength: float=0.6, num_inference_steps: int=28, timesteps: List[int]=None, guidance_scale: float=7.0, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, joint_attention_kwargs: Optional[Dict[str, Any]]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=512): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, strength, height, width, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False init_image = self.image_processor.preprocess(image, height=height, width=width) init_image = init_image.to(dtype=torch.float32) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.joint_attention_kwargs.get('scale', None) if self.joint_attention_kwargs is not None else None (prompt_embeds, pooled_prompt_embeds, text_ids) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_embeds=prompt_embeds, 
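# After the prompts are encoded, the img2img path builds a linear sigma schedule and a
# resolution-dependent shift `mu` for the flow-matching scheduler, then trims the schedule by
# `strength`. Rough arithmetic under the defaults shown above (vae_scale_factor 16, 1024x1024,
# and assuming the scheduler config matches calculate_shift's default parameters):
#   image_seq_len = (1024 // 16) * (1024 // 16) = 4096, so calculate_shift returns max_shift (1.16);
#   with num_inference_steps=28 and strength=0.6, get_timesteps keeps 28 - int(28 - 16.8) = 17 steps,
#   and denoising starts from the partially noised image latents rather than from pure noise.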
pooled_prompt_embeds=pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale) sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) image_seq_len = int(height) // self.vae_scale_factor * (int(width) // self.vae_scale_factor) mu = calculate_shift(image_seq_len, self.scheduler.config.base_image_seq_len, self.scheduler.config.max_image_seq_len, self.scheduler.config.base_shift, self.scheduler.config.max_shift) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas, mu=mu) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) if num_inference_steps < 1: raise ValueError(f'After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipelinesteps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.') latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) num_channels_latents = self.transformer.config.in_channels // 4 (latents, latent_image_ids) = self.prepare_latents(init_image, latent_timestep, batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) else: guidance = None with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue timestep = t.expand(latents.shape[0]).to(latents.dtype) noise_pred = self.transformer(hidden_states=latents, timestep=timestep / 1000, guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False)[0] latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return FluxPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor 
import PipelineImageInput, VaeImageProcessor from ...loaders import FluxLoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import FluxTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import FluxPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import FluxInpaintPipeline\n >>> from diffusers.utils import load_image\n\n >>> pipe = FluxInpaintPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)\n >>> pipe.to("cuda")\n >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"\n >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"\n >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"\n >>> source = load_image(img_url)\n >>> mask = load_image(mask_url)\n >>> image = pipe(prompt=prompt, image=source, mask_image=mask).images[0]\n >>> image.save("flux_inpainting.png")\n ```\n' def calculate_shift(image_seq_len, base_seq_len: int=256, max_seq_len: int=4096, base_shift: float=0.5, max_shift: float=1.16): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class FluxInpaintPipeline(DiffusionPipeline, FluxLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds'] def __init__(self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** len(self.vae.config.block_out_channels) if hasattr(self, 'vae') and self.vae is not None else 16 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels, do_normalize=False, do_binarize=True, do_convert_grayscale=True) self.tokenizer_max_length = self.tokenizer.model_max_length if hasattr(self, 'tokenizer') and self.tokenizer is not None else 77 self.default_sample_size = 64 def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=512, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer_2(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, return_length=False, return_overflowing_tokens=False, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_2(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] dtype = self.text_encoder_2.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_overflowing_tokens=False, return_length=False, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', 
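# As in the other Flux pipelines, the CLIP encoder contributes only its pooled output (one vector
# per prompt, returned as `pooled_prompt_embeds`), while the T5 encoder in _get_t5_prompt_embeds
# supplies the per-token sequence (`prompt_embeds`) of up to max_sequence_length (512) tokens;
# the second tokenizer pass being assembled here only serves to detect and warn about truncation.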
return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) prompt_embeds = prompt_embeds.pooler_output prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) return prompt_embeds def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, max_sequence_length: int=512, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 pooled_prompt_embeds = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt) prompt_embeds = self._get_t5_prompt_embeds(prompt=prompt_2, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) if self.text_encoder is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) return (prompt_embeds, pooled_prompt_embeds, text_ids) def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor return image_latents def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def check_inputs(self, prompt, prompt_2, image, mask_image, strength, height, width, output_type, prompt_embeds=None, pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, 
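# Inpaint-specific validation: beyond the shared prompt/size checks, this enforces that strength
# lies in [0.0, 1.0] and, when padding_mask_crop is set, that image and mask_image are PIL images
# and output_type is 'pil', as the mask-crop workflow requires.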
padding_mask_crop=None, max_sequence_length=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if padding_mask_crop is not None: if not isinstance(image, PIL.Image.Image): raise ValueError(f'The image should be a PIL image when inpainting mask crop, but is of type {type(image)}.') if not isinstance(mask_image, PIL.Image.Image): raise ValueError(f'The mask image should be a PIL image when inpainting mask crop, but is of type {type(mask_image)}.') if output_type != 'pil': raise ValueError(f'The output type should be PIL when inpainting mask crop, but is {output_type}.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') @staticmethod def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height // 2, width // 2, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] (latent_image_id_height, latent_image_id_width, latent_image_id_channels) = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape(latent_image_id_height * latent_image_id_width, latent_image_id_channels) return latent_image_ids.to(device=device, dtype=dtype) @staticmethod def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, height // 2 * (width // 2), num_channels_latents * 4) return latents @staticmethod def _unpack_latents(latents, height, width, vae_scale_factor): (batch_size, num_patches, 
channels) = latents.shape height = height // vae_scale_factor width = width // vae_scale_factor latents = latents.view(batch_size, height, width, channels // 4, 2, 2) latents = latents.permute(0, 3, 1, 4, 2, 5) latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) return latents def prepare_latents(self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') height = 2 * (int(height) // self.vae_scale_factor) width = 2 * (int(width) // self.vae_scale_factor) shape = (batch_size, num_channels_latents, height, width) latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts.') else: image_latents = torch.cat([image_latents], dim=0) if latents is None: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.scale_noise(image_latents, timestep, noise) else: noise = latents.to(device) latents = noise noise = self._pack_latents(noise, batch_size, num_channels_latents, height, width) image_latents = self._pack_latents(image_latents, batch_size, num_channels_latents, height, width) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) return (latents, noise, image_latents, latent_image_ids) def prepare_mask_latents(self, mask, masked_image, batch_size, num_channels_latents, num_images_per_prompt, height, width, dtype, device, generator): height = 2 * (int(height) // self.vae_scale_factor) width = 2 * (int(width) // self.vae_scale_factor) mask = torch.nn.functional.interpolate(mask, size=(height, width)) mask = mask.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == 16: masked_image_latents = masked_image else: masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator) masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError(f"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size.") mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError(f"The passed images and the required batch size don't match. 
Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. Make sure the number of images that you pass is divisible by the total requested batch size.") masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) masked_image_latents = self._pack_latents(masked_image_latents, batch_size, num_channels_latents, height, width) mask = self._pack_latents(mask.repeat(1, num_channels_latents, 1, 1), batch_size, num_channels_latents, height, width) return (mask, masked_image_latents) @property def guidance_scale(self): return self._guidance_scale @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, mask_image: PipelineImageInput=None, masked_image_latents: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, padding_mask_crop: Optional[int]=None, strength: float=0.6, num_inference_steps: int=28, timesteps: List[int]=None, guidance_scale: float=7.0, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, joint_attention_kwargs: Optional[Dict[str, Any]]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=512): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, image, mask_image, strength, height, width, output_type=output_type, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, padding_mask_crop=padding_mask_crop, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False if padding_mask_crop is not None: crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = 'fill' else: crops_coords = None resize_mode = 'default' original_image = image init_image = self.image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode) init_image = init_image.to(dtype=torch.float32) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.joint_attention_kwargs.get('scale', None) if self.joint_attention_kwargs is not None else None (prompt_embeds, pooled_prompt_embeds, text_ids) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, 
lora_scale=lora_scale) sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) image_seq_len = int(height) // self.vae_scale_factor * (int(width) // self.vae_scale_factor) mu = calculate_shift(image_seq_len, self.scheduler.config.base_image_seq_len, self.scheduler.config.max_image_seq_len, self.scheduler.config.base_shift, self.scheduler.config.max_shift) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas, mu=mu) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) if num_inference_steps < 1: raise ValueError(f'After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipelinesteps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.') latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) num_channels_latents = self.transformer.config.in_channels // 4 num_channels_transformer = self.transformer.config.in_channels (latents, noise, image_latents, latent_image_ids) = self.prepare_latents(init_image, latent_timestep, batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) mask_condition = self.mask_processor.preprocess(mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) if masked_image_latents is None: masked_image = init_image * (mask_condition < 0.5) else: masked_image = masked_image_latents (mask, masked_image_latents) = self.prepare_mask_latents(mask_condition, masked_image, batch_size, num_channels_latents, num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) else: guidance = None with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue timestep = t.expand(latents.shape[0]).to(latents.dtype) noise_pred = self.transformer(hidden_states=latents, timestep=timestep / 1000, guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False)[0] latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] init_latents_proper = image_latents init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.scale_noise(init_latents_proper, torch.tensor([noise_timestep]), noise) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = 
self._unpack_latents(latents, height, width, self.vae_scale_factor) latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return FluxPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/flux/pipeline_output.py from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class FluxPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] # File: diffusers-main/src/diffusers/pipelines/free_init_utils.py import math from typing import Tuple, Union import torch import torch.fft as fft from ..utils.torch_utils import randn_tensor class FreeInitMixin: def enable_free_init(self, num_iters: int=3, use_fast_sampling: bool=False, method: str='butterworth', order: int=4, spatial_stop_frequency: float=0.25, temporal_stop_frequency: float=0.25): self._free_init_num_iters = num_iters self._free_init_use_fast_sampling = use_fast_sampling self._free_init_method = method self._free_init_order = order self._free_init_spatial_stop_frequency = spatial_stop_frequency self._free_init_temporal_stop_frequency = temporal_stop_frequency def disable_free_init(self): self._free_init_num_iters = None @property def free_init_enabled(self): return hasattr(self, '_free_init_num_iters') and self._free_init_num_iters is not None def _get_free_init_freq_filter(self, shape: Tuple[int, ...], device: Union[str, torch.dtype], filter_type: str, order: float, spatial_stop_frequency: float, temporal_stop_frequency: float) -> torch.Tensor: (time, height, width) = (shape[-3], shape[-2], shape[-1]) mask = torch.zeros(shape) if spatial_stop_frequency == 0 or temporal_stop_frequency == 0: return mask if filter_type == 'butterworth': def retrieve_mask(x): return 1 / (1 + (x / spatial_stop_frequency ** 2) ** order) elif filter_type == 'gaussian': def retrieve_mask(x): return math.exp(-1 / (2 * spatial_stop_frequency ** 2) * x) elif filter_type == 'ideal': def retrieve_mask(x): return 1 if x <= spatial_stop_frequency * 2 else 0 else: raise NotImplementedError('`filter_type` must be one of gaussian, butterworth or ideal') for t in range(time): for h in range(height): for w in range(width): d_square = (spatial_stop_frequency / temporal_stop_frequency * (2 * t / time - 1)) ** 2 + (2 * h / height - 1) ** 2 + (2 * w / width - 1) ** 2 mask[..., t, h, w] = retrieve_mask(d_square) return mask.to(device) def _apply_freq_filter(self, x: torch.Tensor, noise: torch.Tensor, low_pass_filter: torch.Tensor) -> torch.Tensor: x_freq = fft.fftn(x, dim=(-3, -2, -1)) x_freq = fft.fftshift(x_freq, dim=(-3, -2, -1)) noise_freq = fft.fftn(noise, dim=(-3, -2, -1)) noise_freq = fft.fftshift(noise_freq, dim=(-3, -2, -1)) high_pass_filter = 1 - low_pass_filter x_freq_low = x_freq * low_pass_filter noise_freq_high = noise_freq * high_pass_filter x_freq_mixed = x_freq_low + noise_freq_high x_freq_mixed = fft.ifftshift(x_freq_mixed, dim=(-3, -2, -1)) x_mixed = fft.ifftn(x_freq_mixed, dim=(-3, -2, -1)).real return x_mixed def _apply_free_init(self, latents: torch.Tensor, free_init_iteration: int, num_inference_steps: int, device: torch.device, dtype: torch.dtype, generator: torch.Generator): if free_init_iteration == 0: self._free_init_initial_noise = latents.detach().clone() else: latent_shape = latents.shape 
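# For FreeInit iterations after the first, the block below (1) builds a low-pass frequency filter
# over the (frame, height, width) dimensions of the latents, (2) diffuses the current latents back
# to the final training timestep using the initial noise saved at iteration 0, and (3) mixes the
# low-frequency part of that re-noised signal with the high-frequency part of freshly sampled
# Gaussian noise via `_apply_freq_filter`, optionally shortening the schedule when
# `use_fast_sampling` is enabled.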
free_init_filter_shape = (1, *latent_shape[1:]) free_init_freq_filter = self._get_free_init_freq_filter(shape=free_init_filter_shape, device=device, filter_type=self._free_init_method, order=self._free_init_order, spatial_stop_frequency=self._free_init_spatial_stop_frequency, temporal_stop_frequency=self._free_init_temporal_stop_frequency) current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 diffuse_timesteps = torch.full((latent_shape[0],), current_diffuse_timestep).long() z_t = self.scheduler.add_noise(original_samples=latents, noise=self._free_init_initial_noise, timesteps=diffuse_timesteps.to(device)).to(dtype=torch.float32) z_rand = randn_tensor(shape=latent_shape, generator=generator, device=device, dtype=torch.float32) latents = self._apply_freq_filter(z_t, z_rand, low_pass_filter=free_init_freq_filter) latents = latents.to(dtype) if self._free_init_use_fast_sampling: num_inference_steps = max(1, int(num_inference_steps / self._free_init_num_iters * (free_init_iteration + 1))) if num_inference_steps > 0: self.scheduler.set_timesteps(num_inference_steps, device=device) return (latents, self.scheduler.timesteps) # File: diffusers-main/src/diffusers/pipelines/free_noise_utils.py from typing import Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from ..models.attention import BasicTransformerBlock, FreeNoiseTransformerBlock from ..models.resnet import Downsample2D, ResnetBlock2D, Upsample2D from ..models.transformers.transformer_2d import Transformer2DModel from ..models.unets.unet_motion_model import AnimateDiffTransformer3D, CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion from ..pipelines.pipeline_utils import DiffusionPipeline from ..utils import logging from ..utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) class SplitInferenceModule(nn.Module): def __init__(self, module: nn.Module, split_size: int=1, split_dim: int=0, input_kwargs_to_split: List[str]=['hidden_states']) -> None: super().__init__() self.module = module self.split_size = split_size self.split_dim = split_dim self.input_kwargs_to_split = set(input_kwargs_to_split) def forward(self, *args, **kwargs) -> Union[torch.Tensor, Tuple[torch.Tensor]]: split_inputs = {} for key in list(kwargs.keys()): if key not in self.input_kwargs_to_split or not torch.is_tensor(kwargs[key]): continue split_inputs[key] = torch.split(kwargs[key], self.split_size, self.split_dim) kwargs.pop(key) results = [] for split_input in zip(*split_inputs.values()): inputs = dict(zip(split_inputs.keys(), split_input)) inputs.update(kwargs) intermediate_tensor_or_tensor_tuple = self.module(*args, **inputs) results.append(intermediate_tensor_or_tensor_tuple) if isinstance(results[0], torch.Tensor): return torch.cat(results, dim=self.split_dim) elif isinstance(results[0], tuple): return tuple([torch.cat(x, dim=self.split_dim) for x in zip(*results)]) else: raise ValueError("In order to use the SplitInferenceModule, it is necessary for the underlying `module` to either return a torch.Tensor or a tuple of torch.Tensor's.") class AnimateDiffFreeNoiseMixin: def _enable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]): for motion_module in block.motion_modules: num_transformer_blocks = len(motion_module.transformer_blocks) for i in range(num_transformer_blocks): if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock): 
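# This block was already converted to a FreeNoiseTransformerBlock on an earlier call, so only its
# context length, stride, and weighting scheme are refreshed here. In the else branch that follows,
# a new FreeNoiseTransformerBlock is constructed from the BasicTransformerBlock's configuration and
# the original weights are copied over with `load_state_dict(..., strict=True)`.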
motion_module.transformer_blocks[i].set_free_noise_properties(self._free_noise_context_length, self._free_noise_context_stride, self._free_noise_weighting_scheme) else: assert isinstance(motion_module.transformer_blocks[i], BasicTransformerBlock) basic_transfomer_block = motion_module.transformer_blocks[i] motion_module.transformer_blocks[i] = FreeNoiseTransformerBlock(dim=basic_transfomer_block.dim, num_attention_heads=basic_transfomer_block.num_attention_heads, attention_head_dim=basic_transfomer_block.attention_head_dim, dropout=basic_transfomer_block.dropout, cross_attention_dim=basic_transfomer_block.cross_attention_dim, activation_fn=basic_transfomer_block.activation_fn, attention_bias=basic_transfomer_block.attention_bias, only_cross_attention=basic_transfomer_block.only_cross_attention, double_self_attention=basic_transfomer_block.double_self_attention, positional_embeddings=basic_transfomer_block.positional_embeddings, num_positional_embeddings=basic_transfomer_block.num_positional_embeddings, context_length=self._free_noise_context_length, context_stride=self._free_noise_context_stride, weighting_scheme=self._free_noise_weighting_scheme).to(device=self.device, dtype=self.dtype) motion_module.transformer_blocks[i].load_state_dict(basic_transfomer_block.state_dict(), strict=True) motion_module.transformer_blocks[i].set_chunk_feed_forward(basic_transfomer_block._chunk_size, basic_transfomer_block._chunk_dim) def _disable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]): for motion_module in block.motion_modules: num_transformer_blocks = len(motion_module.transformer_blocks) for i in range(num_transformer_blocks): if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock): free_noise_transfomer_block = motion_module.transformer_blocks[i] motion_module.transformer_blocks[i] = BasicTransformerBlock(dim=free_noise_transfomer_block.dim, num_attention_heads=free_noise_transfomer_block.num_attention_heads, attention_head_dim=free_noise_transfomer_block.attention_head_dim, dropout=free_noise_transfomer_block.dropout, cross_attention_dim=free_noise_transfomer_block.cross_attention_dim, activation_fn=free_noise_transfomer_block.activation_fn, attention_bias=free_noise_transfomer_block.attention_bias, only_cross_attention=free_noise_transfomer_block.only_cross_attention, double_self_attention=free_noise_transfomer_block.double_self_attention, positional_embeddings=free_noise_transfomer_block.positional_embeddings, num_positional_embeddings=free_noise_transfomer_block.num_positional_embeddings).to(device=self.device, dtype=self.dtype) motion_module.transformer_blocks[i].load_state_dict(free_noise_transfomer_block.state_dict(), strict=True) motion_module.transformer_blocks[i].set_chunk_feed_forward(free_noise_transfomer_block._chunk_size, free_noise_transfomer_block._chunk_dim) def _check_inputs_free_noise(self, prompt, negative_prompt, prompt_embeds, negative_prompt_embeds, num_frames) -> None: if not isinstance(prompt, (str, dict)): raise ValueError(f'Expected `prompt` to have type `str` or `dict` but found type(prompt)={type(prompt)!r}') if negative_prompt is not None: if not isinstance(negative_prompt, (str, dict)): raise ValueError(f'Expected `negative_prompt` to have type `str` or `dict` but found type(negative_prompt)={type(negative_prompt)!r}') if prompt_embeds is not None or negative_prompt_embeds is not None: raise ValueError('`prompt_embeds` and `negative_prompt_embeds` is not supported in FreeNoise yet.') 
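# When FreeNoise is enabled, `prompt` may also be a dict mapping 0-based frame indices to prompts,
# e.g. {0: "a panda surfing", 48: "a panda snowboarding"} (illustrative values only). The checks
# below require integer keys, string values, an entry for frame 0, and all indices < num_frames;
# `_encode_prompt_free_noise` then interpolates prompt embeddings between the keyed frames using
# `_free_noise_prompt_interpolation_callback` (linear interpolation by default).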
frame_indices = [isinstance(x, int) for x in prompt.keys()] frame_prompts = [isinstance(x, str) for x in prompt.values()] min_frame = min(list(prompt.keys())) max_frame = max(list(prompt.keys())) if not all(frame_indices): raise ValueError('Expected integer keys in `prompt` dict for FreeNoise.') if not all(frame_prompts): raise ValueError('Expected str values in `prompt` dict for FreeNoise.') if min_frame != 0: raise ValueError('The minimum frame index in `prompt` dict must be 0 as a starting prompt is necessary.') if max_frame >= num_frames: raise ValueError(f'The maximum frame index in `prompt` dict must be lesser than num_frames={num_frames!r} and follow 0-based indexing.') def _encode_prompt_free_noise(self, prompt: Union[str, Dict[int, str]], num_frames: int, device: torch.device, num_videos_per_prompt: int, do_classifier_free_guidance: bool, negative_prompt: Optional[Union[str, Dict[int, str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None) -> torch.Tensor: if negative_prompt is None: negative_prompt = '' if isinstance(prompt, str): prompt = {0: prompt} if isinstance(negative_prompt, str): negative_prompt = {0: negative_prompt} self._check_inputs_free_noise(prompt, negative_prompt, prompt_embeds, negative_prompt_embeds, num_frames) prompt = dict(sorted(prompt.items())) negative_prompt = dict(sorted(negative_prompt.items())) prompt[num_frames - 1] = prompt[list(prompt.keys())[-1]] negative_prompt[num_frames - 1] = negative_prompt[list(negative_prompt.keys())[-1]] frame_indices = list(prompt.keys()) frame_prompts = list(prompt.values()) frame_negative_indices = list(negative_prompt.keys()) frame_negative_prompts = list(negative_prompt.values()) (prompt_embeds, _) = self.encode_prompt(prompt=frame_prompts, device=device, num_images_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=False, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=clip_skip) shape = (num_frames, *prompt_embeds.shape[1:]) prompt_interpolation_embeds = prompt_embeds.new_zeros(shape) for i in range(len(frame_indices) - 1): start_frame = frame_indices[i] end_frame = frame_indices[i + 1] start_tensor = prompt_embeds[i].unsqueeze(0) end_tensor = prompt_embeds[i + 1].unsqueeze(0) prompt_interpolation_embeds[start_frame:end_frame + 1] = self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor) negative_prompt_embeds = None negative_prompt_interpolation_embeds = None if do_classifier_free_guidance: (_, negative_prompt_embeds) = self.encode_prompt(prompt=[''] * len(frame_negative_prompts), device=device, num_images_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=True, negative_prompt=frame_negative_prompts, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=clip_skip) negative_prompt_interpolation_embeds = negative_prompt_embeds.new_zeros(shape) for i in range(len(frame_negative_indices) - 1): start_frame = frame_negative_indices[i] end_frame = frame_negative_indices[i + 1] start_tensor = negative_prompt_embeds[i].unsqueeze(0) end_tensor = negative_prompt_embeds[i + 1].unsqueeze(0) negative_prompt_interpolation_embeds[start_frame:end_frame + 1] = self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor) prompt_embeds = prompt_interpolation_embeds negative_prompt_embeds = negative_prompt_interpolation_embeds if 
do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return (prompt_embeds, negative_prompt_embeds) def _prepare_latents_free_noise(self, batch_size: int, num_channels_latents: int, num_frames: int, height: int, width: int, dtype: torch.dtype, device: torch.device, generator: Optional[torch.Generator]=None, latents: Optional[torch.Tensor]=None): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') context_num_frames = self._free_noise_context_length if self._free_noise_noise_type == 'repeat_context' else num_frames shape = (batch_size, num_channels_latents, context_num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) if self._free_noise_noise_type == 'random': return latents else: if latents.size(2) == num_frames: return latents elif latents.size(2) != self._free_noise_context_length: raise ValueError(f'You have passed `latents` as a parameter to FreeNoise. The expected number of frames is either {num_frames} or {self._free_noise_context_length}, but found {latents.size(2)}') latents = latents.to(device) if self._free_noise_noise_type == 'shuffle_context': for i in range(self._free_noise_context_length, num_frames, self._free_noise_context_stride): window_start = max(0, i - self._free_noise_context_length) window_end = min(num_frames, window_start + self._free_noise_context_stride) window_length = window_end - window_start if window_length == 0: break indices = torch.LongTensor(list(range(window_start, window_end))) shuffled_indices = indices[torch.randperm(window_length, generator=generator)] current_start = i current_end = min(num_frames, current_start + window_length) if current_end == current_start + window_length: latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices] else: prefix_length = current_end - current_start shuffled_indices = shuffled_indices[:prefix_length] latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices] elif self._free_noise_noise_type == 'repeat_context': num_repeats = (num_frames + self._free_noise_context_length - 1) // self._free_noise_context_length latents = torch.cat([latents] * num_repeats, dim=2) latents = latents[:, :, :num_frames] return latents def _lerp(self, start_index: int, end_index: int, start_tensor: torch.Tensor, end_tensor: torch.Tensor) -> torch.Tensor: num_indices = end_index - start_index + 1 interpolated_tensors = [] for i in range(num_indices): alpha = i / (num_indices - 1) interpolated_tensor = (1 - alpha) * start_tensor + alpha * end_tensor interpolated_tensors.append(interpolated_tensor) interpolated_tensors = torch.cat(interpolated_tensors) return interpolated_tensors def enable_free_noise(self, context_length: Optional[int]=16, context_stride: int=4, weighting_scheme: str='pyramid', noise_type: str='shuffle_context', prompt_interpolation_callback: Optional[Callable[[DiffusionPipeline, int, int, torch.Tensor, torch.Tensor], torch.Tensor]]=None) -> None: allowed_weighting_scheme = ['flat', 'pyramid', 'delayed_reverse_sawtooth'] allowed_noise_type = ['shuffle_context', 'repeat_context', 'random'] if context_length > self.motion_adapter.config.motion_max_seq_length: logger.warning(f'You have set 
context_length={context_length!r} which is greater than self.motion_adapter.config.motion_max_seq_length={self.motion_adapter.config.motion_max_seq_length!r}. This can lead to bad generation results.') if weighting_scheme not in allowed_weighting_scheme: raise ValueError(f'The parameter `weighting_scheme` must be one of {allowed_weighting_scheme}, but got weighting_scheme={weighting_scheme!r}') if noise_type not in allowed_noise_type: raise ValueError(f'The parameter `noise_type` must be one of {allowed_noise_type}, but got noise_type={noise_type!r}') self._free_noise_context_length = context_length or self.motion_adapter.config.motion_max_seq_length self._free_noise_context_stride = context_stride self._free_noise_weighting_scheme = weighting_scheme self._free_noise_noise_type = noise_type self._free_noise_prompt_interpolation_callback = prompt_interpolation_callback or self._lerp if hasattr(self.unet.mid_block, 'motion_modules'): blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] else: blocks = [*self.unet.down_blocks, *self.unet.up_blocks] for block in blocks: self._enable_free_noise_in_block(block) def disable_free_noise(self) -> None: self._free_noise_context_length = None if hasattr(self.unet.mid_block, 'motion_modules'): blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] else: blocks = [*self.unet.down_blocks, *self.unet.up_blocks] blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] for block in blocks: self._disable_free_noise_in_block(block) def _enable_split_inference_motion_modules_(self, motion_modules: List[AnimateDiffTransformer3D], spatial_split_size: int) -> None: for motion_module in motion_modules: motion_module.proj_in = SplitInferenceModule(motion_module.proj_in, spatial_split_size, 0, ['input']) for i in range(len(motion_module.transformer_blocks)): motion_module.transformer_blocks[i] = SplitInferenceModule(motion_module.transformer_blocks[i], spatial_split_size, 0, ['hidden_states', 'encoder_hidden_states']) motion_module.proj_out = SplitInferenceModule(motion_module.proj_out, spatial_split_size, 0, ['input']) def _enable_split_inference_attentions_(self, attentions: List[Transformer2DModel], temporal_split_size: int) -> None: for i in range(len(attentions)): attentions[i] = SplitInferenceModule(attentions[i], temporal_split_size, 0, ['hidden_states', 'encoder_hidden_states']) def _enable_split_inference_resnets_(self, resnets: List[ResnetBlock2D], temporal_split_size: int) -> None: for i in range(len(resnets)): resnets[i] = SplitInferenceModule(resnets[i], temporal_split_size, 0, ['input_tensor', 'temb']) def _enable_split_inference_samplers_(self, samplers: Union[List[Downsample2D], List[Upsample2D]], temporal_split_size: int) -> None: for i in range(len(samplers)): samplers[i] = SplitInferenceModule(samplers[i], temporal_split_size, 0, ['hidden_states']) def enable_free_noise_split_inference(self, spatial_split_size: int=256, temporal_split_size: int=16) -> None: blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] for block in blocks: if getattr(block, 'motion_modules', None) is not None: self._enable_split_inference_motion_modules_(block.motion_modules, spatial_split_size) if getattr(block, 'attentions', None) is not None: self._enable_split_inference_attentions_(block.attentions, temporal_split_size) if getattr(block, 'resnets', None) is not None: self._enable_split_inference_resnets_(block.resnets, temporal_split_size) if getattr(block, 'downsamplers', None) is 
not None: self._enable_split_inference_samplers_(block.downsamplers, temporal_split_size) if getattr(block, 'upsamplers', None) is not None: self._enable_split_inference_samplers_(block.upsamplers, temporal_split_size) @property def free_noise_enabled(self): return hasattr(self, '_free_noise_context_length') and self._free_noise_context_length is not None # File: diffusers-main/src/diffusers/pipelines/hunyuandit/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_hunyuandit'] = ['HunyuanDiTPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_hunyuandit import HunyuanDiTPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py import inspect from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import BertModel, BertTokenizer, CLIPImageProcessor, MT5Tokenizer, T5EncoderModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, HunyuanDiT2DModel from ...models.embeddings import get_2d_rotary_pos_embed from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDPMScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import HunyuanDiTPipeline\n\n >>> pipe = HunyuanDiTPipeline.from_pretrained(\n ... "Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16\n ... 
)\n >>> pipe.to("cuda")\n\n >>> # You may also use English prompt as HunyuanDiT supports both English and Chinese\n >>> # prompt = "An astronaut riding a horse"\n >>> prompt = "一个宇航员在骑马"\n >>> image = pipe(prompt).images[0]\n ```\n' STANDARD_RATIO = np.array([1.0, 4.0 / 3.0, 3.0 / 4.0, 16.0 / 9.0, 9.0 / 16.0]) STANDARD_SHAPE = [[(1024, 1024), (1280, 1280)], [(1024, 768), (1152, 864), (1280, 960)], [(768, 1024), (864, 1152), (960, 1280)], [(1280, 768)], [(768, 1280)]] STANDARD_AREA = [np.array([w * h for (w, h) in shapes]) for shapes in STANDARD_SHAPE] SUPPORTED_SHAPE = [(1024, 1024), (1280, 1280), (1024, 768), (1152, 864), (1280, 960), (768, 1024), (864, 1152), (960, 1280), (1280, 768), (768, 1280)] def map_to_standard_shapes(target_width, target_height): target_ratio = target_width / target_height closest_ratio_idx = np.argmin(np.abs(STANDARD_RATIO - target_ratio)) closest_area_idx = np.argmin(np.abs(STANDARD_AREA[closest_ratio_idx] - target_width * target_height)) (width, height) = STANDARD_SHAPE[closest_ratio_idx][closest_area_idx] return (width, height) def get_resize_crop_region_for_grid(src, tgt_size): th = tw = tgt_size (h, w) = src r = h / w if r > 1: resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return ((crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)) def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class HunyuanDiTPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->text_encoder_2->transformer->vae' _optional_components = ['safety_checker', 'feature_extractor', 'text_encoder_2', 'tokenizer_2', 'text_encoder', 'tokenizer'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'prompt_embeds_2', 'negative_prompt_embeds_2'] def __init__(self, vae: AutoencoderKL, text_encoder: BertModel, tokenizer: BertTokenizer, transformer: HunyuanDiT2DModel, scheduler: DDPMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True, text_encoder_2=T5EncoderModel, tokenizer_2=MT5Tokenizer): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, text_encoder_2=text_encoder_2) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.default_sample_size = self.transformer.config.sample_size if hasattr(self, 'transformer') and self.transformer is not None else 128 def encode_prompt(self, prompt: str, device: torch.device=None, dtype: torch.dtype=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, max_sequence_length: Optional[int]=None, text_encoder_index: int=0): if dtype is None: if self.text_encoder_2 is not None: dtype = self.text_encoder_2.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None if device is None: device = self._execution_device tokenizers = [self.tokenizer, self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = tokenizers[text_encoder_index] text_encoder = text_encoders[text_encoder_index] if max_sequence_length is None: if text_encoder_index == 0: max_length = 77 if text_encoder_index == 1: max_length = 256 else: max_length = max_sequence_length if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0] prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got 
{type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return (prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, prompt_embeds_2=None, negative_prompt_embeds_2=None, prompt_attention_mask_2=None, negative_prompt_attention_mask_2=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is None and prompt_embeds_2 is None: raise ValueError('Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError('Must provide `prompt_attention_mask` when specifying `prompt_embeds`.') if prompt_embeds_2 is not None and prompt_attention_mask_2 is None: raise ValueError('Must provide `prompt_attention_mask_2` when specifying `prompt_embeds_2`.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError('Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.') if negative_prompt_embeds_2 is not None and negative_prompt_attention_mask_2 is None: raise ValueError('Must provide `negative_prompt_attention_mask_2` when specifying `negative_prompt_embeds_2`.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds_2 is not None and negative_prompt_embeds_2 is not None: if prompt_embeds_2.shape != negative_prompt_embeds_2.shape: raise ValueError(f'`prompt_embeds_2` and `negative_prompt_embeds_2` must have the same shape when passed directly, but got: `prompt_embeds_2` {prompt_embeds_2.shape} != `negative_prompt_embeds_2` {negative_prompt_embeds_2.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_embeds_2: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds_2: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, prompt_attention_mask_2: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask_2: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=(1024, 1024), target_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), use_resolution_binning: bool=True): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor height = int(height // 16 * 16) width = int(width // 16 * 16) if use_resolution_binning and (height, width) not in SUPPORTED_SHAPE: (width, height) = map_to_standard_shapes(width, height) height = int(height) width = int(width) logger.warning(f'Reshaped to (height, width)=({height}, {width}), Supported shapes are {SUPPORTED_SHAPE}') self.check_inputs(prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) = self.encode_prompt(prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, 
negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=77, text_encoder_index=0) (prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2) = self.encode_prompt(prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds_2, negative_prompt_embeds=negative_prompt_embeds_2, prompt_attention_mask=prompt_attention_mask_2, negative_prompt_attention_mask=negative_prompt_attention_mask_2, max_sequence_length=256, text_encoder_index=1) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) grid_height = height // 8 // self.transformer.config.patch_size grid_width = width // 8 // self.transformer.config.patch_size base_size = 512 // 8 // self.transformer.config.patch_size grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size) image_rotary_emb = get_2d_rotary_pos_embed(self.transformer.inner_dim // self.transformer.num_heads, grid_crops_coords, (grid_height, grid_width)) style = torch.tensor([0], device=device) target_size = target_size or (height, width) add_time_ids = list(original_size + target_size + crops_coords_top_left) add_time_ids = torch.tensor([add_time_ids], dtype=prompt_embeds.dtype) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) prompt_attention_mask_2 = torch.cat([negative_prompt_attention_mask_2, prompt_attention_mask_2]) add_time_ids = torch.cat([add_time_ids] * 2, dim=0) style = torch.cat([style] * 2, dim=0) prompt_embeds = prompt_embeds.to(device=device) prompt_attention_mask = prompt_attention_mask.to(device=device) prompt_embeds_2 = prompt_embeds_2.to(device=device) prompt_attention_mask_2 = prompt_attention_mask_2.to(device=device) add_time_ids = add_time_ids.to(dtype=prompt_embeds.dtype, device=device).repeat(batch_size * num_images_per_prompt, 1) style = style.to(device=device).repeat(batch_size * num_images_per_prompt) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to(dtype=latent_model_input.dtype) noise_pred = self.transformer(latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, text_embedding_mask=prompt_attention_mask, encoder_hidden_states_t5=prompt_embeds_2, text_embedding_mask_t5=prompt_attention_mask_2, image_meta_size=add_time_ids, style=style, image_rotary_emb=image_rotary_emb, return_dict=False)[0] (noise_pred, _) = 
noise_pred.chunk(2, dim=1) if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) prompt_embeds_2 = callback_outputs.pop('prompt_embeds_2', prompt_embeds_2) negative_prompt_embeds_2 = callback_outputs.pop('negative_prompt_embeds_2', negative_prompt_embeds_2) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/i2vgen_xl/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_i2vgen_xl'] = ['I2VGenXLPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_i2vgen_xl import I2VGenXLPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py import inspect from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...models import AutoencoderKL from ...models.unets.unet_i2vgen_xl 
import I2VGenXLUNet from ...schedulers import DDIMScheduler from ...utils import BaseOutput, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import I2VGenXLPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> pipeline = I2VGenXLPipeline.from_pretrained(\n ... "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"\n ... )\n >>> pipeline.enable_model_cpu_offload()\n\n >>> image_url = (\n ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/i2vgen_xl_images/img_0009.png"\n ... )\n >>> image = load_image(image_url).convert("RGB")\n\n >>> prompt = "Papers were floating in the air on a table in the library"\n >>> negative_prompt = "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms"\n >>> generator = torch.manual_seed(8888)\n\n >>> frames = pipeline(\n ... prompt=prompt,\n ... image=image,\n ... num_inference_steps=50,\n ... negative_prompt=negative_prompt,\n ... guidance_scale=9.0,\n ... generator=generator,\n ... ).frames[0]\n >>> video_path = export_to_gif(frames, "i2v.gif")\n ```\n' @dataclass class I2VGenXLPipelineOutput(BaseOutput): frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] class I2VGenXLPipeline(DiffusionPipeline, StableDiffusionMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, image_encoder: CLIPVisionModelWithProjection, feature_extractor: CLIPImageProcessor, unet: I2VGenXLUNet, scheduler: DDIMScheduler): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, image_encoder=image_encoder, feature_extractor=feature_extractor, unet=unet, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor, do_resize=False) @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 def encode_prompt(self, prompt, device, num_videos_per_prompt, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, clip_skip: Optional[int]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and 
self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1) if self.do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None if clip_skip is None: negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] else: negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) negative_prompt_embeds = negative_prompt_embeds[-1][-(clip_skip + 1)] negative_prompt_embeds = self.text_encoder.text_model.final_layer_norm(negative_prompt_embeds) if self.do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return (prompt_embeds, negative_prompt_embeds) def _encode_image(self, image, device, num_videos_per_prompt): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.video_processor.pil_to_numpy(image) image = self.video_processor.numpy_to_pt(image) image = self.feature_extractor(images=image, do_normalize=True, do_center_crop=False, do_resize=False, do_rescale=False, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.unsqueeze(1) (bs_embed, 
seq_len, _) = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) if self.do_classifier_free_guidance: negative_image_embeddings = torch.zeros_like(image_embeddings) image_embeddings = torch.cat([negative_image_embeddings, image_embeddings]) return image_embeddings def decode_latents(self, latents, decode_chunk_size=None): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) if decode_chunk_size is not None: frames = [] for i in range(0, latents.shape[0], decode_chunk_size): frame = self.vae.decode(latents[i:i + decode_chunk_size]).sample frames.append(frame) image = torch.cat(frames, dim=0) else: image = self.vae.decode(latents).sample decode_shape = (batch_size, num_frames, -1) + image.shape[2:] video = image[None, :].reshape(decode_shape).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is {type(image)}') def prepare_image_latents(self, image, device, num_frames, num_videos_per_prompt): image = image.to(device=device) image_latents = self.vae.encode(image).latent_dist.sample() image_latents = image_latents * self.vae.config.scaling_factor image_latents = image_latents.unsqueeze(2) frame_position_mask = [] for frame_idx in range(num_frames - 1): scale = (frame_idx + 1) / (num_frames - 1) frame_position_mask.append(torch.ones_like(image_latents[:, :, :1]) * scale) if frame_position_mask: frame_position_mask = torch.cat(frame_position_mask, dim=2) image_latents = torch.cat([image_latents, frame_position_mask], dim=2) image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1, 1) if self.do_classifier_free_guidance: image_latents = torch.cat([image_latents] * 2) return image_latents def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, height: Optional[int]=704, width: Optional[int]=1280, target_fps: Optional[int]=16, num_frames: int=16, num_inference_steps: int=50, guidance_scale: float=9.0, negative_prompt: Optional[Union[str, List[str]]]=None, eta: float=0.0, num_videos_per_prompt: Optional[int]=1, decode_chunk_size: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=1): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, image, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device self._guidance_scale = guidance_scale (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_videos_per_prompt, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) cropped_image = _center_crop_wide(image, (width, width)) cropped_image = _resize_bilinear(cropped_image, (self.feature_extractor.crop_size['width'], self.feature_extractor.crop_size['height'])) image_embeddings = self._encode_image(cropped_image, device, num_videos_per_prompt) resized_image = _center_crop_wide(image, (width, height)) image = self.video_processor.preprocess(resized_image).to(device=device, dtype=image_embeddings.dtype) image_latents = self.prepare_image_latents(image, device=device, num_frames=num_frames, num_videos_per_prompt=num_videos_per_prompt) if self.do_classifier_free_guidance: fps_tensor = torch.tensor([target_fps, target_fps]).to(device) else: fps_tensor = torch.tensor([target_fps]).to(device) fps_tensor = fps_tensor.repeat(batch_size * num_videos_per_prompt, 1).ravel() self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, fps=fps_tensor, 
image_latents=image_latents, image_embeddings=image_embeddings, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) (batch_size, channel, frames, width, height) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channel, width, height) noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channel, width, height) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample latents = latents[None, :].reshape(batch_size, frames, channel, width, height).permute(0, 2, 1, 3, 4) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents, decode_chunk_size=decode_chunk_size) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return I2VGenXLPipelineOutput(frames=video) def _convert_pt_to_pil(image: Union[torch.Tensor, List[torch.Tensor]]): if isinstance(image, list) and isinstance(image[0], torch.Tensor): image = torch.cat(image, 0) if isinstance(image, torch.Tensor): if image.ndim == 3: image = image.unsqueeze(0) image_numpy = VaeImageProcessor.pt_to_numpy(image) image_pil = VaeImageProcessor.numpy_to_pil(image_numpy) image = image_pil return image def _resize_bilinear(image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]], resolution: Tuple[int, int]): image = _convert_pt_to_pil(image) if isinstance(image, list): image = [u.resize(resolution, PIL.Image.BILINEAR) for u in image] else: image = image.resize(resolution, PIL.Image.BILINEAR) return image def _center_crop_wide(image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]], resolution: Tuple[int, int]): image = _convert_pt_to_pil(image) if isinstance(image, list): scale = min(image[0].size[0] / resolution[0], image[0].size[1] / resolution[1]) image = [u.resize((round(u.width // scale), round(u.height // scale)), resample=PIL.Image.BOX) for u in image] x1 = (image[0].width - resolution[0]) // 2 y1 = (image[0].height - resolution[1]) // 2 image = [u.crop((x1, y1, x1 + resolution[0], y1 + resolution[1])) for u in image] return image else: scale = min(image.size[0] / resolution[0], image.size[1] / resolution[1]) image = image.resize((round(image.width // scale), round(image.height // scale)), resample=PIL.Image.BOX) x1 = (image.width - resolution[0]) // 2 y1 = (image.height - resolution[1]) // 2 image = image.crop((x1, y1, x1 + resolution[0], y1 + resolution[1])) return image # File: diffusers-main/src/diffusers/pipelines/kandinsky/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_kandinsky'] = ['KandinskyPipeline'] 
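# Editor's note: the `_import_structure` mapping being filled in here is handed to the `_LazyModule` imported from `...utils` (see the `else` branch at the end of this file), so the heavy pipeline submodules are only imported on first attribute access. The snippet below is a minimal, self-contained sketch of that pattern for illustration only; `_LazyModuleSketch` is a hypothetical name and is not part of diffusers.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Resolve attributes to objects from submodules, importing each submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"pipeline_kandinsky": ["KandinskyPipeline"]} -> {"KandinskyPipeline": "pipeline_kandinsky"}
        self._attr_to_submodule = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value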
_import_structure['pipeline_kandinsky_combined'] = ['KandinskyCombinedPipeline', 'KandinskyImg2ImgCombinedPipeline', 'KandinskyInpaintCombinedPipeline'] _import_structure['pipeline_kandinsky_img2img'] = ['KandinskyImg2ImgPipeline'] _import_structure['pipeline_kandinsky_inpaint'] = ['KandinskyInpaintPipeline'] _import_structure['pipeline_kandinsky_prior'] = ['KandinskyPriorPipeline', 'KandinskyPriorPipelineOutput'] _import_structure['text_encoder'] = ['MultilingualCLIP'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_combined import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py from typing import Callable, List, Optional, Union import torch from transformers import XLMRobertaTokenizer from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... 
).images\n\n >>> image[0].save("cat.png")\n ```\n' def get_new_h_w(h, w, scale_factor=8): new_h = h // scale_factor ** 2 if h % scale_factor ** 2 != 0: new_h += 1 new_w = w // scale_factor ** 2 if w % scale_factor ** 2 != 0: new_w += 1 return (new_h * scale_factor, new_w * scale_factor) class KandinskyPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->unet->movq' def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel): super().__init__() self.register_modules(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids.to(device) text_mask = text_inputs.attention_mask.to(device) (prompt_embeds, text_encoder_hidden_states) = self.text_encoder(input_ids=text_input_ids, attention_mask=text_mask) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') uncond_text_input_ids = uncond_input.input_ids.to(device) uncond_text_mask = uncond_input.attention_mask.to(device) (negative_prompt_embeds, uncond_text_encoder_hidden_states) = self.text_encoder(input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_encoder_hidden_states, text_mask) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], image_embeds: Union[torch.Tensor, List[torch.Tensor]], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], negative_prompt: Optional[Union[str, List[str]]]=None, height: int=512, width: int=512, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, text_encoder_hidden_states, _) = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt) if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=prompt_embeds.dtype, device=device) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps_tensor = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels (height, width) = get_new_h_w(height, width, self.movq_scale_factor) latents = self.prepare_latents((batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler) for (i, t) in enumerate(self.progress_bar(timesteps_tensor)): latent_model_input 
= torch.cat([latents] * 2) if do_classifier_free_guidance else latents added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds} noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred, variance_pred) = noise_pred.split(latents.shape[1], dim=1) (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (_, variance_pred_text) = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): (noise_pred, _) = noise_pred.split(latents.shape[1], dim=1) latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) image = self.movq.decode(latents, force_not_quantize=True)['sample'] self.maybe_free_model_hooks() if output_type not in ['pt', 'np', 'pil']: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}') if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_combined.py from typing import Callable, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, XLMRobertaTokenizer from ...models import PriorTransformer, UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler from ...utils import replace_example_docstring from ..pipeline_utils import DiffusionPipeline from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline from .text_encoder import MultilingualCLIP TEXT2IMAGE_EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n from diffusers import AutoPipelineForText2Image\n import torch\n\n pipe = AutoPipelineForText2Image.from_pretrained(\n "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16\n )\n pipe.enable_model_cpu_offload()\n\n prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"\n\n image = pipe(prompt=prompt, num_inference_steps=25).images[0]\n ```\n' IMAGE2IMAGE_EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n from diffusers import AutoPipelineForImage2Image\n import torch\n import requests\n from io import BytesIO\n from PIL import Image\n import os\n\n pipe = AutoPipelineForImage2Image.from_pretrained(\n "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16\n )\n pipe.enable_model_cpu_offload()\n\n prompt = "A fantasy landscape, Cinematic lighting"\n negative_prompt = "low quality, bad quality"\n\n url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n\n response = requests.get(url)\n image 
= Image.open(BytesIO(response.content)).convert("RGB")\n image.thumbnail((768, 768))\n\n image = pipe(prompt=prompt, image=image, num_inference_steps=25).images[0]\n ```\n' INPAINT_EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n from diffusers import AutoPipelineForInpainting\n from diffusers.utils import load_image\n import torch\n import numpy as np\n\n pipe = AutoPipelineForInpainting.from_pretrained(\n "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16\n )\n pipe.enable_model_cpu_offload()\n\n prompt = "A fantasy landscape, Cinematic lighting"\n negative_prompt = "low quality, bad quality"\n\n original_image = load_image(\n "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"\n )\n\n mask = np.zeros((768, 768), dtype=np.float32)\n # Let\'s mask out an area above the cat\'s head\n mask[:250, 250:-250] = 1\n\n image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]\n ```\n' class KandinskyCombinedPipeline(DiffusionPipeline): _load_connected_pipes = True model_cpu_offload_seq = 'text_encoder->unet->movq->prior_prior->prior_image_encoder->prior_text_encoder' _exclude_from_cpu_offload = ['prior_prior'] def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor): super().__init__() self.register_modules(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor) self.prior_pipe = KandinskyPriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor) self.decoder_pipe = KandinskyPipeline(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]]=None, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, height: int=512, width: int=512, prior_guidance_scale: float=4.0, prior_num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None,
output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True): prior_outputs = self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = image_embeds.shape[0] // len(prompt) * prompt outputs = self.decoder_pipe(prompt=prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict) self.maybe_free_model_hooks() return outputs class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline): _load_connected_pipes = True model_cpu_offload_seq = 'prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq' _exclude_from_cpu_offload = ['prior_prior'] def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor): super().__init__() self.register_modules(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor) self.prior_pipe = KandinskyPriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor) self.decoder_pipe = KandinskyImg2ImgPipeline(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]]=None, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, strength: float=0.3, height: 
int=512, width: int=512, prior_guidance_scale: float=4.0, prior_num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True): prior_outputs = self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt image = [image] if isinstance(image, PIL.Image.Image) else image if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = image_embeds.shape[0] // len(prompt) * prompt if isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and (image_embeds.shape[0] % len(image) == 0): image = image_embeds.shape[0] // len(image) * image outputs = self.decoder_pipe(prompt=prompt, image=image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, strength=strength, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict) self.maybe_free_model_hooks() return outputs class KandinskyInpaintCombinedPipeline(DiffusionPipeline): _load_connected_pipes = True model_cpu_offload_seq = 'prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq' _exclude_from_cpu_offload = ['prior_prior'] def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor): super().__init__() self.register_modules(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor) self.prior_pipe = KandinskyPriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor) self.decoder_pipe = KandinskyInpaintPipeline(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id=0): self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs):
self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], mask_image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]]=None, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, height: int=512, width: int=512, prior_guidance_scale: float=4.0, prior_num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True): prior_outputs = self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt image = [image] if isinstance(image, PIL.Image.Image) else image mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = image_embeds.shape[0] // len(prompt) * prompt if isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and (image_embeds.shape[0] % len(image) == 0): image = image_embeds.shape[0] // len(image) * image if isinstance(mask_image, (list, tuple)) and len(mask_image) < image_embeds.shape[0] and (image_embeds.shape[0] % len(mask_image) == 0): mask_image = image_embeds.shape[0] // len(mask_image) * mask_image outputs = self.decoder_pipe(prompt=prompt, image=image, mask_image=mask_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict) self.maybe_free_model_hooks() return outputs # File: diffusers-main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_img2img.py from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from PIL import Image from transformers import XLMRobertaTokenizer from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyImg2ImgPipeline.from_pretrained(\n ... 
"kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... prompt,\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def get_new_h_w(h, w, scale_factor=8): new_h = h // scale_factor ** 2 if h % scale_factor ** 2 != 0: new_h += 1 new_w = w // scale_factor ** 2 if w % scale_factor ** 2 != 0: new_w += 1 return (new_h * scale_factor, new_w * scale_factor) def prepare_image(pil_image, w=512, h=512): pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) arr = np.array(pil_image.convert('RGB')) arr = arr.astype(np.float32) / 127.5 - 1 arr = np.transpose(arr, [2, 0, 1]) image = torch.from_numpy(arr).unsqueeze(0) return image class KandinskyImg2ImgPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->unet->movq' def __init__(self, text_encoder: MultilingualCLIP, movq: VQModel, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler): super().__init__() self.register_modules(text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return (timesteps, num_inference_steps - t_start) def prepare_latents(self, latents, latent_timestep, shape, dtype, device, generator, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma shape = latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.add_noise(latents, noise, latent_timestep) return latents def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids.to(device) text_mask = text_inputs.attention_mask.to(device) (prompt_embeds, text_encoder_hidden_states) = self.text_encoder(input_ids=text_input_ids, attention_mask=text_mask) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = 
text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') uncond_text_input_ids = uncond_input.input_ids.to(device) uncond_text_mask = uncond_input.attention_mask.to(device) (negative_prompt_embeds, uncond_text_encoder_hidden_states) = self.text_encoder(input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_encoder_hidden_states, text_mask) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_cumprod = alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], image_embeds: torch.Tensor, negative_image_embeds: torch.Tensor, negative_prompt: Optional[Union[str, List[str]]]=None, height: int=512, width: int=512, num_inference_steps: int=100, strength: float=0.3, guidance_scale: float=7.0, 
num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, text_encoder_hidden_states, _) = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt) if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=prompt_embeds.dtype, device=device) if not isinstance(image, list): image = [image] if not all((isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image)): raise ValueError(f'Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor') image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) image = image.to(dtype=prompt_embeds.dtype, device=device) latents = self.movq.encode(image)['latents'] latents = latents.repeat_interleave(num_images_per_prompt, dim=0) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps_tensor, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = int(self.scheduler.config.num_train_timesteps * strength) - 2 latent_timestep = torch.tensor([latent_timestep] * batch_size, dtype=timesteps_tensor.dtype, device=device) num_channels_latents = self.unet.config.in_channels (height, width) = get_new_h_w(height, width, self.movq_scale_factor) latents = self.prepare_latents(latents, latent_timestep, (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, self.scheduler) for (i, t) in enumerate(self.progress_bar(timesteps_tensor)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds} noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred, variance_pred) = noise_pred.split(latents.shape[1], dim=1) (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (_, variance_pred_text) = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): (noise_pred, _) = noise_pred.split(latents.shape[1], dim=1) latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) 
callback(step_idx, t, latents) image = self.movq.decode(latents, force_not_quantize=True)['sample'] self.maybe_free_model_hooks() if output_type not in ['pt', 'np', 'pil']: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}') if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py from copy import deepcopy from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from packaging import version from PIL import Image from transformers import XLMRobertaTokenizer from ... import __version__ from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n >>> import numpy as np\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "a hat"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyInpaintPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... )\n\n >>> mask = np.zeros((768, 768), dtype=np.float32)\n >>> mask[:250, 250:-250] = 1\n\n >>> out = pipe(\n ... prompt,\n ... image=init_image,\n ... mask_image=mask,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... 
)\n\n >>> image = out.images[0]\n >>> image.save("cat_with_hat.png")\n ```\n' def get_new_h_w(h, w, scale_factor=8): new_h = h // scale_factor ** 2 if h % scale_factor ** 2 != 0: new_h += 1 new_w = w // scale_factor ** 2 if w % scale_factor ** 2 != 0: new_w += 1 return (new_h * scale_factor, new_w * scale_factor) def prepare_mask(masks): prepared_masks = [] for mask in masks: old_mask = deepcopy(mask) for i in range(mask.shape[1]): for j in range(mask.shape[2]): if old_mask[0][i][j] == 1: continue if i != 0: mask[:, i - 1, j] = 0 if j != 0: mask[:, i, j - 1] = 0 if i != 0 and j != 0: mask[:, i - 1, j - 1] = 0 if i != mask.shape[1] - 1: mask[:, i + 1, j] = 0 if j != mask.shape[2] - 1: mask[:, i, j + 1] = 0 if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: mask[:, i + 1, j + 1] = 0 prepared_masks.append(mask) return torch.stack(prepared_masks, dim=0) def prepare_mask_and_masked_image(image, mask, height, width): if image is None: raise ValueError('`image` input cannot be undefined.') if mask is None: raise ValueError('`mask_image` input cannot be undefined.') if isinstance(image, torch.Tensor): if not isinstance(mask, torch.Tensor): raise TypeError(f'`image` is a torch.Tensor but `mask` (type: {type(mask)} is not') if image.ndim == 3: assert image.shape[0] == 3, 'Image outside a batch should be of shape (3, H, W)' image = image.unsqueeze(0) if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) if mask.ndim == 3: if mask.shape[0] == 1: mask = mask.unsqueeze(0) else: mask = mask.unsqueeze(1) assert image.ndim == 4 and mask.ndim == 4, 'Image and Mask must have 4 dimensions' assert image.shape[-2:] == mask.shape[-2:], 'Image and Mask must have the same spatial dimensions' assert image.shape[0] == mask.shape[0], 'Image and Mask must have the same batch size' if image.min() < -1 or image.max() > 1: raise ValueError('Image should be in [-1, 1] range') if mask.min() < 0 or mask.max() > 1: raise ValueError('Mask should be in [0, 1] range') mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 image = image.to(dtype=torch.float32) elif isinstance(mask, torch.Tensor): raise TypeError(f'`mask` is a torch.Tensor but `image` (type: {type(image)} is not') else: if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] image = [np.array(i.convert('RGB'))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 if isinstance(mask, (PIL.Image.Image, np.ndarray)): mask = [mask] if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] mask = np.concatenate([np.array(m.convert('L'))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): mask = np.concatenate([m[None, None, :] for m in mask], axis=0) mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) mask = 1 - mask return (mask, image) class KandinskyInpaintPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->unet->movq' def __init__(self, text_encoder: MultilingualCLIP, movq: VQModel, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, 
scheduler: DDIMScheduler): super().__init__() self.register_modules(text_encoder=text_encoder, movq=movq, tokenizer=tokenizer, unet=unet, scheduler=scheduler) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids.to(device) text_mask = text_inputs.attention_mask.to(device) (prompt_embeds, text_encoder_hidden_states) = self.text_encoder(input_ids=text_input_ids, attention_mask=text_mask) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') uncond_text_input_ids = uncond_input.input_ids.to(device) uncond_text_mask = uncond_input.attention_mask.to(device) (negative_prompt_embeds, uncond_text_encoder_hidden_states) = self.text_encoder(input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_encoder_hidden_states, text_mask) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image, np.ndarray], image_embeds: torch.Tensor, negative_image_embeds: torch.Tensor, negative_prompt: Optional[Union[str, List[str]]]=None, height: int=512, width: int=512, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True): if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse('0.23.0.dev0'): logger.warning("Please note that the expected format of `mask_image` has recently been changed. Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved white pixels. As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. This way, Kandinsky's masking behavior is aligned with Stable Diffusion. THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. 
This warning will be suppressed after the first inference call and will be removed in diffusers>0.23.0") self._warn_has_been_called = True if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, text_encoder_hidden_states, _) = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt) if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=prompt_embeds.dtype, device=device) (mask_image, image) = prepare_mask_and_masked_image(image, mask_image, height, width) image = image.to(dtype=prompt_embeds.dtype, device=device) image = self.movq.encode(image)['latents'] mask_image = mask_image.to(dtype=prompt_embeds.dtype, device=device) image_shape = tuple(image.shape[-2:]) mask_image = F.interpolate(mask_image, image_shape, mode='nearest') mask_image = prepare_mask(mask_image) masked_image = image * mask_image mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: mask_image = mask_image.repeat(2, 1, 1, 1) masked_image = masked_image.repeat(2, 1, 1, 1) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps_tensor = self.scheduler.timesteps num_channels_latents = self.movq.config.latent_channels (sample_height, sample_width) = get_new_h_w(height, width, self.movq_scale_factor) latents = self.prepare_latents((batch_size, num_channels_latents, sample_height, sample_width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler) num_channels_mask = mask_image.shape[1] num_channels_masked_image = masked_image.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. 
Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') for (i, t) in enumerate(self.progress_bar(timesteps_tensor)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds} noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred, variance_pred) = noise_pred.split(latents.shape[1], dim=1) (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (_, variance_pred_text) = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): (noise_pred, _) = noise_pred.split(latents.shape[1], dim=1) latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) image = self.movq.decode(latents, force_not_quantize=True)['sample'] self.maybe_free_model_hooks() if output_type not in ['pt', 'np', 'pil']: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}') if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...models import PriorTransformer from ...schedulers import UnCLIPScheduler from ...utils import BaseOutput, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... 
).images\n\n >>> image[0].save("cat.png")\n ```\n' EXAMPLE_INTERPOLATE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPriorPipeline, KandinskyPipeline\n >>> from diffusers.utils import load_image\n >>> import PIL\n\n >>> import torch\n >>> from torchvision import transforms\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> img1 = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... )\n\n >>> img2 = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/starry_night.jpeg"\n ... )\n\n >>> images_texts = ["a cat", img1, img2]\n >>> weights = [0.3, 0.3, 0.4]\n >>> image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights)\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... "",\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=150,\n ... ).images[0]\n\n >>> image.save("starry_cat.png")\n ```\n' @dataclass class KandinskyPriorPipelineOutput(BaseOutput): image_embeds: Union[torch.Tensor, np.ndarray] negative_image_embeds: Union[torch.Tensor, np.ndarray] class KandinskyPriorPipeline(DiffusionPipeline): _exclude_from_cpu_offload = ['prior'] model_cpu_offload_seq = 'text_encoder->prior' def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModelWithProjection, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, scheduler: UnCLIPScheduler, image_processor: CLIPImageProcessor): super().__init__() self.register_modules(prior=prior, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, image_encoder=image_encoder, image_processor=image_processor) @torch.no_grad() @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) def interpolate(self, images_and_prompts: List[Union[str, PIL.Image.Image, torch.Tensor]], weights: List[float], num_images_per_prompt: int=1, num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, negative_prior_prompt: Optional[str]=None, negative_prompt: str='', guidance_scale: float=4.0, device=None): device = device or self.device if len(images_and_prompts) != len(weights): raise ValueError(f'`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length') image_embeddings = [] for (cond, weight) in zip(images_and_prompts, weights): if isinstance(cond, str): image_emb = self(cond, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images_per_prompt, generator=generator, latents=latents, negative_prompt=negative_prior_prompt, guidance_scale=guidance_scale).image_embeds elif isinstance(cond, (PIL.Image.Image, torch.Tensor)): if isinstance(cond, PIL.Image.Image): cond = self.image_processor(cond, return_tensors='pt').pixel_values[0].unsqueeze(0).to(dtype=self.image_encoder.dtype, device=device) image_emb = self.image_encoder(cond)['image_embeds'] else: raise ValueError(f'`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}') image_embeddings.append(image_emb * weight) image_emb = 
torch.cat(image_embeddings).sum(dim=0, keepdim=True) out_zero = self(negative_prompt, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images_per_prompt, generator=generator, latents=latents, negative_prompt=negative_prior_prompt, guidance_scale=guidance_scale) zero_image_emb = out_zero.negative_image_embeds if negative_prompt == '' else out_zero.image_embeds return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def get_zero_embed(self, batch_size=1, device=None): device = device or self.device zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to(device=device, dtype=self.image_encoder.dtype) zero_image_emb = self.image_encoder(zero_img)['image_embeds'] zero_image_emb = zero_image_emb.repeat(batch_size, 1) return zero_image_emb def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_encoder_hidden_states, text_mask) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, guidance_scale: float=4.0, output_type: Optional[str]='pt', return_dict: bool=True): if isinstance(prompt, str): prompt = [prompt] elif not isinstance(prompt, list): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] elif not isinstance(negative_prompt, list) and negative_prompt is not None: raise ValueError(f'`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}') if negative_prompt is not None: prompt = prompt + negative_prompt negative_prompt = 2 * negative_prompt device = self._execution_device batch_size = len(prompt) batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, text_encoder_hidden_states, text_mask) = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt) self.scheduler.set_timesteps(num_inference_steps, device=device) prior_timesteps_tensor = self.scheduler.timesteps embedding_dim = self.prior.config.embedding_dim latents = self.prepare_latents((batch_size, embedding_dim), prompt_embeds.dtype, device, generator, latents, self.scheduler) for (i, t) in enumerate(self.progress_bar(prior_timesteps_tensor)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents predicted_image_embedding = self.prior(latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask).predicted_image_embedding if do_classifier_free_guidance: (predicted_image_embedding_uncond, predicted_image_embedding_text) = predicted_image_embedding.chunk(2) 
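# Classifier-free guidance on the prior: move the unconditional prediction toward the text-conditioned one by guidance_scale.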
predicted_image_embedding = predicted_image_embedding_uncond + guidance_scale * (predicted_image_embedding_text - predicted_image_embedding_uncond) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] latents = self.scheduler.step(predicted_image_embedding, timestep=t, sample=latents, generator=generator, prev_timestep=prev_timestep).prev_sample latents = self.prior.post_process_latents(latents) image_embeddings = latents if negative_prompt is None: zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) self.maybe_free_model_hooks() else: (image_embeddings, zero_embeds) = image_embeddings.chunk(2) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.prior_hook.offload() if output_type not in ['pt', 'np']: raise ValueError(f'Only the output types `pt` and `np` are supported not output_type={output_type}') if output_type == 'np': image_embeddings = image_embeddings.cpu().numpy() zero_embeds = zero_embeds.cpu().numpy() if not return_dict: return (image_embeddings, zero_embeds) return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) # File: diffusers-main/src/diffusers/pipelines/kandinsky/text_encoder.py import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class MCLIPConfig(XLMRobertaConfig): model_type = 'M-CLIP' def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs): self.transformerDimensions = transformerDimSize self.numDims = imageDimSize super().__init__(**kwargs) class MultilingualCLIP(PreTrainedModel): config_class = MCLIPConfig def __init__(self, config, *args, **kwargs): super().__init__(config, *args, **kwargs) self.transformer = XLMRobertaModel(config) self.LinearTransformation = torch.nn.Linear(in_features=config.transformerDimensions, out_features=config.numDims) def forward(self, input_ids, attention_mask): embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0] embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return (self.LinearTransformation(embs2), embs) # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_kandinsky2_2'] = ['KandinskyV22Pipeline'] _import_structure['pipeline_kandinsky2_2_combined'] = ['KandinskyV22CombinedPipeline', 'KandinskyV22Img2ImgCombinedPipeline', 'KandinskyV22InpaintCombinedPipeline'] _import_structure['pipeline_kandinsky2_2_controlnet'] = ['KandinskyV22ControlnetPipeline'] _import_structure['pipeline_kandinsky2_2_controlnet_img2img'] = ['KandinskyV22ControlnetImg2ImgPipeline'] _import_structure['pipeline_kandinsky2_2_img2img'] = ['KandinskyV22Img2ImgPipeline'] _import_structure['pipeline_kandinsky2_2_inpainting'] = ['KandinskyV22InpaintPipeline'] _import_structure['pipeline_kandinsky2_2_prior'] = ['KandinskyV22PriorPipeline'] _import_structure['pipeline_kandinsky2_2_prior_emb2emb'] = 
['KandinskyV22PriorEmb2EmbPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_kandinsky2_2 import KandinskyV22Pipeline from .pipeline_kandinsky2_2_combined import KandinskyV22CombinedPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22InpaintCombinedPipeline from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py from typing import Callable, Dict, List, Optional, Union import torch from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler from ...utils import deprecate, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... 
).images\n >>> image[0].save("cat.png")\n ```\n' def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor ** 2 if height % scale_factor ** 2 != 0: new_height += 1 new_width = width // scale_factor ** 2 if width % scale_factor ** 2 != 0: new_width += 1 return (new_height * scale_factor, new_width * scale_factor) class KandinskyV22Pipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet->movq' _callback_tensor_inputs = ['latents', 'image_embeds', 'negative_image_embeds'] def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, movq=movq) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image_embeds: Union[torch.Tensor, List[torch.Tensor]], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], height: int=512, width: int=512, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') device = self._execution_device self._guidance_scale = guidance_scale if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) batch_size = image_embeds.shape[0] * num_images_per_prompt if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if self.do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = 
self.unet.config.in_channels (height, width) = downscale_height_and_width(height, width, self.movq_scale_factor) latents = self.prepare_latents((batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler) self._num_timesteps = len(timesteps) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents added_cond_kwargs = {'image_embeds': image_embeds} noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred, variance_pred) = noise_pred.split(latents.shape[1], dim=1) (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (_, variance_pred_text) = variance_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): (noise_pred, _) = noise_pred.split(latents.shape[1], dim=1) latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) image_embeds = callback_outputs.pop('image_embeds', image_embeds) negative_image_embeds = callback_outputs.pop('negative_image_embeds', negative_image_embeds) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if output_type not in ['pt', 'np', 'pil', 'latent']: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}') if not output_type == 'latent': image = self.movq.decode(latents, force_not_quantize=True)['sample'] if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) else: image = latents self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py from typing import Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...models import PriorTransformer, UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler, UnCLIPScheduler from ...utils import deprecate, logging, replace_example_docstring from ..pipeline_utils import DiffusionPipeline from .pipeline_kandinsky2_2 import KandinskyV22Pipeline from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline logger = logging.get_logger(__name__) TEXT2IMAGE_EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n from diffusers import AutoPipelineForText2Image\n import torch\n\n pipe = AutoPipelineForText2Image.from_pretrained(\n "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n )\n 
pipe.enable_model_cpu_offload()\n\n prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"\n\n image = pipe(prompt=prompt, num_inference_steps=25).images[0]\n ```\n' IMAGE2IMAGE_EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n from diffusers import AutoPipelineForImage2Image\n import torch\n import requests\n from io import BytesIO\n from PIL import Image\n import os\n\n pipe = AutoPipelineForImage2Image.from_pretrained(\n "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n )\n pipe.enable_model_cpu_offload()\n\n prompt = "A fantasy landscape, Cinematic lighting"\n negative_prompt = "low quality, bad quality"\n\n url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n\n response = requests.get(url)\n original_image = Image.open(BytesIO(response.content)).convert("RGB")\n original_image.thumbnail((768, 768))\n\n image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0]\n ```\n' INPAINT_EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n from diffusers import AutoPipelineForInpainting\n from diffusers.utils import load_image\n import torch\n import numpy as np\n\n pipe = AutoPipelineForInpainting.from_pretrained(\n "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16\n )\n pipe.enable_model_cpu_offload()\n\n prompt = "A fantasy landscape, Cinematic lighting"\n negative_prompt = "low quality, bad quality"\n\n original_image = load_image(\n "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"\n )\n\n mask = np.zeros((768, 768), dtype=np.float32)\n # Let\'s mask out an area above the cat\'s head\n mask[:250, 250:-250] = 1\n\n image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]\n ```\n' class KandinskyV22CombinedPipeline(DiffusionPipeline): model_cpu_offload_seq = 'prior_text_encoder->prior_image_encoder->unet->movq' _load_connected_pipes = True _exclude_from_cpu_offload = ['prior_prior'] def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor) self.prior_pipe = KandinskyV22PriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor) self.decoder_pipe = KandinskyV22Pipeline(unet=unet, scheduler=scheduler, movq=movq) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): 
self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]]=None, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, height: int=512, width: int=512, prior_guidance_scale: float=4.0, prior_num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, prior_callback_on_step_end_tensor_inputs: List[str]=['latents'], callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): prior_outputs = self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = image_embeds.shape[0] // len(prompt) * prompt outputs = self.decoder_pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs) self.maybe_free_model_hooks() return outputs class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline): model_cpu_offload_seq = 'prior_text_encoder->prior_image_encoder->unet->movq' _load_connected_pipes = True _exclude_from_cpu_offload = ['prior_prior'] def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor) self.prior_pipe = KandinskyV22PriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor) self.decoder_pipe = KandinskyV22Img2ImgPipeline(unet=unet, scheduler=scheduler, movq=movq) def 
enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_model_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) def enable_sequential_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]]=None, num_inference_steps: int=100, guidance_scale: float=4.0, strength: float=0.3, num_images_per_prompt: int=1, height: int=512, width: int=512, prior_guidance_scale: float=4.0, prior_num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, prior_callback_on_step_end_tensor_inputs: List[str]=['latents'], callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): prior_outputs = self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt image = [image] if isinstance(image, PIL.Image.Image) else image if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = image_embeds.shape[0] // len(prompt) * prompt if isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and (image_embeds.shape[0] % len(image) == 0): image = image_embeds.shape[0] // len(image) * image outputs = self.decoder_pipe(image=image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, strength=strength, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs) self.maybe_free_model_hooks() return outputs class 
KandinskyV22InpaintCombinedPipeline(DiffusionPipeline): model_cpu_offload_seq = 'prior_text_encoder->prior_image_encoder->unet->movq' _load_connected_pipes = True _exclude_from_cpu_offload = ['prior_prior'] def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor) self.prior_pipe = KandinskyV22PriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor) self.decoder_pipe = KandinskyV22InpaintPipeline(unet=unet, scheduler=scheduler, movq=movq) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], mask_image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]]=None, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, height: int=512, width: int=512, prior_guidance_scale: float=4.0, prior_num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, prior_callback_on_step_end_tensor_inputs: List[str]=['latents'], callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): prior_kwargs = {} if kwargs.get('prior_callback', None) is not None: prior_kwargs['callback'] = kwargs.pop('prior_callback') deprecate('prior_callback', '1.0.0', 'Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`') if kwargs.get('prior_callback_steps', None) is not None: deprecate('prior_callback_steps', '1.0.0', 'Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`') prior_kwargs['callback_steps'] = kwargs.pop('prior_callback_steps') prior_outputs = 
self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, **prior_kwargs) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt image = [image] if isinstance(image, PIL.Image.Image) else image mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = image_embeds.shape[0] // len(prompt) * prompt if isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and (image_embeds.shape[0] % len(image) == 0): image = image_embeds.shape[0] // len(image) * image if isinstance(mask_image, (list, tuple)) and len(mask_image) < image_embeds.shape[0] and (image_embeds.shape[0] % len(mask_image) == 0): mask_image = image_embeds.shape[0] // len(mask_image) * mask_image outputs = self.decoder_pipe(image=image, mask_image=mask_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs) self.maybe_free_model_hooks() return outputs # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py from typing import Callable, List, Optional, Union import torch from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... 
).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor ** 2 if height % scale_factor ** 2 != 0: new_height += 1 new_width = width // scale_factor ** 2 if width % scale_factor ** 2 != 0: new_width += 1 return (new_height * scale_factor, new_width * scale_factor) class KandinskyV22ControlnetPipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet->movq' def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, movq=movq) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents @torch.no_grad() def __call__(self, image_embeds: Union[torch.Tensor, List[torch.Tensor]], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], hint: torch.Tensor, height: int=512, width: int=512, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True): device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if isinstance(hint, list): hint = torch.cat(hint, dim=0) batch_size = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) hint = hint.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device) hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device) 
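# Set up the denoising schedule and initial latents at the MoVQ-downscaled resolution before the reverse diffusion loop.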
self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps_tensor = self.scheduler.timesteps num_channels_latents = self.movq.config.latent_channels (height, width) = downscale_height_and_width(height, width, self.movq_scale_factor) latents = self.prepare_latents((batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler) for (i, t) in enumerate(self.progress_bar(timesteps_tensor)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents added_cond_kwargs = {'image_embeds': image_embeds, 'hint': hint} noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred, variance_pred) = noise_pred.split(latents.shape[1], dim=1) (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (_, variance_pred_text) = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): (noise_pred, _) = noise_pred.split(latents.shape[1], dim=1) latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) image = self.movq.decode(latents, force_not_quantize=True)['sample'] self.maybe_free_model_hooks() if output_type not in ['pt', 'np', 'pil']: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}') if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from PIL import Image from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(\n ... 
"kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator)\n >>> negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator)\n\n >>> images = pipe(\n ... image=img,\n ... strength=0.5,\n ... image_embeds=img_emb.image_embeds,\n ... negative_image_embeds=negative_emb.image_embeds,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n' def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor ** 2 if height % scale_factor ** 2 != 0: new_height += 1 new_width = width // scale_factor ** 2 if width % scale_factor ** 2 != 0: new_width += 1 return (new_height * scale_factor, new_width * scale_factor) def prepare_image(pil_image, w=512, h=512): pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) arr = np.array(pil_image.convert('RGB')) arr = arr.astype(np.float32) / 127.5 - 1 arr = np.transpose(arr, [2, 0, 1]) image = torch.from_numpy(arr).unsqueeze(0) return image class KandinskyV22ControlnetImg2ImgPipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet->movq' def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, movq=movq) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): init_latents = [self.movq.encode(image[i:i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.movq.encode(image).latent_dist.sample(generator) init_latents = self.movq.config.scaling_factor * init_latents init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @torch.no_grad() def __call__(self, image_embeds: Union[torch.Tensor, List[torch.Tensor]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], hint: torch.Tensor, height: int=512, width: int=512, num_inference_steps: int=100, guidance_scale: float=4.0, strength: float=0.3, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, return_dict: bool=True): device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if isinstance(hint, list): hint = torch.cat(hint, dim=0) batch_size = image_embeds.shape[0] if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) hint = hint.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device) hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device) if not isinstance(image, list): image = [image] if not all((isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image)): raise ValueError(f'Input is in incorrect format: {[type(i) for i in image]}. 
Currently, we only support PIL image and pytorch tensor') image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) image = image.to(dtype=image_embeds.dtype, device=device) latents = self.movq.encode(image)['latents'] latents = latents.repeat_interleave(num_images_per_prompt, dim=0) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) (height, width) = downscale_height_and_width(height, width, self.movq_scale_factor) latents = self.prepare_latents(latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents added_cond_kwargs = {'image_embeds': image_embeds, 'hint': hint} noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred, variance_pred) = noise_pred.split(latents.shape[1], dim=1) (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (_, variance_pred_text) = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): (noise_pred, _) = noise_pred.split(latents.shape[1], dim=1) latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) image = self.movq.decode(latents, force_not_quantize=True)['sample'] self.maybe_free_model_hooks() if output_type not in ['pt', 'np', 'pil']: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}') if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py from typing import Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from PIL import Image from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler from ...utils import deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... 
)\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n' def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor ** 2 if height % scale_factor ** 2 != 0: new_height += 1 new_width = width // scale_factor ** 2 if width % scale_factor ** 2 != 0: new_width += 1 return (new_height * scale_factor, new_width * scale_factor) def prepare_image(pil_image, w=512, h=512): pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) arr = np.array(pil_image.convert('RGB')) arr = arr.astype(np.float32) / 127.5 - 1 arr = np.transpose(arr, [2, 0, 1]) image = torch.from_numpy(arr).unsqueeze(0) return image class KandinskyV22Img2ImgPipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet->movq' _callback_tensor_inputs = ['latents', 'image_embeds', 'negative_image_embeds'] def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, movq=movq) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): init_latents = [self.movq.encode(image[i:i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.movq.encode(image).latent_dist.sample(generator) init_latents = self.movq.config.scaling_factor * init_latents init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() def __call__(self, image_embeds: Union[torch.Tensor, List[torch.Tensor]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], height: int=512, width: int=512, num_inference_steps: int=100, guidance_scale: float=4.0, strength: float=0.3, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') device = self._execution_device self._guidance_scale = guidance_scale if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) batch_size = image_embeds.shape[0] if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if self.do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device) if not isinstance(image, list): image = [image] if not all((isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image)): raise ValueError(f'Input is in incorrect format: {[type(i) for i in image]}. 
Currently, we only support PIL image and pytorch tensor') image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) image = image.to(dtype=image_embeds.dtype, device=device) latents = self.movq.encode(image)['latents'] latents = latents.repeat_interleave(num_images_per_prompt, dim=0) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) (height, width) = downscale_height_and_width(height, width, self.movq_scale_factor) latents = self.prepare_latents(latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator) self._num_timesteps = len(timesteps) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents added_cond_kwargs = {'image_embeds': image_embeds} noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred, variance_pred) = noise_pred.split(latents.shape[1], dim=1) (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (_, variance_pred_text) = variance_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): (noise_pred, _) = noise_pred.split(latents.shape[1], dim=1) latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) image_embeds = callback_outputs.pop('image_embeds', image_embeds) negative_image_embeds = callback_outputs.pop('negative_image_embeds', negative_image_embeds) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if output_type not in ['pt', 'np', 'pil', 'latent']: raise ValueError(f'Only the output types `pt`, `pil` ,`np` and `latent` are supported not output_type={output_type}') if not output_type == 'latent': image = self.movq.decode(latents, force_not_quantize=True)['sample'] if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) else: image = latents self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py from copy import deepcopy from typing import Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from packaging import version from PIL import Image from ... 
import __version__ from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler from ...utils import deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n >>> import numpy as np\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "a hat"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22InpaintPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... )\n\n >>> mask = np.zeros((768, 768), dtype=np.float32)\n >>> mask[:250, 250:-250] = 1\n\n >>> out = pipe(\n ... image=init_image,\n ... mask_image=mask,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... )\n\n >>> image = out.images[0]\n >>> image.save("cat_with_hat.png")\n ```\n' def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor ** 2 if height % scale_factor ** 2 != 0: new_height += 1 new_width = width // scale_factor ** 2 if width % scale_factor ** 2 != 0: new_width += 1 return (new_height * scale_factor, new_width * scale_factor) def prepare_mask(masks): prepared_masks = [] for mask in masks: old_mask = deepcopy(mask) for i in range(mask.shape[1]): for j in range(mask.shape[2]): if old_mask[0][i][j] == 1: continue if i != 0: mask[:, i - 1, j] = 0 if j != 0: mask[:, i, j - 1] = 0 if i != 0 and j != 0: mask[:, i - 1, j - 1] = 0 if i != mask.shape[1] - 1: mask[:, i + 1, j] = 0 if j != mask.shape[2] - 1: mask[:, i, j + 1] = 0 if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: mask[:, i + 1, j + 1] = 0 prepared_masks.append(mask) return torch.stack(prepared_masks, dim=0) def prepare_mask_and_masked_image(image, mask, height, width): if image is None: raise ValueError('`image` input cannot be undefined.') if mask is None: raise ValueError('`mask_image` input cannot be undefined.') if isinstance(image, torch.Tensor): if not isinstance(mask, torch.Tensor): raise TypeError(f'`image` is a torch.Tensor but `mask` (type: {type(mask)} is not') if image.ndim == 3: assert image.shape[0] == 3, 'Image outside a batch should be of shape (3, H, W)' image = image.unsqueeze(0) if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) if mask.ndim == 3: if mask.shape[0] == 1: mask = mask.unsqueeze(0) else: mask = mask.unsqueeze(1) assert image.ndim == 4 and mask.ndim == 4, 'Image and Mask must have 4 dimensions' assert image.shape[-2:] == mask.shape[-2:], 'Image and Mask must have the same spatial dimensions' assert image.shape[0] == mask.shape[0], 'Image and Mask must have the same batch size' if image.min() < -1 or image.max() > 1: raise ValueError('Image should be in [-1, 1] range') if mask.min() < 0 or mask.max() > 1: raise ValueError('Mask should be in [0, 1] range') mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 image = image.to(dtype=torch.float32) elif 
isinstance(mask, torch.Tensor): raise TypeError(f'`mask` is a torch.Tensor but `image` (type: {type(image)} is not') else: if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] image = [np.array(i.convert('RGB'))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 if isinstance(mask, (PIL.Image.Image, np.ndarray)): mask = [mask] if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] mask = np.concatenate([np.array(m.convert('L'))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): mask = np.concatenate([m[None, None, :] for m in mask], axis=0) mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) mask = 1 - mask return (mask, image) class KandinskyV22InpaintPipeline(DiffusionPipeline): model_cpu_offload_seq = 'unet->movq' _callback_tensor_inputs = ['latents', 'image_embeds', 'negative_image_embeds', 'masked_image', 'mask_image'] def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, movq=movq) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() def __call__(self, image_embeds: Union[torch.Tensor, List[torch.Tensor]], image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image, np.ndarray], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], height: int=512, width: int=512, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse('0.23.0.dev0'): logger.warning("Please note that the expected format of `mask_image` has recently been changed. Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. 
This way, Kandinsky's masking behavior is aligned with Stable Diffusion. THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. This warning will be surpressed after the first inference call and will be removed in diffusers>0.23.0") self._warn_has_been_called = True callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') self._guidance_scale = guidance_scale device = self._execution_device if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) batch_size = image_embeds.shape[0] * num_images_per_prompt if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if self.do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps (mask_image, image) = prepare_mask_and_masked_image(image, mask_image, height, width) image = image.to(dtype=image_embeds.dtype, device=device) image = self.movq.encode(image)['latents'] mask_image = mask_image.to(dtype=image_embeds.dtype, device=device) image_shape = tuple(image.shape[-2:]) mask_image = F.interpolate(mask_image, image_shape, mode='nearest') mask_image = prepare_mask(mask_image) masked_image = image * mask_image mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) if self.do_classifier_free_guidance: mask_image = mask_image.repeat(2, 1, 1, 1) masked_image = masked_image.repeat(2, 1, 1, 1) num_channels_latents = self.movq.config.latent_channels (height, width) = downscale_height_and_width(height, width, self.movq_scale_factor) latents = self.prepare_latents((batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler) noise = torch.clone(latents) self._num_timesteps = len(timesteps) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) added_cond_kwargs = {'image_embeds': image_embeds} noise_pred = self.unet(sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred, variance_pred) = noise_pred.split(latents.shape[1], dim=1) (noise_pred_uncond, noise_pred_text) = 
noise_pred.chunk(2) (_, variance_pred_text) = variance_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): (noise_pred, _) = noise_pred.split(latents.shape[1], dim=1) latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0] init_latents_proper = image[:1] init_mask = mask_image[:1] if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise(init_latents_proper, noise, torch.tensor([noise_timestep])) latents = init_mask * init_latents_proper + (1 - init_mask) * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) image_embeds = callback_outputs.pop('image_embeds', image_embeds) negative_image_embeds = callback_outputs.pop('negative_image_embeds', negative_image_embeds) masked_image = callback_outputs.pop('masked_image', masked_image) mask_image = callback_outputs.pop('mask_image', mask_image) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) latents = mask_image[:1] * image[:1] + (1 - mask_image[:1]) * latents if output_type not in ['pt', 'np', 'pil', 'latent']: raise ValueError(f'Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}') if not output_type == 'latent': image = self.movq.decode(latents, force_not_quantize=True)['sample'] if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) else: image = latents self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py from typing import Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...models import PriorTransformer from ...schedulers import UnCLIPScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..kandinsky import KandinskyPriorPipelineOutput from ..pipeline_utils import DiffusionPipeline logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> image_emb, negative_image_emb = pipe_prior(prompt).to_tuple()\n\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... 
).images\n >>> image[0].save("cat.png")\n ```\n' EXAMPLE_INTERPOLATE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline\n >>> from diffusers.utils import load_image\n >>> import PIL\n >>> import torch\n >>> from torchvision import transforms\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n >>> img1 = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... )\n >>> img2 = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/starry_night.jpeg"\n ... )\n >>> images_texts = ["a cat", img1, img2]\n >>> weights = [0.3, 0.3, 0.4]\n >>> out = pipe_prior.interpolate(images_texts, weights)\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=out.image_embeds,\n ... negative_image_embeds=out.negative_image_embeds,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images[0]\n >>> image.save("starry_cat.png")\n ```\n' class KandinskyV22PriorPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->image_encoder->prior' _exclude_from_cpu_offload = ['prior'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'text_encoder_hidden_states', 'text_mask'] def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModelWithProjection, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, scheduler: UnCLIPScheduler, image_processor: CLIPImageProcessor): super().__init__() self.register_modules(prior=prior, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, image_encoder=image_encoder, image_processor=image_processor) @torch.no_grad() @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) def interpolate(self, images_and_prompts: List[Union[str, PIL.Image.Image, torch.Tensor]], weights: List[float], num_images_per_prompt: int=1, num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, negative_prior_prompt: Optional[str]=None, negative_prompt: str='', guidance_scale: float=4.0, device=None): device = device or self.device if len(images_and_prompts) != len(weights): raise ValueError(f'`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length') image_embeddings = [] for (cond, weight) in zip(images_and_prompts, weights): if isinstance(cond, str): image_emb = self(cond, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images_per_prompt, generator=generator, latents=latents, negative_prompt=negative_prior_prompt, guidance_scale=guidance_scale).image_embeds.unsqueeze(0) elif isinstance(cond, (PIL.Image.Image, torch.Tensor)): if isinstance(cond, PIL.Image.Image): cond = self.image_processor(cond, return_tensors='pt').pixel_values[0].unsqueeze(0).to(dtype=self.image_encoder.dtype, device=device) image_emb = self.image_encoder(cond)['image_embeds'].repeat(num_images_per_prompt, 1).unsqueeze(0) else: raise ValueError(f'`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}') image_embeddings.append(image_emb * weight) 
image_emb = torch.cat(image_embeddings).sum(dim=0) out_zero = self(negative_prompt, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images_per_prompt, generator=generator, latents=latents, negative_prompt=negative_prior_prompt, guidance_scale=guidance_scale) zero_image_emb = out_zero.negative_image_embeds if negative_prompt == '' else out_zero.image_embeds return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def get_zero_embed(self, batch_size=1, device=None): device = device or self.device zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to(device=device, dtype=self.image_encoder.dtype) zero_image_emb = self.image_encoder(zero_img)['image_embeds'] zero_image_emb = zero_image_emb.repeat(batch_size, 1) return zero_image_emb def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_encoder_hidden_states, text_mask) @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def guidance_scale(self): return self._guidance_scale @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, guidance_scale: float=4.0, output_type: Optional[str]='pt', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if isinstance(prompt, str): prompt = [prompt] elif not isinstance(prompt, list): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] elif not isinstance(negative_prompt, list) and negative_prompt is not None: raise ValueError(f'`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}') if negative_prompt is not None: prompt = prompt + negative_prompt negative_prompt = 2 * negative_prompt device = self._execution_device batch_size = len(prompt) batch_size = batch_size * num_images_per_prompt self._guidance_scale = guidance_scale (prompt_embeds, text_encoder_hidden_states, text_mask) = self._encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps embedding_dim = 
self.prior.config.embedding_dim latents = self.prepare_latents((batch_size, embedding_dim), prompt_embeds.dtype, device, generator, latents, self.scheduler) self._num_timesteps = len(timesteps) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents predicted_image_embedding = self.prior(latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask).predicted_image_embedding if self.do_classifier_free_guidance: (predicted_image_embedding_uncond, predicted_image_embedding_text) = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + self.guidance_scale * (predicted_image_embedding_text - predicted_image_embedding_uncond) if i + 1 == timesteps.shape[0]: prev_timestep = None else: prev_timestep = timesteps[i + 1] latents = self.scheduler.step(predicted_image_embedding, timestep=t, sample=latents, generator=generator, prev_timestep=prev_timestep).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) text_encoder_hidden_states = callback_outputs.pop('text_encoder_hidden_states', text_encoder_hidden_states) text_mask = callback_outputs.pop('text_mask', text_mask) latents = self.prior.post_process_latents(latents) image_embeddings = latents if negative_prompt is None: zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) else: (image_embeddings, zero_embeds) = image_embeddings.chunk(2) self.maybe_free_model_hooks() if output_type not in ['pt', 'np']: raise ValueError(f'Only the output types `pt` and `np` are supported not output_type={output_type}') if output_type == 'np': image_embeddings = image_embeddings.cpu().numpy() zero_embeds = zero_embeds.cpu().numpy() if not return_dict: return (image_embeddings, zero_embeds) return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) # File: diffusers-main/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py from typing import List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...models import PriorTransformer from ...schedulers import UnCLIPScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..kandinsky import KandinskyPriorPipelineOutput from ..pipeline_utils import DiffusionPipeline logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... )\n >>> image_emb, negative_image_emb = pipe_prior(prompt, image=img, strength=0.2).to_tuple()\n\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\n ... 
"kandinsky-community/kandinsky-2-2-decoder, torch_dtype=torch.float16"\n ... )\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n' EXAMPLE_INTERPOLATE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22Pipeline\n >>> from diffusers.utils import load_image\n >>> import PIL\n\n >>> import torch\n >>> from torchvision import transforms\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> img1 = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... )\n\n >>> img2 = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/starry_night.jpeg"\n ... )\n\n >>> images_texts = ["a cat", img1, img2]\n >>> weights = [0.3, 0.3, 0.4]\n >>> image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights)\n\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=150,\n ... ).images[0]\n\n >>> image.save("starry_cat.png")\n ```\n' class KandinskyV22PriorEmb2EmbPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->image_encoder->prior' _exclude_from_cpu_offload = ['prior'] def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModelWithProjection, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, scheduler: UnCLIPScheduler, image_processor: CLIPImageProcessor): super().__init__() self.register_modules(prior=prior, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, image_encoder=image_encoder, image_processor=image_processor) def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return (timesteps, num_inference_steps - t_start) @torch.no_grad() @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) def interpolate(self, images_and_prompts: List[Union[str, PIL.Image.Image, torch.Tensor]], weights: List[float], num_images_per_prompt: int=1, num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, negative_prior_prompt: Optional[str]=None, negative_prompt: str='', guidance_scale: float=4.0, device=None): device = device or self.device if len(images_and_prompts) != len(weights): raise ValueError(f'`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length') image_embeddings = [] for (cond, weight) in zip(images_and_prompts, weights): if isinstance(cond, str): image_emb = self(cond, num_inference_steps=num_inference_steps, num_images_per_prompt=num_images_per_prompt, generator=generator, latents=latents, negative_prompt=negative_prior_prompt, guidance_scale=guidance_scale).image_embeds.unsqueeze(0) elif 
isinstance(cond, (PIL.Image.Image, torch.Tensor)): image_emb = self._encode_image(cond, device=device, num_images_per_prompt=num_images_per_prompt).unsqueeze(0) else: raise ValueError(f'`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}') image_embeddings.append(image_emb * weight) image_emb = torch.cat(image_embeddings).sum(dim=0) return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=torch.randn_like(image_emb)) def _encode_image(self, image: Union[torch.Tensor, List[PIL.Image.Image]], device, num_images_per_prompt): if not isinstance(image, torch.Tensor): image = self.image_processor(image, return_tensors='pt').pixel_values.to(dtype=self.image_encoder.dtype, device=device) image_emb = self.image_encoder(image)['image_embeds'] image_emb = image_emb.repeat_interleave(num_images_per_prompt, dim=0) image_emb.to(device=device) return image_emb def prepare_latents(self, emb, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): emb = emb.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt init_latents = emb if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def get_zero_embed(self, batch_size=1, device=None): device = device or self.device zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to(device=device, dtype=self.image_encoder.dtype) zero_image_emb = self.image_encoder(zero_img)['image_embeds'] zero_image_emb = zero_image_emb.repeat(batch_size, 1) return zero_image_emb def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, 
dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_encoder_hidden_states, text_mask) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]], strength: float=0.3, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, guidance_scale: float=4.0, output_type: Optional[str]='pt', return_dict: bool=True): if isinstance(prompt, str): prompt = [prompt] elif not isinstance(prompt, list): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] elif not isinstance(negative_prompt, list) and negative_prompt is not None: raise ValueError(f'`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}') if negative_prompt is not None: prompt = prompt + negative_prompt negative_prompt = 2 * negative_prompt device = self._execution_device batch_size = len(prompt) batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, text_encoder_hidden_states, text_mask) = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt) if not isinstance(image, List): image = [image] if isinstance(image[0], torch.Tensor): image = 
torch.cat(image, dim=0) if isinstance(image, torch.Tensor) and image.ndim == 2: image_embeds = image.repeat_interleave(num_images_per_prompt, dim=0) elif isinstance(image, torch.Tensor) and image.ndim != 4: raise ValueError(f' if pass `image` as pytorch tensor, or a list of pytorch tensor, please make sure each tensor has shape [batch_size, channels, height, width], currently {image[0].unsqueeze(0).shape}') else: image_embeds = self._encode_image(image, device, num_images_per_prompt) self.scheduler.set_timesteps(num_inference_steps, device=device) latents = image_embeds (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size) latents = self.prepare_latents(latents, latent_timestep, batch_size // num_images_per_prompt, num_images_per_prompt, prompt_embeds.dtype, device, generator) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents predicted_image_embedding = self.prior(latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask).predicted_image_embedding if do_classifier_free_guidance: (predicted_image_embedding_uncond, predicted_image_embedding_text) = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + guidance_scale * (predicted_image_embedding_text - predicted_image_embedding_uncond) if i + 1 == timesteps.shape[0]: prev_timestep = None else: prev_timestep = timesteps[i + 1] latents = self.scheduler.step(predicted_image_embedding, timestep=t, sample=latents, generator=generator, prev_timestep=prev_timestep).prev_sample latents = self.prior.post_process_latents(latents) image_embeddings = latents if negative_prompt is None: zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) else: (image_embeddings, zero_embeds) = image_embeddings.chunk(2) self.maybe_free_model_hooks() if output_type not in ['pt', 'np']: raise ValueError(f'Only the output types `pt` and `np` are supported not output_type={output_type}') if output_type == 'np': image_embeddings = image_embeddings.cpu().numpy() zero_embeds = zero_embeds.cpu().numpy() if not return_dict: return (image_embeddings, zero_embeds) return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) # File: diffusers-main/src/diffusers/pipelines/kandinsky3/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_kandinsky3'] = ['Kandinsky3Pipeline'] _import_structure['pipeline_kandinsky3_img2img'] = ['Kandinsky3Img2ImgPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_kandinsky3 import Kandinsky3Pipeline from .pipeline_kandinsky3_img2img import 
Kandinsky3Img2ImgPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py import argparse import fnmatch from safetensors.torch import load_file from diffusers import Kandinsky3UNet MAPPING = {'to_time_embed.1': 'time_embedding.linear_1', 'to_time_embed.3': 'time_embedding.linear_2', 'in_layer': 'conv_in', 'out_layer.0': 'conv_norm_out', 'out_layer.2': 'conv_out', 'down_samples': 'down_blocks', 'up_samples': 'up_blocks', 'projection_lin': 'encoder_hid_proj.projection_linear', 'projection_ln': 'encoder_hid_proj.projection_norm', 'feature_pooling': 'add_time_condition', 'to_query': 'to_q', 'to_key': 'to_k', 'to_value': 'to_v', 'output_layer': 'to_out.0', 'self_attention_block': 'attentions.0'} DYNAMIC_MAP = {'resnet_attn_blocks.*.0': 'resnets_in.*', 'resnet_attn_blocks.*.1': ('attentions.*', 1), 'resnet_attn_blocks.*.2': 'resnets_out.*'} def convert_state_dict(unet_state_dict): converted_state_dict = {} for key in unet_state_dict: new_key = key for (pattern, new_pattern) in MAPPING.items(): new_key = new_key.replace(pattern, new_pattern) for (dyn_pattern, dyn_new_pattern) in DYNAMIC_MAP.items(): has_matched = False if fnmatch.fnmatch(new_key, f'*.{dyn_pattern}.*') and (not has_matched): star = int(new_key.split(dyn_pattern.split('.')[0])[-1].split('.')[1]) if isinstance(dyn_new_pattern, tuple): new_star = star + dyn_new_pattern[-1] dyn_new_pattern = dyn_new_pattern[0] else: new_star = star pattern = dyn_pattern.replace('*', str(star)) new_pattern = dyn_new_pattern.replace('*', str(new_star)) new_key = new_key.replace(pattern, new_pattern) has_matched = True converted_state_dict[new_key] = unet_state_dict[key] return converted_state_dict def main(model_path, output_path): unet_state_dict = load_file(model_path) config = {} converted_state_dict = convert_state_dict(unet_state_dict) unet = Kandinsky3UNet(config) unet.load_state_dict(converted_state_dict) unet.save_pretrained(output_path) print(f'Converted model saved to {output_path}') if __name__ == '__main__': parser = argparse.ArgumentParser(description='Convert U-Net PyTorch model to Kandinsky3UNet format') parser.add_argument('--model_path', type=str, required=True, help='Path to the original U-Net PyTorch model') parser.add_argument('--output_path', type=str, required=True, help='Path to save the converted model') args = parser.parse_args() main(args.model_path, args.output_path) # File: diffusers-main/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3.py from typing import Callable, Dict, List, Optional, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import Kandinsky3UNet, VQModel from ...schedulers import DDPMScheduler from ...utils import deprecate, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import AutoPipelineForText2Image\n >>> import torch\n\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16\n ... 
)\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background."\n\n >>> generator = torch.Generator(device="cpu").manual_seed(0)\n >>> image = pipe(prompt, num_inference_steps=25, generator=generator).images[0]\n ```\n\n' def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor ** 2 if height % scale_factor ** 2 != 0: new_height += 1 new_width = width // scale_factor ** 2 if width % scale_factor ** 2 != 0: new_width += 1 return (new_height * scale_factor, new_width * scale_factor) class Kandinsky3Pipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->movq' _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'negative_attention_mask', 'attention_mask'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: Kandinsky3UNet, scheduler: DDPMScheduler, movq: VQModel): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq) def process_embeds(self, embeddings, attention_mask, cut_context): if cut_context: embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0]) max_seq_length = attention_mask.sum(-1).max() + 1 embeddings = embeddings[:, :max_seq_length] attention_mask = attention_mask[:, :max_seq_length] return (embeddings, attention_mask) @torch.no_grad() def encode_prompt(self, prompt, do_classifier_free_guidance=True, num_images_per_prompt=1, device=None, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, _cut_context=False, attention_mask: Optional[torch.Tensor]=None, negative_attention_mask: Optional[torch.Tensor]=None): if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 128 if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids.to(device) attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids, attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] (prompt_embeds, attention_mask) = self.process_embeds(prompt_embeds, attention_mask, _cut_context) prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2) if self.text_encoder is not None: dtype = self.text_encoder.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) attention_mask = attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = 
[negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if negative_prompt is not None: uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=128, truncation=True, return_attention_mask=True, return_tensors='pt') text_input_ids = uncond_input.input_ids.to(device) negative_attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(text_input_ids, attention_mask=negative_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds[:, :prompt_embeds.shape[1]] negative_attention_mask = negative_attention_mask[:, :prompt_embeds.shape[1]] negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2) else: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_attention_mask = torch.zeros_like(attention_mask) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) if negative_prompt_embeds.shape != prompt_embeds.shape: negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_attention_mask = None return (prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def check_inputs(self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, attention_mask=None, negative_attention_mask=None): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if negative_prompt_embeds is not None and negative_attention_mask is None: raise ValueError('Please provide `negative_attention_mask` along with `negative_prompt_embeds`') if negative_prompt_embeds is not None and negative_attention_mask is not None: if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape: raise ValueError(f'`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask` {negative_attention_mask.shape}.') if prompt_embeds is not None and attention_mask is None: raise ValueError('Please provide `attention_mask` along with `prompt_embeds`') if prompt_embeds is not None and attention_mask is not None: if prompt_embeds.shape[:2] != attention_mask.shape: raise ValueError(f'`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask` {attention_mask.shape}.') @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, num_inference_steps: int=25, guidance_scale: float=3.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, height: Optional[int]=1024, width: Optional[int]=1024, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, negative_attention_mask: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, latents=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise 
ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') cut_context = True device = self._execution_device self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, attention_mask, negative_attention_mask) self._guidance_scale = guidance_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] (prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask) = self.encode_prompt(prompt, self.do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, _cut_context=cut_context, attention_mask=attention_mask, negative_attention_mask=negative_attention_mask) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool() self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps (height, width) = downscale_height_and_width(height, width, 8) latents = self.prepare_latents((batch_size * num_images_per_prompt, 4, height, width), prompt_embeds.dtype, device, generator, latents, self.scheduler) if hasattr(self, 'text_encoder_offload_hook') and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, encoder_attention_mask=attention_mask, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) attention_mask = callback_outputs.pop('attention_mask', attention_mask) negative_attention_mask = callback_outputs.pop('negative_attention_mask', negative_attention_mask) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if output_type not in ['pt', 'np', 'pil', 'latent']: raise ValueError(f'Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}') if not output_type == 'latent': image = self.movq.decode(latents, force_not_quantize=True)['sample'] 
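# --- Editor's sketch (not part of the upstream pipeline) ----------------------
# The movq.decode(...) call above returns images as NCHW tensors in roughly the
# [-1, 1] range; the statements that follow rescale to [0, 1], clamp, move the
# channel axis last, and optionally hand off to numpy_to_pil. The standalone
# helper below mirrors that post-processing on a dummy tensor so the conversion
# can be sanity-checked in isolation; `to_numpy_images` and `fake_decoded` are
# illustrative names introduced here, not diffusers API.
import torch

def to_numpy_images(decoded: torch.Tensor):
    """Map [-1, 1] NCHW image tensors to [0, 1] NHWC float32 numpy arrays."""
    images = decoded * 0.5 + 0.5            # [-1, 1] -> [0, 1]
    images = images.clamp(0, 1)             # guard against decoder overshoot
    return images.cpu().permute(0, 2, 3, 1).float().numpy()

if __name__ == "__main__":
    fake_decoded = torch.rand(2, 3, 64, 64) * 2 - 1   # stand-in for a decoded batch
    arrays = to_numpy_images(fake_decoded)
    assert arrays.shape == (2, 64, 64, 3)
    assert 0.0 <= arrays.min() and arrays.max() <= 1.0
# ------------------------------------------------------------------------------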
if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) else: image = latents self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py import inspect from typing import Callable, Dict, List, Optional, Union import numpy as np import PIL import PIL.Image import torch from transformers import T5EncoderModel, T5Tokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...models import Kandinsky3UNet, VQModel from ...schedulers import DDPMScheduler from ...utils import deprecate, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import AutoPipelineForImage2Image\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe = AutoPipelineForImage2Image.from_pretrained(\n ... "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "A painting of the inside of a subway train with tiny raccoons."\n >>> image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png"\n ... )\n\n >>> generator = torch.Generator(device="cpu").manual_seed(0)\n >>> image = pipe(prompt, image=image, strength=0.75, num_inference_steps=25, generator=generator).images[0]\n ```\n' def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor ** 2 if height % scale_factor ** 2 != 0: new_height += 1 new_width = width // scale_factor ** 2 if width % scale_factor ** 2 != 0: new_width += 1 return (new_height * scale_factor, new_width * scale_factor) def prepare_image(pil_image): arr = np.array(pil_image.convert('RGB')) arr = arr.astype(np.float32) / 127.5 - 1 arr = np.transpose(arr, [2, 0, 1]) image = torch.from_numpy(arr).unsqueeze(0) return image class Kandinsky3Img2ImgPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->movq->unet->movq' _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'negative_attention_mask', 'attention_mask'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, unet: Kandinsky3UNet, scheduler: DDPMScheduler, movq: VQModel): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq) def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return (timesteps, num_inference_steps - t_start) def _process_embeds(self, embeddings, attention_mask, cut_context): if cut_context: embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0]) max_seq_length = attention_mask.sum(-1).max() + 1 embeddings = embeddings[:, :max_seq_length] attention_mask = attention_mask[:, :max_seq_length] return (embeddings, attention_mask) @torch.no_grad() def encode_prompt(self, prompt, do_classifier_free_guidance=True, num_images_per_prompt=1, device=None, negative_prompt=None, 
prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, _cut_context=False, attention_mask: Optional[torch.Tensor]=None, negative_attention_mask: Optional[torch.Tensor]=None): if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 128 if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids.to(device) attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids, attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] (prompt_embeds, attention_mask) = self._process_embeds(prompt_embeds, attention_mask, _cut_context) prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2) if self.text_encoder is not None: dtype = self.text_encoder.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) attention_mask = attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if negative_prompt is not None: uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=128, truncation=True, return_attention_mask=True, return_tensors='pt') text_input_ids = uncond_input.input_ids.to(device) negative_attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(text_input_ids, attention_mask=negative_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds[:, :prompt_embeds.shape[1]] negative_attention_mask = negative_attention_mask[:, :prompt_embeds.shape[1]] negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2) else: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_attention_mask = torch.zeros_like(attention_mask) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) if negative_prompt_embeds.shape != prompt_embeds.shape: negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_attention_mask = None return (prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): init_latents = [self.movq.encode(image[i:i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.movq.encode(image).latent_dist.sample(generator) init_latents = self.movq.config.scaling_factor * init_latents init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, attention_mask=None, negative_attention_mask=None): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if negative_prompt_embeds is not None and negative_attention_mask is None: raise ValueError('Please provide `negative_attention_mask` along with `negative_prompt_embeds`') if negative_prompt_embeds is not None and negative_attention_mask is not None: if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape: raise ValueError(f'`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask` {negative_attention_mask.shape}.') if prompt_embeds is not None and attention_mask is None: raise ValueError('Please provide `attention_mask` along with `prompt_embeds`') if prompt_embeds is not None and attention_mask is not None: if prompt_embeds.shape[:2] != attention_mask.shape: raise ValueError(f'`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask` {attention_mask.shape}.') @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]]=None, strength: float=0.3, num_inference_steps: int=25, guidance_scale: float=3.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, negative_attention_mask: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') cut_context = True self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, attention_mask, negative_attention_mask) self._guidance_scale = guidance_scale if prompt 
is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask) = self.encode_prompt(prompt, self.do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, _cut_context=cut_context, attention_mask=attention_mask, negative_attention_mask=negative_attention_mask) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool() if not isinstance(image, list): image = [image] if not all((isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image)): raise ValueError(f'Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor') image = torch.cat([prepare_image(i) for i in image], dim=0) image = image.to(dtype=prompt_embeds.dtype, device=device) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latents = self.movq.encode(image)['latents'] latents = latents.repeat_interleave(num_images_per_prompt, dim=0) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) latents = self.prepare_latents(latents, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator) if hasattr(self, 'text_encoder_offload_hook') and self.text_encoder_offload_hook is not None: self.text_encoder_offload_hook.offload() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, encoder_attention_mask=attention_mask)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) attention_mask = callback_outputs.pop('attention_mask', attention_mask) negative_attention_mask = callback_outputs.pop('negative_attention_mask', negative_attention_mask) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if output_type not in ['pt', 'np', 'pil', 'latent']: raise ValueError(f'Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}') if not output_type == 
'latent': image = self.movq.decode(latents, force_not_quantize=True)['sample'] if output_type in ['np', 'pil']: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) else: image = latents self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kolors/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_sentencepiece_available, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()) and is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_and_sentencepiece_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) else: _import_structure['pipeline_kolors'] = ['KolorsPipeline'] _import_structure['pipeline_kolors_img2img'] = ['KolorsImg2ImgPipeline'] _import_structure['text_encoder'] = ['ChatGLMModel'] _import_structure['tokenizer'] = ['ChatGLMTokenizer'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()) and is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_sentencepiece_objects import * else: from .pipeline_kolors import KolorsPipeline from .pipeline_kolors_img2img import KolorsImg2ImgPipeline from .text_encoder import ChatGLMModel from .tokenizer import ChatGLMTokenizer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/kolors/pipeline_kolors.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...schedulers import KarrasDiffusionSchedulers from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import KolorsPipelineOutput from .text_encoder import ChatGLMModel from .tokenizer import ChatGLMTokenizer if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import KolorsPipeline\n\n >>> pipe = KolorsPipeline.from_pretrained(\n ... "Kwai-Kolors/Kolors-diffusers", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = (\n ... 
"A photo of a ladybug, macro, zoom, high quality, film, holding a wooden sign with the text \'KOLORS\'"\n ... )\n >>> image = pipe(prompt).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class KolorsPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: ChatGLMModel, tokenizer: ChatGLMTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, force_zeros_for_empty_prompt: bool=False): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size def encode_prompt(self, prompt, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, max_sequence_length: int=256): device = device or self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer] 
text_encoders = [self.text_encoder] if prompt_embeds is None: prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): text_inputs = tokenizer(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, return_tensors='pt').to(device) output = text_encoder(input_ids=text_inputs['input_ids'], attention_mask=text_inputs['attention_mask'], position_ids=text_inputs['position_ids'], output_hidden_states=True) prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = prompt_embeds_list[0] zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt negative_prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=max_sequence_length, truncation=True, return_tensors='pt').to(device) output = text_encoder(input_ids=uncond_input['input_ids'], attention_mask=uncond_input['attention_mask'], position_ids=uncond_input['position_ids'], output_hidden_states=True) negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = negative_prompt_embeds_list[0] bs_embed = pooled_prompt_embeds.shape[0] pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, 
return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, num_inference_steps, height, width, negative_prompt=None, prompt_embeds=None, pooled_prompt_embeds=None, negative_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise 
ValueError(f'`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type {type(num_inference_steps)}.') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') if max_sequence_length is not None and max_sequence_length > 256: raise ValueError(f'`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=256): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, num_inference_steps, height, width, negative_prompt, prompt_embeds, pooled_prompt_embeds, negative_prompt_embeds, 
negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) add_text_embeds = pooled_prompt_embeds text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and isinstance(self.denoising_end, float) and (self.denoising_end > 0) and (self.denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with 
self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) negative_add_time_ids = callback_outputs.pop('negative_add_time_ids', negative_add_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return KolorsPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kolors/pipeline_kolors_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...schedulers import KarrasDiffusionSchedulers from ...utils import 
is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import KolorsPipelineOutput from .text_encoder import ChatGLMModel from .tokenizer import ChatGLMTokenizer if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import KolorsImg2ImgPipeline\n >>> from diffusers.utils import load_image\n\n >>> pipe = KolorsImg2ImgPipeline.from_pretrained(\n ... "Kwai-Kolors/Kolors-diffusers", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n >>> url = (\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/kolors/bunny_source.png"\n ... )\n\n\n >>> init_image = load_image(url)\n >>> prompt = "high quality image of a capybara wearing sunglasses. In the background of the image there are trees, poles, grass and other objects. At the bottom of the object there is the road., 8k, highly detailed."\n >>> image = pipe(prompt, image=init_image).images[0]\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class KolorsImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: ChatGLMModel, tokenizer: ChatGLMTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, force_zeros_for_empty_prompt: bool=False): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size def encode_prompt(self, prompt, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, max_sequence_length: int=256): device = device or self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer] text_encoders = [self.text_encoder] if prompt_embeds is None: prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): text_inputs = tokenizer(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, return_tensors='pt').to(device) output = text_encoder(input_ids=text_inputs['input_ids'], attention_mask=text_inputs['attention_mask'], position_ids=text_inputs['position_ids'], output_hidden_states=True) prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = prompt_embeds_list[0] zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif 
prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt negative_prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=max_sequence_length, truncation=True, return_tensors='pt').to(device) output = text_encoder(input_ids=uncond_input['input_ids'], attention_mask=uncond_input['attention_mask'], position_ids=uncond_input['position_ids'], output_hidden_states=True) negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = negative_prompt_embeds_list[0] bs_embed = pooled_prompt_embeds.shape[0] pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` 
must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, strength, num_inference_steps, height, width, negative_prompt=None, prompt_embeds=None, pooled_prompt_embeds=None, negative_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError(f'`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type {type(num_inference_steps)}.') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') if max_sequence_length is not None and max_sequence_length > 256: raise ValueError(f'`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}') def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) else: discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_start * self.scheduler.config.num_train_timesteps)) num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: num_inference_steps = num_inference_steps + 1 t_start = len(self.scheduler.timesteps) - num_inference_steps timesteps = self.scheduler.timesteps[t_start:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start) return (timesteps, num_inference_steps) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` 
or list but is {type(image)}') latents_mean = latents_std = None if hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.text_encoder.to('cpu') torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_start(self): return self._denoising_start @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, strength: float=0.3, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_start: Optional[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=256): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or 
(height, width) self.check_inputs(prompt, strength, num_inference_steps, height, width, negative_prompt, prompt_embeds, pooled_prompt_embeds, negative_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds) image = self.image_processor.preprocess(image) def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) add_noise = True if self.denoising_start is None else False if latents is None: latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, add_noise) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) (height, width) = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and self.denoising_start is not None and 
denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and (self.denoising_start >= self.denoising_end): raise ValueError(f'`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: ' + f' {self.denoising_end} when using type float.') elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) negative_add_time_ids = callback_outputs.pop('negative_add_time_ids', negative_add_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, 
return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return KolorsPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/kolors/pipeline_output.py from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class KolorsPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] # File: diffusers-main/src/diffusers/pipelines/kolors/text_encoder.py import math from typing import List, Optional, Tuple import torch import torch.nn.functional as F from torch import nn from torch.nn import LayerNorm from torch.nn.utils import skip_init from transformers import PretrainedConfig, PreTrainedModel from transformers.modeling_outputs import BaseModelOutputWithPast from ...utils import logging logger = logging.get_logger(__name__) class ChatGLMConfig(PretrainedConfig): model_type = 'chatglm' def __init__(self, num_layers=28, padded_vocab_size=65024, hidden_size=4096, ffn_hidden_size=13696, kv_channels=128, num_attention_heads=32, seq_length=2048, hidden_dropout=0.0, classifier_dropout=None, attention_dropout=0.0, layernorm_epsilon=1e-05, rmsnorm=True, apply_residual_connection_post_layernorm=False, post_layer_norm=True, add_bias_linear=False, add_qkv_bias=False, bias_dropout_fusion=True, multi_query_attention=False, multi_query_group_num=1, apply_query_key_layer_scaling=True, attention_softmax_in_fp32=True, fp32_residual_connection=False, quantization_bit=0, pre_seq_len=None, prefix_projection=False, **kwargs): self.num_layers = num_layers self.vocab_size = padded_vocab_size self.padded_vocab_size = padded_vocab_size self.hidden_size = hidden_size self.ffn_hidden_size = ffn_hidden_size self.kv_channels = kv_channels self.num_attention_heads = num_attention_heads self.seq_length = seq_length self.hidden_dropout = hidden_dropout self.classifier_dropout = classifier_dropout self.attention_dropout = attention_dropout self.layernorm_epsilon = layernorm_epsilon self.rmsnorm = rmsnorm self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm self.post_layer_norm = post_layer_norm self.add_bias_linear = add_bias_linear self.add_qkv_bias = add_qkv_bias self.bias_dropout_fusion = bias_dropout_fusion self.multi_query_attention = multi_query_attention self.multi_query_group_num = multi_query_group_num self.apply_query_key_layer_scaling = apply_query_key_layer_scaling self.attention_softmax_in_fp32 = attention_softmax_in_fp32 self.fp32_residual_connection = fp32_residual_connection self.quantization_bit = quantization_bit self.pre_seq_len = pre_seq_len self.prefix_projection = prefix_projection super().__init__(**kwargs) class RMSNorm(torch.nn.Module): def __init__(self, normalized_shape, eps=1e-05, device=None, dtype=None, **kwargs): super().__init__() self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype)) self.eps = eps def forward(self, hidden_states: torch.Tensor): input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.eps) return (self.weight * hidden_states).to(input_dtype) def _config_to_kwargs(args): common_kwargs = {'dtype': args.torch_dtype} return common_kwargs class 
CoreAttention(torch.nn.Module): def __init__(self, config: ChatGLMConfig, layer_number): super(CoreAttention, self).__init__() self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 if self.apply_query_key_layer_scaling: self.attention_softmax_in_fp32 = True self.layer_number = max(1, layer_number) projection_size = config.kv_channels * config.num_attention_heads self.hidden_size_per_partition = projection_size self.hidden_size_per_attention_head = projection_size // config.num_attention_heads self.num_attention_heads_per_partition = config.num_attention_heads coeff = None self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) if self.apply_query_key_layer_scaling: coeff = self.layer_number self.norm_factor *= coeff self.coeff = coeff self.attention_dropout = torch.nn.Dropout(config.attention_dropout) def forward(self, query_layer, key_layer, value_layer, attention_mask): pytorch_major_version = int(torch.__version__.split('.')[0]) if pytorch_major_version >= 2: (query_layer, key_layer, value_layer) = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]] if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]: context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, is_causal=True) else: if attention_mask is not None: attention_mask = ~attention_mask context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attention_mask) context_layer = context_layer.permute(2, 0, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) context_layer = context_layer.reshape(*new_context_layer_shape) else: output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0)) query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) matmul_input_buffer = torch.empty(output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype, device=query_layer.device) matmul_result = torch.baddbmm(matmul_input_buffer, query_layer.transpose(0, 1), key_layer.transpose(0, 1).transpose(1, 2), beta=0.0, alpha=1.0 / self.norm_factor) attention_scores = matmul_result.view(*output_size) if self.attention_softmax_in_fp32: attention_scores = attention_scores.float() if self.coeff is not None: attention_scores = attention_scores * self.coeff if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]: attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3], device=attention_scores.device, dtype=torch.bool) attention_mask.tril_() attention_mask = ~attention_mask if attention_mask is not None: attention_scores = attention_scores.masked_fill(attention_mask, float('-inf')) attention_probs = F.softmax(attention_scores, dim=-1) attention_probs = attention_probs.type_as(value_layer) attention_probs = self.attention_dropout(attention_probs) output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) context_layer = context_layer.view(*output_size) context_layer = context_layer.permute(2, 0, 1, 
3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer def split_tensor_along_last_dim(tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool=False) -> List[torch.Tensor]: last_dim = tensor.dim() - 1 last_dim_size = tensor.size()[last_dim] // num_partitions tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) if contiguous_split_chunks: return tuple((chunk.contiguous() for chunk in tensor_list)) return tensor_list @torch.jit.script def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor: (sq, _b, np, _hn) = (x.size(0), x.size(1), x.size(2), x.size(3)) rot_dim = rope_cache.shape[-2] * 2 (x, x_pass) = (x[..., :rot_dim], x[..., rot_dim:]) rope_cache = rope_cache[:sq] xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2) rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2) x_out2 = torch.stack([xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1], xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1]], -1) x_out2 = x_out2.flatten(3) return torch.cat((x_out2, x_pass), dim=-1) class SelfAttention(torch.nn.Module): def __init__(self, config: ChatGLMConfig, layer_number, device=None): super(SelfAttention, self).__init__() self.layer_number = max(1, layer_number) self.projection_size = config.kv_channels * config.num_attention_heads self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads self.num_attention_heads_per_partition = config.num_attention_heads self.multi_query_attention = config.multi_query_attention self.qkv_hidden_size = 3 * self.projection_size if self.multi_query_attention: self.num_multi_query_groups_per_partition = config.multi_query_group_num self.qkv_hidden_size = self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size, bias=config.add_bias_linear or config.add_qkv_bias, device=device, **_config_to_kwargs(config)) self.core_attention = CoreAttention(config, self.layer_number) self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear, device=device, **_config_to_kwargs(config)) def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None): if self.multi_query_attention: num_attention_heads = self.num_multi_query_groups_per_partition else: num_attention_heads = self.num_attention_heads_per_partition return torch.empty(inference_max_sequence_len, batch_size, num_attention_heads, self.hidden_size_per_attention_head, dtype=dtype, device=device) def forward(self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True): mixed_x_layer = self.query_key_value(hidden_states) if self.multi_query_attention: (query_layer, key_layer, value_layer) = mixed_x_layer.split([self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head], dim=-1) query_layer = query_layer.view(query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)) key_layer = key_layer.view(key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)) value_layer = value_layer.view(value_layer.size()[:-1] + 
(self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)) else: new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition, 3 * self.hidden_size_per_attention_head) mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) if rotary_pos_emb is not None: query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb) key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) if kv_cache is not None: (cache_k, cache_v) = kv_cache key_layer = torch.cat((cache_k, key_layer), dim=0) value_layer = torch.cat((cache_v, value_layer), dim=0) if use_cache: kv_cache = (key_layer, value_layer) else: kv_cache = None if self.multi_query_attention: key_layer = key_layer.unsqueeze(-2) key_layer = key_layer.expand(-1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1) key_layer = key_layer.contiguous().view(key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)) value_layer = value_layer.unsqueeze(-2) value_layer = value_layer.expand(-1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1) value_layer = value_layer.contiguous().view(value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)) context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) output = self.dense(context_layer) return (output, kv_cache) class MLP(torch.nn.Module): def __init__(self, config: ChatGLMConfig, device=None): super(MLP, self).__init__() self.add_bias = config.add_bias_linear self.dense_h_to_4h = nn.Linear(config.hidden_size, config.ffn_hidden_size * 2, bias=self.add_bias, device=device, **_config_to_kwargs(config)) def swiglu(x): x = torch.chunk(x, 2, dim=-1) return F.silu(x[0]) * x[1] self.activation_func = swiglu self.dense_4h_to_h = nn.Linear(config.ffn_hidden_size, config.hidden_size, bias=self.add_bias, device=device, **_config_to_kwargs(config)) def forward(self, hidden_states): intermediate_parallel = self.dense_h_to_4h(hidden_states) intermediate_parallel = self.activation_func(intermediate_parallel) output = self.dense_4h_to_h(intermediate_parallel) return output class GLMBlock(torch.nn.Module): def __init__(self, config: ChatGLMConfig, layer_number, device=None): super(GLMBlock, self).__init__() self.layer_number = layer_number self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm self.fp32_residual_connection = config.fp32_residual_connection LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype) self.self_attention = SelfAttention(config, layer_number, device=device) self.hidden_dropout = config.hidden_dropout self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype) self.mlp = MLP(config, device=device) def forward(self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True): layernorm_output = self.input_layernorm(hidden_states) (attention_output, kv_cache) = self.self_attention(layernorm_output, attention_mask, rotary_pos_emb, kv_cache=kv_cache, use_cache=use_cache) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states layernorm_input = 
torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) layernorm_input = residual + layernorm_input layernorm_output = self.post_attention_layernorm(layernorm_input) mlp_output = self.mlp(layernorm_output) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) output = residual + output return (output, kv_cache) class GLMTransformer(torch.nn.Module): def __init__(self, config: ChatGLMConfig, device=None): super(GLMTransformer, self).__init__() self.fp32_residual_connection = config.fp32_residual_connection self.post_layer_norm = config.post_layer_norm self.num_layers = config.num_layers def build_layer(layer_number): return GLMBlock(config, layer_number, device=device) self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)]) if self.post_layer_norm: LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype) self.gradient_checkpointing = False def _get_layer(self, layer_number): return self.layers[layer_number] def forward(self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None, use_cache: Optional[bool]=True, output_hidden_states: Optional[bool]=False): if not kv_caches: kv_caches = [None for _ in range(self.num_layers)] presents = () if use_cache else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False all_self_attentions = None all_hidden_states = () if output_hidden_states else None for index in range(self.num_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer = self._get_layer(index) if self.gradient_checkpointing and self.training: layer_ret = torch.utils.checkpoint.checkpoint(layer, hidden_states, attention_mask, rotary_pos_emb, kv_caches[index], use_cache) else: layer_ret = layer(hidden_states, attention_mask, rotary_pos_emb, kv_cache=kv_caches[index], use_cache=use_cache) (hidden_states, kv_cache) = layer_ret if use_cache: presents = presents + (kv_cache,) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.post_layer_norm: hidden_states = self.final_layernorm(hidden_states) return (hidden_states, presents, all_hidden_states, all_self_attentions) class ChatGLMPreTrainedModel(PreTrainedModel): is_parallelizable = False supports_gradient_checkpointing = True config_class = ChatGLMConfig base_model_prefix = 'transformer' _no_split_modules = ['GLMBlock'] def _init_weights(self, module: nn.Module): return def get_masks(self, input_ids, past_key_values, padding_mask=None): (batch_size, seq_length) = input_ids.shape full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device) full_attention_mask.tril_() past_length = 0 if past_key_values: past_length = past_key_values[0][0].shape[0] if past_length: full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length, device=input_ids.device), full_attention_mask), dim=-1) if padding_mask is not None: full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1) if not past_length and padding_mask is not None: full_attention_mask -= padding_mask.unsqueeze(-1) - 1 full_attention_mask = (full_attention_mask < 
0.5).bool() full_attention_mask.unsqueeze_(1) return full_attention_mask def get_position_ids(self, input_ids, device): (batch_size, seq_length) = input_ids.shape position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) return position_ids def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, GLMTransformer): module.gradient_checkpointing = value def default_init(cls, *args, **kwargs): return cls(*args, **kwargs) class Embedding(torch.nn.Module): def __init__(self, config: ChatGLMConfig, device=None): super(Embedding, self).__init__() self.hidden_size = config.hidden_size self.word_embeddings = nn.Embedding(config.padded_vocab_size, self.hidden_size, dtype=config.torch_dtype, device=device) self.fp32_residual_connection = config.fp32_residual_connection def forward(self, input_ids): words_embeddings = self.word_embeddings(input_ids) embeddings = words_embeddings embeddings = embeddings.transpose(0, 1).contiguous() if self.fp32_residual_connection: embeddings = embeddings.float() return embeddings class RotaryEmbedding(nn.Module): def __init__(self, dim, original_impl=False, device=None, dtype=None): super().__init__() inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim) self.register_buffer('inv_freq', inv_freq) self.dim = dim self.original_impl = original_impl def forward_impl(self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int=10000): theta = 1.0 / base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem) seq_idx = torch.arange(seq_len, dtype=torch.float, device=device) idx_theta = torch.outer(seq_idx, theta).float() cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) if dtype in (torch.float16, torch.bfloat16, torch.int8): cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half() return cache def forward(self, max_seq_len, offset=0): return self.forward_impl(max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device) class PrefixEncoder(torch.nn.Module): def __init__(self, config: ChatGLMConfig): super().__init__() self.prefix_projection = config.prefix_projection if self.prefix_projection: kv_size = config.num_layers * config.kv_channels * config.multi_query_group_num * 2 self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size) self.trans = torch.nn.Sequential(torch.nn.Linear(kv_size, config.hidden_size), torch.nn.Tanh(), torch.nn.Linear(config.hidden_size, kv_size)) else: self.embedding = torch.nn.Embedding(config.pre_seq_len, config.num_layers * config.kv_channels * config.multi_query_group_num * 2) def forward(self, prefix: torch.Tensor): if self.prefix_projection: prefix_tokens = self.embedding(prefix) past_key_values = self.trans(prefix_tokens) else: past_key_values = self.embedding(prefix) return past_key_values class ChatGLMModel(ChatGLMPreTrainedModel): def __init__(self, config: ChatGLMConfig, device=None, empty_init=True): super().__init__(config) if empty_init: init_method = skip_init else: init_method = default_init init_kwargs = {} if device is not None: init_kwargs['device'] = device self.embedding = init_method(Embedding, config, **init_kwargs) self.num_layers = config.num_layers self.multi_query_group_num = config.multi_query_group_num self.kv_channels = config.kv_channels self.seq_length = config.seq_length rotary_dim = config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels self.rotary_pos_emb = 
RotaryEmbedding(rotary_dim // 2, original_impl=config.original_rope, device=device, dtype=config.torch_dtype) self.encoder = init_method(GLMTransformer, config, **init_kwargs) self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False, dtype=config.torch_dtype, **init_kwargs) self.pre_seq_len = config.pre_seq_len self.prefix_projection = config.prefix_projection if self.pre_seq_len is not None: for param in self.parameters(): param.requires_grad = False self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = PrefixEncoder(config) self.dropout = torch.nn.Dropout(0.1) def get_input_embeddings(self): return self.embedding.word_embeddings def get_prompt(self, batch_size, device, dtype=torch.half): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device) past_key_values = self.prefix_encoder(prefix_tokens).type(dtype) past_key_values = past_key_values.view(batch_size, self.pre_seq_len, self.num_layers * 2, self.multi_query_group_num, self.kv_channels) past_key_values = self.dropout(past_key_values) past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2) return past_key_values def forward(self, input_ids, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.BoolTensor]=None, full_attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict (batch_size, seq_length) = input_ids.shape if inputs_embeds is None: inputs_embeds = self.embedding(input_ids) if self.pre_seq_len is not None: if past_key_values is None: past_key_values = self.get_prompt(batch_size=batch_size, device=input_ids.device, dtype=inputs_embeds.dtype) if attention_mask is not None: attention_mask = torch.cat([attention_mask.new_ones((batch_size, self.pre_seq_len)), attention_mask], dim=-1) if full_attention_mask is None: if attention_mask is not None and (not attention_mask.all()) or (past_key_values and seq_length != 1): full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask) rotary_pos_emb = self.rotary_pos_emb(self.seq_length) if position_ids is not None: rotary_pos_emb = rotary_pos_emb[position_ids] else: rotary_pos_emb = rotary_pos_emb[None, :seq_length] rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous() (hidden_states, presents, all_hidden_states, all_self_attentions) = self.encoder(inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb, kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions) # File: diffusers-main/src/diffusers/pipelines/kolors/tokenizer.py import json import os import re from typing import Dict, List, Optional, Union from sentencepiece import SentencePieceProcessor from transformers import PreTrainedTokenizer from 
transformers.tokenization_utils_base import BatchEncoding, EncodedInput from transformers.utils import PaddingStrategy class SPTokenizer: def __init__(self, model_path: str): assert os.path.isfile(model_path), model_path self.sp_model = SentencePieceProcessor(model_file=model_path) self.n_words: int = self.sp_model.vocab_size() self.bos_id: int = self.sp_model.bos_id() self.eos_id: int = self.sp_model.eos_id() self.pad_id: int = self.sp_model.unk_id() assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() role_special_tokens = ['<|system|>', '<|user|>', '<|assistant|>', '<|observation|>'] special_tokens = ['[MASK]', '[gMASK]', '[sMASK]', 'sop', 'eop'] + role_special_tokens self.special_tokens = {} self.index_special_tokens = {} for token in special_tokens: self.special_tokens[token] = self.n_words self.index_special_tokens[self.n_words] = token self.n_words += 1 self.role_special_token_expression = '|'.join([re.escape(token) for token in role_special_tokens]) def tokenize(self, s: str, encode_special_tokens=False): if encode_special_tokens: last_index = 0 t = [] for match in re.finditer(self.role_special_token_expression, s): if last_index < match.start(): t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()])) t.append(s[match.start():match.end()]) last_index = match.end() if last_index < len(s): t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) return t else: return self.sp_model.EncodeAsPieces(s) def encode(self, s: str, bos: bool=False, eos: bool=False) -> List[int]: assert isinstance(s, str) t = self.sp_model.encode(s) if bos: t = [self.bos_id] + t if eos: t = t + [self.eos_id] return t def decode(self, t: List[int]) -> str: (text, buffer) = ('', []) for token in t: if token in self.index_special_tokens: if buffer: text += self.sp_model.decode(buffer) buffer = [] text += self.index_special_tokens[token] else: buffer.append(token) if buffer: text += self.sp_model.decode(buffer) return text def decode_tokens(self, tokens: List[str]) -> str: text = self.sp_model.DecodePieces(tokens) return text def convert_token_to_id(self, token): if token in self.special_tokens: return self.special_tokens[token] return self.sp_model.PieceToId(token) def convert_id_to_token(self, index): if index in self.index_special_tokens: return self.index_special_tokens[index] if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0: return '' return self.sp_model.IdToPiece(index) class ChatGLMTokenizer(PreTrainedTokenizer): vocab_files_names = {'vocab_file': 'tokenizer.model'} model_input_names = ['input_ids', 'attention_mask', 'position_ids'] def __init__(self, vocab_file, padding_side='left', clean_up_tokenization_spaces=False, encode_special_tokens=False, **kwargs): self.name = 'GLMTokenizer' self.vocab_file = vocab_file self.tokenizer = SPTokenizer(vocab_file) self.special_tokens = {'<bos>': self.tokenizer.bos_id, '<eos>': self.tokenizer.eos_id, '<pad>': self.tokenizer.pad_id} self.encode_special_tokens = encode_special_tokens super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, encode_special_tokens=encode_special_tokens, **kwargs) def get_command(self, token): if token in self.special_tokens: return self.special_tokens[token] assert token in self.tokenizer.special_tokens, f'{token} is not a special token for {self.name}' return self.tokenizer.special_tokens[token] @property def unk_token(self) -> str: return '<unk>' @unk_token.setter def unk_token(self, value: str): self._unk_token = value @property def pad_token(self) -> str: return '<unk>' 
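# The special-token ids used by the token properties here are resolved through `get_command`,
# which first checks the tokenizer-level map ('<bos>', '<eos>', '<pad>') and then falls back
# to the SentencePiece special tokens registered in `SPTokenizer` ('[MASK]', '[gMASK]',
# '[sMASK]', 'sop', 'eop' and role tokens such as '<|user|>').
# Illustrative sketch (assumes a local SentencePiece `tokenizer.model` file is available):
#   tok = ChatGLMTokenizer('tokenizer.model')
#   tok.pad_token_id == tok.get_command('<pad>')  # the pad id reuses the SentencePiece unk id
#   tok.build_chat_input('Hi there', role='user')['input_ids']  # query wrapped with role tokens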
@pad_token.setter def pad_token(self, value: str): self._pad_token = value @property def pad_token_id(self): return self.get_command('<pad>') @property def eos_token(self) -> str: return '</s>' @eos_token.setter def eos_token(self, value: str): self._eos_token = value @property def eos_token_id(self): return self.get_command('<eos>') @property def vocab_size(self): return self.tokenizer.n_words def get_vocab(self): vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text, **kwargs): return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) def _convert_token_to_id(self, token): return self.tokenizer.convert_token_to_id(token) def _convert_id_to_token(self, index): return self.tokenizer.convert_id_to_token(index) def convert_tokens_to_string(self, tokens: List[str]) -> str: return self.tokenizer.decode_tokens(tokens) def save_vocabulary(self, save_directory, filename_prefix=None): if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, self.vocab_files_names['vocab_file']) else: vocab_file = save_directory with open(self.vocab_file, 'rb') as fin: proto_str = fin.read() with open(vocab_file, 'wb') as writer: writer.write(proto_str) return (vocab_file,) def get_prefix_tokens(self): prefix_tokens = [self.get_command('[gMASK]'), self.get_command('sop')] return prefix_tokens def build_single_message(self, role, metadata, message): assert role in ['system', 'user', 'assistant', 'observation'], role role_tokens = [self.get_command(f'<|{role}|>')] + self.tokenizer.encode(f'{metadata}\n') message_tokens = self.tokenizer.encode(message) tokens = role_tokens + message_tokens return tokens def build_chat_input(self, query, history=None, role='user'): if history is None: history = [] input_ids = [] for item in history: content = item['content'] if item['role'] == 'system' and 'tools' in item: content = content + '\n' + json.dumps(item['tools'], indent=4, ensure_ascii=False) input_ids.extend(self.build_single_message(item['role'], item.get('metadata', ''), content)) input_ids.extend(self.build_single_message(role, '', query)) input_ids.extend([self.get_command('<|assistant|>')]) return self.batch_encode_plus([input_ids], return_tensors='pt', is_split_into_words=True) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: prefix_tokens = self.get_prefix_tokens() token_ids_0 = prefix_tokens + token_ids_0 if token_ids_1 is not None: token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command('<eos>')] return token_ids_0 def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict: assert self.padding_side == 'left' required_input = encoded_inputs[self.model_input_names[0]] seq_length = len(required_input) if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length if 'attention_mask' not in encoded_inputs: encoded_inputs['attention_mask'] = [1] * seq_length if 'position_ids' not in encoded_inputs: 
encoded_inputs['position_ids'] = list(range(seq_length)) if needs_to_be_padded: difference = max_length - len(required_input) if 'attention_mask' in encoded_inputs: encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask'] if 'position_ids' in encoded_inputs: encoded_inputs['position_ids'] = [0] * difference + encoded_inputs['position_ids'] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input return encoded_inputs # File: diffusers-main/src/diffusers/pipelines/latent_consistency_models/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_latent_consistency_img2img'] = ['LatentConsistencyModelImg2ImgPipeline'] _import_structure['pipeline_latent_consistency_text2img'] = ['LatentConsistencyModelPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_latent_consistency_img2img import LatentConsistencyModelImg2ImgPipeline from .pipeline_latent_consistency_text2img import LatentConsistencyModelPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import LCMScheduler from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker logger = logging.get_logger(__name__) def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, 
num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import AutoPipelineForImage2Image\n >>> import torch\n >>> import PIL\n\n >>> pipe = AutoPipelineForImage2Image.from_pretrained("SimianLuo/LCM_Dreamshaper_v7")\n >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.\n >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32)\n\n >>> prompt = "High altitude snowy mountains"\n >>> image = PIL.Image.open("./snowy_mountains.png")\n\n >>> # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps.\n >>> num_inference_steps = 4\n >>> images = pipe(\n ... prompt=prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=8.0\n ... ).images\n\n >>> images[0].save("image.png")\n ```\n\n' class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'denoised', 'prompt_embeds', 'w_embedding'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: LCMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection]=None, requires_safety_checker: bool=True): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. 
Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: 
{negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). 
Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def check_inputs(self, prompt: Union[str, List[str]], strength: float, callback_steps: int, prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') @property def guidance_scale(self): return self._guidance_scale @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return False @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, num_inference_steps: int=4, strength: float=0.8, original_inference_steps: int=None, timesteps: List[int]=None, guidance_scale: float=8.5, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') self.check_inputs(prompt, strength, callback_steps, prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, _) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt=None, prompt_embeds=prompt_embeds, negative_prompt_embeds=None, lora_scale=lora_scale, 
clip_skip=self.clip_skip) image = self.image_processor.preprocess(image) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, original_inference_steps=original_inference_steps, strength=strength) original_inference_steps = original_inference_steps if original_inference_steps is not None else self.scheduler.config.original_inference_steps latent_timestep = timesteps[:1] if latents is None: latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator) bs = batch_size * num_images_per_prompt w = torch.tensor(self.guidance_scale - 1).repeat(bs) w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latents = latents.to(prompt_embeds.dtype) model_pred = self.unet(latents, t, timestep_cond=w_embedding, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] (latents, denoised) = self.scheduler.step(model_pred, t, latents, **extra_step_kwargs, return_dict=False) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) w_embedding = callback_outputs.pop('w_embedding', w_embedding) denoised = callback_outputs.pop('denoised', denoised) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) denoised = denoised.to(prompt_embeds.dtype) if not output_type == 'latent': image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = denoised has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, 
ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import LCMScheduler from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import DiffusionPipeline\n >>> import torch\n\n >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7")\n >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.\n >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32)\n\n >>> prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"\n\n >>> # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps.\n >>> num_inference_steps = 4\n >>> images = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=8.0).images\n >>> images[0].save("image.png")\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class LatentConsistencyModelPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'denoised', 'prompt_embeds', 'w_embedding'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: LCMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection]=None, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != 
len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt: Union[str, List[str]], height: int, width: int, callback_steps: int, prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') @property def guidance_scale(self): return self._guidance_scale @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return False @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=4, original_inference_steps: int=None, timesteps: List[int]=None, guidance_scale: float=8.5, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, _) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt=None, prompt_embeds=prompt_embeds, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=self.clip_skip) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, original_inference_steps=original_inference_steps) num_channels_latents = self.unet.config.in_channels latents 
= self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) bs = batch_size * num_images_per_prompt w = torch.tensor(self.guidance_scale - 1).repeat(bs) w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latents = latents.to(prompt_embeds.dtype) model_pred = self.unet(latents, t, timestep_cond=w_embedding, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] (latents, denoised) = self.scheduler.step(model_pred, t, latents, **extra_step_kwargs, return_dict=False) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) w_embedding = callback_outputs.pop('w_embedding', w_embedding) denoised = callback_outputs.pop('denoised', denoised) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) denoised = denoised.to(prompt_embeds.dtype) if not output_type == 'latent': image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = denoised has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/latent_diffusion/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_latent_diffusion'] = ['LDMBertModel', 'LDMTextToImagePipeline'] _import_structure['pipeline_latent_diffusion_superresolution'] = ['LDMSuperResolutionPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py import inspect from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutput from transformers.utils import logging from ...models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class LDMTextToImagePipeline(DiffusionPipeline): model_cpu_offload_seq = 'bert->unet->vqvae' def __init__(self, vqvae: Union[VQModel, AutoencoderKL], bert: PreTrainedModel, tokenizer: PreTrainedTokenizer, unet: Union[UNet2DModel, UNet2DConditionModel], scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]): super().__init__() self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=1.0, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, **kwargs) -> Union[Tuple, ImagePipelineOutput]: height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if guidance_scale != 1.0: uncond_input = self.tokenizer([''] * batch_size, padding='max_length', max_length=77, truncation=True, return_tensors='pt') negative_prompt_embeds = self.bert(uncond_input.input_ids.to(self._execution_device))[0] text_input = self.tokenizer(prompt, padding='max_length', max_length=77, truncation=True, return_tensors='pt') prompt_embeds = self.bert(text_input.input_ids.to(self._execution_device))[0] latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(latents_shape, generator=generator, device=self._execution_device, dtype=prompt_embeds.dtype) elif latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') latents = latents.to(self._execution_device) self.scheduler.set_timesteps(num_inference_steps) accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_kwargs = {} if accepts_eta: extra_kwargs['eta'] = eta for t in self.progress_bar(self.scheduler.timesteps): if guidance_scale == 1.0: latents_input = latents context = prompt_embeds else: latents_input = torch.cat([latents] * 2) context = torch.cat([negative_prompt_embeds, prompt_embeds]) noise_pred = self.unet(latents_input, t, encoder_hidden_states=context).sample if guidance_scale != 1.0: (noise_pred_uncond, noise_prediction_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample latents = 1 / self.vqvae.config.scaling_factor * latents image = self.vqvae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) '' logger = logging.get_logger(__name__) LDMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ['ldm-bert'] LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ldm-bert': 'https://huggingface.co/valhalla/ldm-bert/blob/main/config.json'} '' class LDMBertConfig(PretrainedConfig): model_type = 'ldmbert' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=30522, max_position_embeddings=77, encoder_layers=32, encoder_ffn_dim=5120, encoder_attention_heads=8, head_dim=64, encoder_layerdrop=0.0, activation_function='gelu', d_model=1280, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.head_dim = head_dim self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding super().__init__(pad_token_id=pad_token_id, **kwargs) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): (bsz, src_len) = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class LDMBertAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, head_dim: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=False): super().__init__() self.embed_dim = embed_dim 
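# --- Editor's note: illustrative sketch, not part of the library source. ---
# `_expand_mask` (defined just above) turns a (batch, src_len) padding mask into an
# additive attention bias of shape (batch, 1, tgt_len, src_len): positions with
# mask == 1 receive a bias of 0.0, padded positions (mask == 0) receive the most
# negative representable value so that softmax assigns them ~zero attention weight.
# Only `torch` and the `_expand_mask` helper above are assumed; the mask values are made up.
_mask = torch.tensor([[1, 1, 1, 0, 0]])            # one sequence of length 5, last two tokens padded
_bias = _expand_mask(_mask, dtype=torch.float32)   # -> shape (1, 1, 5, 5)
assert _bias.shape == (1, 1, 5, 5)
assert _bias[0, 0, 0, 0] == 0.0                               # attended position keeps a zero bias
assert _bias[0, 0, 0, -1] == torch.finfo(torch.float32).min   # padded position is masked out
# --- End of editor's note. ---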
self.num_heads = num_heads self.dropout = dropout self.head_dim = head_dim self.inner_dim = head_dim * num_heads self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) self.out_proj = nn.Linear(self.inner_dim, embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None: key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, 
tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class LDMBertEncoderLayer(nn.Module): def __init__(self, config: LDMBertConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = LDMBertAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, head_dim=config.head_dim, dropout=config.attention_dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) (hidden_states, attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class LDMBertPreTrainedModel(PreTrainedModel): config_class = LDMBertConfig base_model_prefix = 'model' _supports_gradient_checkpointing = True _keys_to_ignore_on_load_unexpected = ['encoder\\.version', 'decoder\\.version'] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LDMBertEncoder,)): module.gradient_checkpointing = value @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids} return dummy_inputs class LDMBertEncoder(LDMBertPreTrainedModel): def __init__(self, config: LDMBertConfig): 
super().__init__(config) self.dropout = config.dropout embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim) self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) seq_len = input_shape[1] if position_ids is None: position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1)) embed_pos = self.embed_positions(position_ids) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is not None: attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint(create_custom_forward(encoder_layer), hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return 
BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class LDMBertModel(LDMBertPreTrainedModel): _no_split_modules = [] def __init__(self, config: LDMBertConfig): super().__init__(config) self.model = LDMBertEncoder(config) self.to_logits = nn.Linear(config.hidden_size, config.vocab_size) def forward(self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None): outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return outputs # File: diffusers-main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.utils.checkpoint from ...models import UNet2DModel, VQModel from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import PIL_INTERPOLATION from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def preprocess(image): (w, h) = image.size (w, h) = (x - x % 32 for x in (w, h)) image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos']) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image) return 2.0 * image - 1.0 class LDMSuperResolutionPipeline(DiffusionPipeline): def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler]): super().__init__() self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) @torch.no_grad() def __call__(self, image: Union[torch.Tensor, PIL.Image.Image]=None, batch_size: Optional[int]=1, num_inference_steps: Optional[int]=100, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', return_dict: bool=True) -> Union[Tuple, ImagePipelineOutput]: if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, torch.Tensor): batch_size = image.shape[0] else: raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}') if isinstance(image, PIL.Image.Image): image = preprocess(image) (height, width) = image.shape[-2:] latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width) latents_dtype = next(self.unet.parameters()).dtype latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) image = image.to(device=self.device, dtype=latents_dtype) self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps_tensor = self.scheduler.timesteps latents = latents * self.scheduler.init_noise_sigma accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_kwargs = {} if accepts_eta: extra_kwargs['eta'] = eta for t in self.progress_bar(timesteps_tensor): latents_input = torch.cat([latents, image], dim=1) latents_input = self.scheduler.scale_model_input(latents_input, t) noise_pred = self.unet(latents_input, t).sample latents = 
self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample image = self.vqvae.decode(latents).sample image = torch.clamp(image, -1.0, 1.0) image = image / 2 + 0.5 image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/latte/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_latte'] = ['LattePipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_latte import LattePipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/latte/pipeline_latte.py import html import inspect import re import urllib.parse as ul from dataclasses import dataclass from typing import Callable, Dict, List, Optional, Tuple, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...models import AutoencoderKL, LatteTransformer3DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import KarrasDiffusionSchedulers from ...utils import BACKENDS_MAPPING, BaseOutput, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import is_compiled_module, randn_tensor from ...video_processor import VideoProcessor logger = logging.get_logger(__name__) if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import LattePipeline\n >>> from diffusers.utils import export_to_gif\n\n >>> # You can replace the checkpoint id with "maxin-cn/Latte-1" too.\n >>> pipe = LattePipeline.from_pretrained("maxin-cn/Latte-1", torch_dtype=torch.float16)\n >>> # Enable memory optimizations.\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "A small cactus with a happy face in the Sahara desert."\n >>> videos = pipe(prompt).frames[0]\n >>> export_to_gif(videos, "latte.gif")\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) @dataclass class LattePipelineOutput(BaseOutput): frames: torch.Tensor class LattePipeline(DiffusionPipeline): bad_punct_regex = re.compile('[#®•©™&@·º½¾¿¡§~\\)\\(\\]\\[\\}\\{\\|\\\\/\\\\*]{1,}') _optional_components = ['tokenizer', 'text_encoder'] model_cpu_offload_seq = 'text_encoder->transformer->vae' _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKL, transformer: LatteTransformer3DModel, scheduler: KarrasDiffusionSchedulers): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) def mask_text_embeddings(self, emb, mask): if emb.shape[0] == 1: keep_index = mask.sum().item() return (emb[:, :, :keep_index, :], keep_index) else: masked_feature = emb * mask[:, None, :, None] return (masked_feature, emb.shape[2]) def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, negative_prompt: str='', num_images_per_prompt: int=1, device: Optional[torch.device]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, clean_caption: bool=False, mask_feature: bool=True, dtype=None): embeds_initially_provided = prompt_embeds is not None and negative_prompt_embeds is not None if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 120 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following 
part of your input was truncated because CLIP can only handle sequences up to {max_length} tokens: {removed_text}') attention_mask = text_inputs.attention_mask.to(device) prompt_embeds_attention_mask = attention_mask prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds_attention_mask = torch.ones_like(prompt_embeds) if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds_attention_mask = prompt_embeds_attention_mask.view(bs_embed, -1) prompt_embeds_attention_mask = prompt_embeds_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) else: negative_prompt_embeds = None if mask_feature and (not embeds_initially_provided): prompt_embeds = prompt_embeds.unsqueeze(1) (masked_prompt_embeds, keep_indices) = self.mask_text_embeddings(prompt_embeds, prompt_embeds_attention_mask) masked_prompt_embeds = masked_prompt_embeds.squeeze(1) masked_negative_prompt_embeds = negative_prompt_embeds[:, :keep_indices, :] if negative_prompt_embeds is not None else None return (masked_prompt_embeds, masked_negative_prompt_embeds) return (prompt_embeds, negative_prompt_embeds) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, prompt_embeds=None, negative_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in 
self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('<person>', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('&quot;?', '', caption) caption = re.sub('&amp', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) 
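# --- Editor's note: illustrative sketch, not part of the library source. ---
# `retrieve_timesteps` (defined near the top of this file) either forwards
# `num_inference_steps` to `scheduler.set_timesteps`, or, when the scheduler's
# `set_timesteps` accepts them, installs an explicit `timesteps`/`sigmas` schedule;
# it returns the resulting timesteps together with their count. The scheduler
# below is only an assumption for the sketch; any scheduler exposing
# `set_timesteps(num_inference_steps, device=...)` would behave the same way.
from ...schedulers import DDIMScheduler

_scheduler = DDIMScheduler()
_ts, _n = retrieve_timesteps(_scheduler, num_inference_steps=8, device='cpu')
assert _n == 8 and len(_ts) == 8   # default path: the scheduler produces 8 timesteps
# --- End of editor's note. ---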
caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, negative_prompt: str='', num_inference_steps: int=50, timesteps: Optional[List[int]]=None, guidance_scale: float=7.5, num_images_per_prompt: int=1, video_length: int=16, height: int=512, width: int=512, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: str='pil', return_dict: bool=True, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], clean_caption: bool=True, mask_feature: bool=True, enable_temporal_attentions: bool=True, decode_chunk_size: Optional[int]=None) -> Union[LattePipelineOutput, Tuple]: if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else video_length height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, prompt_embeds, negative_prompt_embeds) self._guidance_scale = guidance_scale self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, do_classifier_free_guidance, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clean_caption=clean_caption, mask_feature=mask_feature) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) self._num_timesteps = len(timesteps) latent_channels = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, latent_channels, video_length, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = 
self.scheduler.scale_model_input(latent_model_input, t) current_timestep = t if not torch.is_tensor(current_timestep): is_mps = latent_model_input.device.type == 'mps' if isinstance(current_timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) elif len(current_timestep.shape) == 0: current_timestep = current_timestep[None].to(latent_model_input.device) current_timestep = current_timestep.expand(latent_model_input.shape[0]) noise_pred = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=current_timestep, enable_temporal_attentions=enable_temporal_attentions, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if not (hasattr(self.scheduler.config, 'variance_type') and self.scheduler.config.variance_type in ['learned', 'learned_range']): noise_pred = noise_pred.chunk(2, dim=1)[0] latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == 'latents': video = self.decode_latents(latents, video_length, decode_chunk_size=14) video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents self.maybe_free_model_hooks() if not return_dict: return (video,) return LattePipelineOutput(frames=video) def decode_latents(self, latents: torch.Tensor, video_length: int, decode_chunk_size: int=14): latents = latents.permute(0, 2, 1, 3, 4).flatten(0, 1) latents = 1 / self.vae.config.scaling_factor * latents forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward accepts_num_frames = 'num_frames' in set(inspect.signature(forward_vae_fn).parameters.keys()) frames = [] for i in range(0, latents.shape[0], decode_chunk_size): num_frames_in = latents[i:i + decode_chunk_size].shape[0] decode_kwargs = {} if accepts_num_frames: decode_kwargs['num_frames'] = num_frames_in frame = self.vae.decode(latents[i:i + decode_chunk_size], **decode_kwargs).sample frames.append(frame) frames = torch.cat(frames, dim=0) frames = frames.reshape(-1, video_length, *frames.shape[1:]).permute(0, 2, 1, 3, 4) frames = frames.float() return frames # File: diffusers-main/src/diffusers/pipelines/ledits_pp/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects 
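# --- Editor's note: illustrative sketch, not part of the library source. ---
# `LattePipeline.decode_latents` above flattens the batch and frame axes, decodes
# `decode_chunk_size` frames per VAE call, and then restores the
# (batch, channels, frames, height, width) layout. The toy round-trip below uses an
# identity "decode" purely to show that the reshaping is lossless; all sizes are
# made up for the example.
import torch

_b, _c, _f, _h, _w = 2, 4, 16, 8, 8
_latents = torch.randn(_b, _c, _f, _h, _w)
_flat = _latents.permute(0, 2, 1, 3, 4).flatten(0, 1)             # (b*f, c, h, w)
_chunks = [_flat[i:i + 7] for i in range(0, _flat.shape[0], 7)]   # 7 frames per "decode" call
_decoded = torch.cat(_chunks, dim=0)                              # identity stand-in for vae.decode
_video = _decoded.reshape(-1, _f, *_decoded.shape[1:]).permute(0, 2, 1, 3, 4)
assert torch.equal(_video, _latents)                              # layout is restored exactly
# --- End of editor's note. ---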
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_leditspp_stable_diffusion'] = ['LEditsPPPipelineStableDiffusion'] _import_structure['pipeline_leditspp_stable_diffusion_xl'] = ['LEditsPPPipelineStableDiffusionXL'] _import_structure['pipeline_output'] = ['LEditsPPDiffusionPipelineOutput', 'LEditsPPDiffusionPipelineOutput'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_leditspp_stable_diffusion import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput, LEditsPPPipelineStableDiffusion from .pipeline_leditspp_stable_diffusion_xl import LEditsPPPipelineStableDiffusionXL else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py import inspect import math from itertools import repeat from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import Attention, AttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import PIL\n >>> import requests\n >>> import torch\n >>> from io import BytesIO\n\n >>> from diffusers import LEditsPPPipelineStableDiffusion\n >>> from diffusers.utils import load_image\n\n >>> pipe = LEditsPPPipelineStableDiffusion.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/cherry_blossom.png"\n >>> image = load_image(img_url).convert("RGB")\n\n >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.1)\n\n >>> edited_image = pipe(\n ... editing_prompt=["cherry blossom"], edit_guidance_scale=10.0, edit_threshold=0.75\n ... 
).images[0]\n ```\n' class LeditsAttentionStore: @staticmethod def get_empty_store(): return {'down_cross': [], 'mid_cross': [], 'up_cross': [], 'down_self': [], 'mid_self': [], 'up_self': []} def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): if attn.shape[1] <= self.max_size: bs = 1 + int(PnP) + editing_prompts skip = 2 if PnP else 1 attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) source_batch_size = int(attn.shape[1] // bs) self.forward(attn[:, skip * source_batch_size:], is_cross, place_in_unet) def forward(self, attn, is_cross: bool, place_in_unet: str): key = f"{place_in_unet}_{('cross' if is_cross else 'self')}" self.step_store[key].append(attn) def between_steps(self, store_step=True): if store_step: if self.average: if len(self.attention_store) == 0: self.attention_store = self.step_store else: for key in self.attention_store: for i in range(len(self.attention_store[key])): self.attention_store[key][i] += self.step_store[key][i] elif len(self.attention_store) == 0: self.attention_store = [self.step_store] else: self.attention_store.append(self.step_store) self.cur_step += 1 self.step_store = self.get_empty_store() def get_attention(self, step: int): if self.average: attention = {key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store} else: assert step is not None attention = self.attention_store[step] return attention def aggregate_attention(self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int): out = [[] for x in range(self.batch_size)] if isinstance(res, int): num_pixels = res ** 2 resolution = (res, res) else: num_pixels = res[0] * res[1] resolution = res[:2] for location in from_where: for bs_item in attention_maps[f"{location}_{('cross' if is_cross else 'self')}"]: for (batch, item) in enumerate(bs_item): if item.shape[1] == num_pixels: cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] out[batch].append(cross_maps) out = torch.stack([torch.cat(x, dim=0) for x in out]) out = out.sum(1) / out.shape[1] return out def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int=None): self.step_store = self.get_empty_store() self.attention_store = [] self.cur_step = 0 self.average = average self.batch_size = batch_size if max_size is None: self.max_size = max_resolution ** 2 elif max_size is not None and max_resolution is None: self.max_size = max_size else: raise ValueError('Only allowed to set one of max_resolution or max_size') class LeditsGaussianSmoothing: def __init__(self, device): kernel_size = [3, 3] sigma = [0.5, 0.5] kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) for (size, std, mgrid) in zip(kernel_size, sigma, meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / (2 * std)) ** 2) kernel = kernel / torch.sum(kernel) kernel = kernel.view(1, 1, *kernel.size()) kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) self.weight = kernel.to(device) def __call__(self, input): return F.conv2d(input, weight=self.weight.to(input.dtype)) class LEDITSCrossAttnProcessor: def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): self.attnstore = attention_store self.place_in_unet = place_in_unet self.editing_prompts = editing_prompts self.pnp = pnp def __call__(self, attn: Attention, hidden_states, encoder_hidden_states, attention_mask=None, 
temb=None): (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) self.attnstore(attention_probs, is_cross=True, place_in_unet=self.place_in_unet, editing_prompts=self.editing_prompts, PnP=self.pnp) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class LEditsPPPipelineStableDiffusion(DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DPMSolverMultistepScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if not isinstance(scheduler, DDIMScheduler) and (not isinstance(scheduler, DPMSolverMultistepScheduler)): scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config, algorithm_type='sde-dpmsolver++', solver_order=2) logger.warning('This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. The scheduler has been changed to DPMSolverMultistepScheduler.') if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file'
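# NOTE: illustrative sketch, not part of the diffusers source. LEDITS++ only
# supports DDIMScheduler and DPMSolverMultistepScheduler; the constructor above
# silently replaces any other scheduler with a second-order SDE DPM-Solver++.
# To avoid the warning, the supported scheduler can be configured explicitly
# (model id and options mirror the surrounding code):
#
#     from diffusers import DPMSolverMultistepScheduler, LEditsPPPipelineStableDiffusion
#
#     pipe = LEditsPPPipelineStableDiffusion.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverMultistepScheduler.from_config(
#         pipe.scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2
#     )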
deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. 
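# NOTE: illustrative sketch, not part of the diffusers source. `rescale_noise_cfg`
# (defined above) implements guidance rescaling: the combined prediction is scaled
# so its standard deviation matches the text-conditioned prediction, then blended
# back with the unrescaled result according to `guidance_rescale`. Rough
# demonstration on dummy tensors:
#
#     import torch
#
#     noise_pred_text = torch.randn(2, 4, 64, 64)
#     noise_cfg = 7.5 * noise_pred_text                   # stand-in for an over-scaled CFG output
#     std_text = noise_pred_text.std(dim=[1, 2, 3], keepdim=True)
#     std_cfg = noise_cfg.std(dim=[1, 2, 3], keepdim=True)
#     rescaled = noise_cfg * (std_text / std_cfg)
#     out = 0.7 * rescaled + 0.3 * noise_cfg              # guidance_rescale = 0.7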
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.inversion_steps = None def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, eta, generator=None): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, negative_prompt=None, editing_prompt_embeddings=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if editing_prompt_embeddings is not None and negative_prompt_embeds is not None: if editing_prompt_embeddings.shape != negative_prompt_embeds.shape: raise ValueError(f'`editing_prompt_embeddings` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `editing_prompt_embeddings` {editing_prompt_embeddings.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents): latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_unet(self, attention_store, PnP: bool=False): attn_procs = {} for name in self.unet.attn_processors.keys(): if name.startswith('mid_block'): place_in_unet = 'mid' elif name.startswith('up_blocks'): place_in_unet = 'up' elif name.startswith('down_blocks'): place_in_unet = 'down' else: continue if 'attn2' in name and place_in_unet != 'mid': attn_procs[name] = LEDITSCrossAttnProcessor(attention_store=attention_store, place_in_unet=place_in_unet, pnp=PnP, editing_prompts=self.enabled_editing_prompts) else: attn_procs[name] = AttnProcessor() self.unet.set_attn_processor(attn_procs) def encode_prompt(self, device, num_images_per_prompt, enable_edit_guidance, negative_prompt=None, editing_prompt=None, negative_prompt_embeds: Optional[torch.Tensor]=None, editing_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) batch_size = self.batch_size num_edit_tokens = None if negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but expected {batch_size} based on the input images. 
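# NOTE: illustrative sketch, not part of the diffusers source. `prepare_unet`
# (defined above) registers the recording LEDITSCrossAttnProcessor only on
# cross-attention layers ('attn2') outside the mid block; all other attention
# layers keep a plain AttnProcessor. Schematically (`attention_store` and
# `num_edit_prompts` are placeholders):
#
#     attn_procs = {}
#     for name in unet.attn_processors:
#         place = 'mid' if name.startswith('mid_block') else 'up' if name.startswith('up_blocks') else 'down'
#         if 'attn2' in name and place != 'mid':
#             attn_procs[name] = LEDITSCrossAttnProcessor(attention_store, place, False, num_edit_prompts)
#         else:
#             attn_procs[name] = AttnProcessor()
#     unet.set_attn_processor(attn_procs)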
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = negative_prompt_embeds.dtype negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) if enable_edit_guidance: if editing_prompt_embeds is None: if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] max_length = negative_prompt_embeds.shape[1] text_inputs = self.tokenizer([x for item in editing_prompt for x in repeat(item, batch_size)], padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', return_length=True) num_edit_tokens = text_inputs.length - 2 text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer([x for item in editing_prompt for x in repeat(item, batch_size)], padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: editing_prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) editing_prompt_embeds = editing_prompt_embeds[0] else: editing_prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) editing_prompt_embeds = editing_prompt_embeds[-1][-(clip_skip + 1)] editing_prompt_embeds = self.text_encoder.text_model.final_layer_norm(editing_prompt_embeds) editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) (bs_embed_edit, seq_len, _) = editing_prompt_embeds.shape editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) editing_prompt_embeds = editing_prompt_embeds.repeat(1, num_images_per_prompt, 1) editing_prompt_embeds = editing_prompt_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: 
unscale_lora_layers(self.text_encoder, lora_scale) return (editing_prompt_embeds, negative_prompt_embeds, num_edit_tokens) @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, negative_prompt: Optional[Union[str, List[str]]]=None, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='pil', return_dict: bool=True, editing_prompt: Optional[Union[str, List[str]]]=None, editing_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, reverse_editing_direction: Optional[Union[bool, List[bool]]]=False, edit_guidance_scale: Optional[Union[float, List[float]]]=5, edit_warmup_steps: Optional[Union[int, List[int]]]=0, edit_cooldown_steps: Optional[Union[int, List[int]]]=None, edit_threshold: Optional[Union[float, List[float]]]=0.9, user_mask: Optional[torch.Tensor]=None, sem_guidance: Optional[List[torch.Tensor]]=None, use_cross_attn_mask: bool=False, use_intersect_mask: bool=True, attn_store_steps: Optional[List[int]]=[], store_averaged_over_steps: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): if self.inversion_steps is None: raise ValueError('You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s).') eta = self.eta num_images_per_prompt = 1 latents = self.init_latents zs = self.zs self.scheduler.set_timesteps(len(self.scheduler.timesteps)) if use_intersect_mask: use_cross_attn_mask = True if use_cross_attn_mask: self.smoothing = LeditsGaussianSmoothing(self.device) if user_mask is not None: user_mask = user_mask.to(self.device) org_prompt = '' self.check_inputs(negative_prompt, editing_prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs) self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs batch_size = self.batch_size if editing_prompt: enable_edit_guidance = True if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] self.enabled_editing_prompts = len(editing_prompt) elif editing_prompt_embeds is not None: enable_edit_guidance = True self.enabled_editing_prompts = editing_prompt_embeds.shape[0] else: self.enabled_editing_prompts = 0 enable_edit_guidance = False lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (edit_concepts, uncond_embeddings, num_edit_tokens) = self.encode_prompt(editing_prompt=editing_prompt, device=self.device, num_images_per_prompt=num_images_per_prompt, enable_edit_guidance=enable_edit_guidance, negative_prompt=negative_prompt, editing_prompt_embeds=editing_prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip) if enable_edit_guidance: text_embeddings = torch.cat([uncond_embeddings, edit_concepts]) self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt else: text_embeddings = torch.cat([uncond_embeddings]) timesteps = self.inversion_steps t_to_idx = 
{int(v): k for (k, v) in enumerate(timesteps[-zs.shape[0]:])} if use_cross_attn_mask: self.attention_store = LeditsAttentionStore(average=store_averaged_over_steps, batch_size=batch_size, max_size=latents.shape[-2] / 4.0 * (latents.shape[-1] / 4.0), max_resolution=None) self.prepare_unet(self.attention_store, PnP=False) resolution = latents.shape[-2:] att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, None, None, text_embeddings.dtype, self.device, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(eta) self.sem_guidance = None self.activation_mask = None num_warmup_steps = 0 with self.progress_bar(total=len(timesteps)) as progress_bar: for (i, t) in enumerate(timesteps): if enable_edit_guidance: latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) else: latent_model_input = latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) text_embed_input = text_embeddings noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embed_input).sample noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) noise_pred_uncond = noise_pred_out[0] noise_pred_edit_concepts = noise_pred_out[1:] noise_guidance_edit = torch.zeros(noise_pred_uncond.shape, device=self.device, dtype=noise_pred_uncond.dtype) if sem_guidance is not None and len(sem_guidance) > i: noise_guidance_edit += sem_guidance[i].to(self.device) elif enable_edit_guidance: if self.activation_mask is None: self.activation_mask = torch.zeros((len(timesteps), len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape)) if self.sem_guidance is None: self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) for (c, noise_pred_edit_concept) in enumerate(noise_pred_edit_concepts): if isinstance(edit_warmup_steps, list): edit_warmup_steps_c = edit_warmup_steps[c] else: edit_warmup_steps_c = edit_warmup_steps if i < edit_warmup_steps_c: continue if isinstance(edit_guidance_scale, list): edit_guidance_scale_c = edit_guidance_scale[c] else: edit_guidance_scale_c = edit_guidance_scale if isinstance(edit_threshold, list): edit_threshold_c = edit_threshold[c] else: edit_threshold_c = edit_threshold if isinstance(reverse_editing_direction, list): reverse_editing_direction_c = reverse_editing_direction[c] else: reverse_editing_direction_c = reverse_editing_direction if isinstance(edit_cooldown_steps, list): edit_cooldown_steps_c = edit_cooldown_steps[c] elif edit_cooldown_steps is None: edit_cooldown_steps_c = i + 1 else: edit_cooldown_steps_c = edit_cooldown_steps if i >= edit_cooldown_steps_c: continue noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond if reverse_editing_direction_c: noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c if user_mask is not None: noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask if use_cross_attn_mask: out = self.attention_store.aggregate_attention(attention_maps=self.attention_store.step_store, prompts=self.text_cross_attention_maps, res=att_res, from_where=['up', 'down'], is_cross=True, select=self.text_cross_attention_maps.index(editing_prompt[c])) attn_map = out[:, :, :, 1:1 + num_edit_tokens[c]] if attn_map.shape[3] != num_edit_tokens[c]: raise ValueError(f'Incorrect shape of attention_map. 
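# NOTE: illustrative sketch, not part of the diffusers source. The cross-attention
# mask assembled in the surrounding loop sums the aggregated attention map over the
# edit-prompt tokens, smooths it with the 3x3 Gaussian kernel after reflect padding,
# binarizes it at the `edit_threshold` quantile, and upsamples it to the latent
# resolution. Dummy-tensor sketch of the thresholding step:
#
#     import torch
#     import torch.nn.functional as F
#
#     attn_map = torch.rand(1, 16, 16)                                  # smoothed map at attention resolution
#     thr = torch.quantile(attn_map.flatten(start_dim=1), 0.9, dim=1)   # edit_threshold = 0.9
#     mask = torch.where(attn_map >= thr[:, None, None], 1.0, 0.0)      # keep only the top 10% of locations
#     mask = F.interpolate(mask.unsqueeze(1), (64, 64)).repeat(1, 4, 1, 1)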
Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!') attn_map = torch.sum(attn_map, dim=3) attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode='reflect') attn_map = self.smoothing(attn_map).squeeze(1) if attn_map.dtype == torch.float32: tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) else: tmp = torch.quantile(attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1).to(attn_map.dtype) attn_mask = torch.where(attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0) attn_mask = F.interpolate(attn_mask.unsqueeze(1), noise_guidance_edit_tmp.shape[-2:]).repeat(1, 4, 1, 1) self.activation_mask[i, c] = attn_mask.detach().cpu() if not use_intersect_mask: noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask if use_intersect_mask: if t <= 800: noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum(noise_guidance_edit_tmp_quantile, dim=1, keepdim=True) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, self.unet.config.in_channels, 1, 1) if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile(noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False) else: tmp = torch.quantile(noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False).to(noise_guidance_edit_tmp_quantile.dtype) intersect_mask = torch.where(noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp)) * attn_mask self.activation_mask[i, c] = intersect_mask.detach().cpu() noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask else: noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask elif not use_cross_attn_mask: noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum(noise_guidance_edit_tmp_quantile, dim=1, keepdim=True) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile(noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False) else: tmp = torch.quantile(noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False).to(noise_guidance_edit_tmp_quantile.dtype) self.activation_mask[i, c] = torch.where(noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp)).detach().cpu() noise_guidance_edit_tmp = torch.where(noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], noise_guidance_edit_tmp, torch.zeros_like(noise_guidance_edit_tmp)) noise_guidance_edit += noise_guidance_edit_tmp self.sem_guidance[i] = noise_guidance_edit.detach().cpu() noise_pred = noise_pred_uncond + noise_guidance_edit if enable_edit_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_edit_concepts.mean(dim=0, keepdim=False), guidance_rescale=self.guidance_rescale) idx = t_to_idx[int(t)] latents = self.scheduler.step(noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs).prev_sample if use_cross_attn_mask: store_step = i in attn_store_steps self.attention_store.between_steps(store_step) if callback_on_step_end is not None: callback_kwargs = {} for k in 
callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, self.device, text_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) @torch.no_grad() def invert(self, image: PipelineImageInput, source_prompt: str='', source_guidance_scale: float=3.5, num_inversion_steps: int=30, skip: float=0.15, generator: Optional[torch.Generator]=None, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, height: Optional[int]=None, width: Optional[int]=None, resize_mode: Optional[str]='default', crops_coords: Optional[Tuple[int, int, int, int]]=None): self.unet.set_attn_processor(AttnProcessor()) self.eta = 1.0 self.scheduler.config.timestep_spacing = 'leading' self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip))) self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:] timesteps = self.inversion_steps (x0, resized) = self.encode_image(image, dtype=self.text_encoder.dtype, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) self.batch_size = x0.shape[0] image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] image_rec = self.image_processor.postprocess(image_rec, output_type='pil') do_classifier_free_guidance = source_guidance_scale > 1.0 lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (uncond_embedding, text_embeddings, _) = self.encode_prompt(num_images_per_prompt=1, device=self.device, negative_prompt=None, enable_edit_guidance=do_classifier_free_guidance, editing_prompt=source_prompt, lora_scale=lora_scale, clip_skip=clip_skip) variance_noise_shape = (num_inversion_steps, *x0.shape) t_to_idx = {int(v): k for (k, v) in enumerate(timesteps)} xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) for t in reversed(timesteps): idx = num_inversion_steps - t_to_idx[int(t)] - 1 noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) xts[idx] = self.scheduler.add_noise(x0, noise, torch.Tensor([t])) xts = torch.cat([x0.unsqueeze(0), xts], dim=0) self.scheduler.set_timesteps(len(self.scheduler.timesteps)) zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) with self.progress_bar(total=len(timesteps)) as progress_bar: for t in timesteps: idx = num_inversion_steps - t_to_idx[int(t)] - 1 xt = xts[idx + 1] noise_pred = self.unet(xt, timestep=t, encoder_hidden_states=uncond_embedding).sample if not source_prompt == '': noise_pred_cond = 
self.unet(xt, timestep=t, encoder_hidden_states=text_embeddings).sample noise_pred = noise_pred + source_guidance_scale * (noise_pred_cond - noise_pred) xtm1 = xts[idx] (z, xtm1_corrected) = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) zs[idx] = z xts[idx] = xtm1_corrected progress_bar.update() self.init_latents = xts[-1].expand(self.batch_size, -1, -1, -1) zs = zs.flip(0) self.zs = zs return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) @torch.no_grad() def encode_image(self, image, dtype=None, height=None, width=None, resize_mode='default', crops_coords=None): image = self.image_processor.preprocess(image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) resized = self.image_processor.postprocess(image=image, output_type='pil') if max(image.shape[-2:]) > self.vae.config['sample_size'] * 1.5: logger.warning('Your input images far exceed the default resolution of the underlying diffusion model. The output images may contain severe artifacts! Consider down-sampling the input using the `height` and `width` parameters') image = image.to(dtype) x0 = self.vae.encode(image.to(self.device)).latent_dist.mode() x0 = x0.to(dtype) x0 = self.vae.config.scaling_factor * x0 return (x0, resized) def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps alpha_prod_t = scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 if scheduler.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) variance = scheduler._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** 0.5 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * noise_pred mu_xt = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if variance > 0.0: noise = (prev_latents - mu_xt) / (variance ** 0.5 * eta) else: noise = torch.tensor([0.0]).to(latents.device) return (noise, mu_xt + eta * variance ** 0.5 * noise) def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): def first_order_update(model_output, sample): (sigma_t, sigma_s) = (scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index]) (alpha_t, sigma_t) = scheduler._sigma_to_alpha_sigma_t(sigma_t) (alpha_s, sigma_s) = scheduler._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s mu_xt = sigma_t / sigma_s * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * model_output mu_xt = scheduler.dpm_solver_first_order_update(model_output=model_output, sample=sample, noise=torch.zeros_like(sample)) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return (noise, prev_sample) def second_order_update(model_output_list, sample): (sigma_t, sigma_s0, sigma_s1) = (scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index], scheduler.sigmas[scheduler.step_index - 1]) (alpha_t, sigma_t) = scheduler._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) 
= scheduler._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = scheduler._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) (m0, m1) = (model_output_list[-1], model_output_list[-2]) (h, h_0) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1) r0 = h_0 / h (D0, D1) = (m0, 1.0 / r0 * (m0 - m1)) mu_xt = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return (noise, prev_sample) if scheduler.step_index is None: scheduler._init_step_index(timestep) model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) for i in range(scheduler.config.solver_order - 1): scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] scheduler.model_outputs[-1] = model_output if scheduler.lower_order_nums < 1: (noise, prev_sample) = first_order_update(model_output, latents) else: (noise, prev_sample) = second_order_update(scheduler.model_outputs, latents) if scheduler.lower_order_nums < scheduler.config.solver_order: scheduler.lower_order_nums += 1 scheduler._step_index += 1 return (noise, prev_sample) def compute_noise(scheduler, *args): if isinstance(scheduler, DDIMScheduler): return compute_noise_ddim(scheduler, *args) elif isinstance(scheduler, DPMSolverMultistepScheduler) and scheduler.config.algorithm_type == 'sde-dpmsolver++' and (scheduler.config.solver_order == 2): return compute_noise_sde_dpm_pp_2nd(scheduler, *args) else: raise NotImplementedError # File: diffusers-main/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py import inspect import math from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import Attention, AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler from ...utils import USE_PEFT_BACKEND, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import PIL\n >>> import requests\n >>> from io import BytesIO\n\n >>> from diffusers import LEditsPPPipelineStableDiffusionXL\n\n >>> pipe = 
LEditsPPPipelineStableDiffusionXL.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> def download_image(url):\n ... response = requests.get(url)\n ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")\n\n\n >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/tennis.jpg"\n >>> image = download_image(img_url)\n\n >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.2)\n\n >>> edited_image = pipe(\n ... editing_prompt=["tennis ball", "tomato"],\n ... reverse_editing_direction=[True, False],\n ... edit_guidance_scale=[5.0, 10.0],\n ... edit_threshold=[0.9, 0.85],\n ... ).images[0]\n ```\n' class LeditsAttentionStore: @staticmethod def get_empty_store(): return {'down_cross': [], 'mid_cross': [], 'up_cross': [], 'down_self': [], 'mid_self': [], 'up_self': []} def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): if attn.shape[1] <= self.max_size: bs = 1 + int(PnP) + editing_prompts skip = 2 if PnP else 1 attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) source_batch_size = int(attn.shape[1] // bs) self.forward(attn[:, skip * source_batch_size:], is_cross, place_in_unet) def forward(self, attn, is_cross: bool, place_in_unet: str): key = f"{place_in_unet}_{('cross' if is_cross else 'self')}" self.step_store[key].append(attn) def between_steps(self, store_step=True): if store_step: if self.average: if len(self.attention_store) == 0: self.attention_store = self.step_store else: for key in self.attention_store: for i in range(len(self.attention_store[key])): self.attention_store[key][i] += self.step_store[key][i] elif len(self.attention_store) == 0: self.attention_store = [self.step_store] else: self.attention_store.append(self.step_store) self.cur_step += 1 self.step_store = self.get_empty_store() def get_attention(self, step: int): if self.average: attention = {key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store} else: assert step is not None attention = self.attention_store[step] return attention def aggregate_attention(self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int): out = [[] for x in range(self.batch_size)] if isinstance(res, int): num_pixels = res ** 2 resolution = (res, res) else: num_pixels = res[0] * res[1] resolution = res[:2] for location in from_where: for bs_item in attention_maps[f"{location}_{('cross' if is_cross else 'self')}"]: for (batch, item) in enumerate(bs_item): if item.shape[1] == num_pixels: cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] out[batch].append(cross_maps) out = torch.stack([torch.cat(x, dim=0) for x in out]) out = out.sum(1) / out.shape[1] return out def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int=None): self.step_store = self.get_empty_store() self.attention_store = [] self.cur_step = 0 self.average = average self.batch_size = batch_size if max_size is None: self.max_size = max_resolution ** 2 elif max_size is not None and max_resolution is None: self.max_size = max_size else: raise ValueError('Only allowed to set one of max_resolution or max_size') class LeditsGaussianSmoothing: def __init__(self, device): kernel_size = [3, 3] sigma = [0.5, 0.5] kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) for (size, std, mgrid) in zip(kernel_size, sigma, 
meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / (2 * std)) ** 2) kernel = kernel / torch.sum(kernel) kernel = kernel.view(1, 1, *kernel.size()) kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) self.weight = kernel.to(device) def __call__(self, input): return F.conv2d(input, weight=self.weight.to(input.dtype)) class LEDITSCrossAttnProcessor: def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): self.attnstore = attention_store self.place_in_unet = place_in_unet self.editing_prompts = editing_prompts self.pnp = pnp def __call__(self, attn: Attention, hidden_states, encoder_hidden_states, attention_mask=None, temb=None): (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) self.attnstore(attention_probs, is_cross=True, place_in_unet=self.place_in_unet, editing_prompts=self.editing_prompts, PnP=self.pnp) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class LEditsPPPipelineStableDiffusionXL(DiffusionPipeline, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DPMSolverMultistepScheduler, DDIMScheduler], image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) if not isinstance(scheduler, DDIMScheduler) and (not isinstance(scheduler, DPMSolverMultistepScheduler)): self.scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config, algorithm_type='sde-dpmsolver++', solver_order=2) logger.warning('This pipeline only supports DDIMScheduler and 
DPMSolverMultistepScheduler. The scheduler has been changed to DPMSolverMultistepScheduler.') self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.inversion_steps = None def encode_prompt(self, device: Optional[torch.device]=None, num_images_per_prompt: int=1, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None, enable_edit_guidance: bool=True, editing_prompt: Optional[str]=None, editing_prompt_embeds: Optional[torch.Tensor]=None, editing_pooled_prompt_embeds: Optional[torch.Tensor]=None) -> object: device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) batch_size = self.batch_size tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] num_edit_tokens = 0 zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but image inversion has batch size {batch_size}. 
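# NOTE: illustrative sketch, not part of the diffusers source. In the SDXL variant,
# `encode_prompt` below runs both CLIP text encoders: the penultimate-layer hidden
# states of each encoder are concatenated along the feature dimension, and the
# pooled embedding comes from the projection encoder (text_encoder_2). Schematic
# with typical SDXL shapes (assumed here for illustration):
#
#     out_1 = text_encoder(ids_1, output_hidden_states=True)
#     out_2 = text_encoder_2(ids_2, output_hidden_states=True)
#     pooled = out_2[0]                                                  # (batch, 1280)
#     prompt_embeds = torch.concat(
#         [out_1.hidden_states[-2], out_2.hidden_states[-2]], dim=-1
#     )                                                                  # (batch, 77, 768 + 1280)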
Please make sure that passed `negative_prompt` matches the batch size of the input images.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(negative_prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(negative_pooled_prompt_embeds) if enable_edit_guidance and editing_prompt_embeds is None: editing_prompt_2 = editing_prompt editing_prompts = [editing_prompt, editing_prompt_2] edit_prompt_embeds_list = [] for (editing_prompt, tokenizer, text_encoder) in zip(editing_prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): editing_prompt = self.maybe_convert_prompt(editing_prompt, tokenizer) max_length = negative_prompt_embeds.shape[1] edit_concepts_input = tokenizer(editing_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', return_length=True) num_edit_tokens = edit_concepts_input.length - 2 edit_concepts_embeds = text_encoder(edit_concepts_input.input_ids.to(device), output_hidden_states=True) editing_pooled_prompt_embeds = edit_concepts_embeds[0] if clip_skip is None: edit_concepts_embeds = edit_concepts_embeds.hidden_states[-2] else: edit_concepts_embeds = edit_concepts_embeds.hidden_states[-(clip_skip + 2)] edit_prompt_embeds_list.append(edit_concepts_embeds) edit_concepts_embeds = torch.concat(edit_prompt_embeds_list, dim=-1) elif not enable_edit_guidance: edit_concepts_embeds = None editing_pooled_prompt_embeds = None negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) (bs_embed, seq_len, _) = negative_prompt_embeds.shape seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if enable_edit_guidance: (bs_embed_edit, seq_len, _) = edit_concepts_embeds.shape edit_concepts_embeds = edit_concepts_embeds.to(dtype=self.text_encoder_2.dtype, device=device) edit_concepts_embeds = edit_concepts_embeds.repeat(1, num_images_per_prompt, 1) edit_concepts_embeds = edit_concepts_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if enable_edit_guidance: editing_pooled_prompt_embeds = editing_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed_edit * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, 
lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (negative_prompt_embeds, edit_concepts_embeds, negative_pooled_prompt_embeds, editing_pooled_prompt_embeds, num_edit_tokens) def prepare_extra_step_kwargs(self, eta, generator=None): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, negative_prompt=None, negative_prompt_2=None, negative_prompt_embeds=None, negative_pooled_prompt_embeds=None): if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') def prepare_latents(self, device, latents): latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
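# NOTE: illustrative sketch, not part of the diffusers source. The check above
# verifies that the six micro-conditioning integers (original_size +
# crops_coords_top_left + target_size) plus the pooled text embedding match the
# UNet's added-embedding input width:
#
#     passed = unet.config.addition_time_embed_dim * 6 + text_encoder_projection_dim
#     expected = unet.add_embedding.linear_1.in_features
#
# For the SDXL base configuration this is typically 256 * 6 + 1280 = 2816; the
# actual values come from the loaded checkpoint's config.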
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps def prepare_unet(self, attention_store, PnP: bool=False): attn_procs = {} for name in self.unet.attn_processors.keys(): if name.startswith('mid_block'): place_in_unet = 'mid' elif name.startswith('up_blocks'): place_in_unet = 'up' elif name.startswith('down_blocks'): place_in_unet = 'down' else: continue if 'attn2' in name and place_in_unet != 'mid': attn_procs[name] = LEDITSCrossAttnProcessor(attention_store=attention_store, place_in_unet=place_in_unet, pnp=PnP, editing_prompts=self.enabled_editing_prompts) else: attn_procs[name] = AttnProcessor() self.unet.set_attn_processor(attn_procs) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, denoising_end: Optional[float]=None, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, editing_prompt: Optional[Union[str, List[str]]]=None, editing_prompt_embeddings: Optional[torch.Tensor]=None, editing_pooled_prompt_embeds: Optional[torch.Tensor]=None, reverse_editing_direction: Optional[Union[bool, List[bool]]]=False, edit_guidance_scale: Optional[Union[float, List[float]]]=5, edit_warmup_steps: Optional[Union[int, List[int]]]=0, edit_cooldown_steps: Optional[Union[int, List[int]]]=None, edit_threshold: Optional[Union[float, List[float]]]=0.9, sem_guidance: Optional[List[torch.Tensor]]=None, use_cross_attn_mask: bool=False, use_intersect_mask: bool=False, user_mask: Optional[torch.Tensor]=None, attn_store_steps: Optional[List[int]]=[], store_averaged_over_steps: bool=True, clip_skip: Optional[int]=None, 
callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): if self.inversion_steps is None: raise ValueError('You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s).') eta = self.eta num_images_per_prompt = 1 latents = self.init_latents zs = self.zs self.scheduler.set_timesteps(len(self.scheduler.timesteps)) if use_intersect_mask: use_cross_attn_mask = True if use_cross_attn_mask: self.smoothing = LeditsGaussianSmoothing(self.device) if user_mask is not None: user_mask = user_mask.to(self.device) self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end batch_size = self.batch_size device = self._execution_device if editing_prompt: enable_edit_guidance = True if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] self.enabled_editing_prompts = len(editing_prompt) elif editing_prompt_embeddings is not None: enable_edit_guidance = True self.enabled_editing_prompts = editing_prompt_embeddings.shape[0] else: self.enabled_editing_prompts = 0 enable_edit_guidance = False text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, edit_prompt_embeds, negative_pooled_prompt_embeds, pooled_edit_embeds, num_edit_tokens) = self.encode_prompt(device=device, num_images_per_prompt=num_images_per_prompt, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_embeds=negative_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, enable_edit_guidance=enable_edit_guidance, editing_prompt=editing_prompt, editing_prompt_embeds=editing_prompt_embeddings, editing_pooled_prompt_embeds=editing_pooled_prompt_embeds) timesteps = self.inversion_steps t_to_idx = {int(v): k for (k, v) in enumerate(timesteps)} if use_cross_attn_mask: self.attention_store = LeditsAttentionStore(average=store_averaged_over_steps, batch_size=batch_size, max_size=latents.shape[-2] / 4.0 * (latents.shape[-1] / 4.0), max_resolution=None) self.prepare_unet(self.attention_store) resolution = latents.shape[-2:] att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) latents = self.prepare_latents(device=device, latents=latents) extra_step_kwargs = self.prepare_extra_step_kwargs(eta) if self.text_encoder_2 is None: text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_text_embeds = negative_pooled_prompt_embeds add_time_ids = self._get_add_time_ids(self.size, crops_coords_top_left, self.size, dtype=negative_pooled_prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if enable_edit_guidance: prompt_embeds = torch.cat([prompt_embeds, edit_prompt_embeds], dim=0) add_text_embeds = torch.cat([add_text_embeds, pooled_edit_embeds], dim=0) edit_concepts_time_ids = add_time_ids.repeat(edit_prompt_embeds.shape[0], 1) add_time_ids = torch.cat([add_time_ids, edit_concepts_time_ids], dim=0) self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = 
add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None: (image_embeds, negative_image_embeds) = self.encode_image(ip_adapter_image, device, num_images_per_prompt) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) image_embeds = image_embeds.to(device) self.sem_guidance = None self.activation_mask = None if self.denoising_end is not None and isinstance(self.denoising_end, float) and (self.denoising_end > 0) and (self.denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) noise_pred_uncond = noise_pred_out[0] noise_pred_edit_concepts = noise_pred_out[1:] noise_guidance_edit = torch.zeros(noise_pred_uncond.shape, device=self.device, dtype=noise_pred_uncond.dtype) if sem_guidance is not None and len(sem_guidance) > i: noise_guidance_edit += sem_guidance[i].to(self.device) elif enable_edit_guidance: if self.activation_mask is None: self.activation_mask = torch.zeros((len(timesteps), self.enabled_editing_prompts, *noise_pred_edit_concepts[0].shape)) if self.sem_guidance is None: self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) for (c, noise_pred_edit_concept) in enumerate(noise_pred_edit_concepts): if isinstance(edit_warmup_steps, list): edit_warmup_steps_c = edit_warmup_steps[c] else: edit_warmup_steps_c = edit_warmup_steps if i < edit_warmup_steps_c: continue if isinstance(edit_guidance_scale, list): edit_guidance_scale_c = edit_guidance_scale[c] else: edit_guidance_scale_c = edit_guidance_scale if isinstance(edit_threshold, list): edit_threshold_c = edit_threshold[c] else: edit_threshold_c = edit_threshold if isinstance(reverse_editing_direction, list): reverse_editing_direction_c = reverse_editing_direction[c] else: reverse_editing_direction_c = reverse_editing_direction if isinstance(edit_cooldown_steps, list): edit_cooldown_steps_c = edit_cooldown_steps[c] elif edit_cooldown_steps is None: edit_cooldown_steps_c = i + 1 else: edit_cooldown_steps_c = edit_cooldown_steps if i >= edit_cooldown_steps_c: continue noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond if reverse_editing_direction_c: noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 noise_guidance_edit_tmp = 
noise_guidance_edit_tmp * edit_guidance_scale_c if user_mask is not None: noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask if use_cross_attn_mask: out = self.attention_store.aggregate_attention(attention_maps=self.attention_store.step_store, prompts=self.text_cross_attention_maps, res=att_res, from_where=['up', 'down'], is_cross=True, select=self.text_cross_attention_maps.index(editing_prompt[c])) attn_map = out[:, :, :, 1:1 + num_edit_tokens[c]] if attn_map.shape[3] != num_edit_tokens[c]: raise ValueError(f'Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!') attn_map = torch.sum(attn_map, dim=3) attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode='reflect') attn_map = self.smoothing(attn_map).squeeze(1) if attn_map.dtype == torch.float32: tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) else: tmp = torch.quantile(attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1).to(attn_map.dtype) attn_mask = torch.where(attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0) attn_mask = F.interpolate(attn_mask.unsqueeze(1), noise_guidance_edit_tmp.shape[-2:]).repeat(1, 4, 1, 1) self.activation_mask[i, c] = attn_mask.detach().cpu() if not use_intersect_mask: noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask if use_intersect_mask: noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum(noise_guidance_edit_tmp_quantile, dim=1, keepdim=True) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, self.unet.config.in_channels, 1, 1) if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile(noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False) else: tmp = torch.quantile(noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False).to(noise_guidance_edit_tmp_quantile.dtype) intersect_mask = torch.where(noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp)) * attn_mask self.activation_mask[i, c] = intersect_mask.detach().cpu() noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask elif not use_cross_attn_mask: noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum(noise_guidance_edit_tmp_quantile, dim=1, keepdim=True) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile(noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False) else: tmp = torch.quantile(noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False).to(noise_guidance_edit_tmp_quantile.dtype) self.activation_mask[i, c] = torch.where(noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp)).detach().cpu() noise_guidance_edit_tmp = torch.where(noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], noise_guidance_edit_tmp, torch.zeros_like(noise_guidance_edit_tmp)) noise_guidance_edit += noise_guidance_edit_tmp self.sem_guidance[i] = noise_guidance_edit.detach().cpu() noise_pred = noise_pred_uncond + noise_guidance_edit if enable_edit_guidance and 
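# Illustrative sketch (not part of the pipeline source): the branch above implements the LEDITS++
# implicit masking idea: per edit concept, only positions whose per-pixel |edit guidance| magnitude
# lies above the `edit_threshold` quantile keep their guidance, everything else is zeroed. A
# self-contained version of just that thresholding step; tensor names are hypothetical stand-ins.
import torch

def quantile_mask(guidance: torch.Tensor, threshold: float) -> torch.Tensor:
    # guidance: (batch, channels, h, w); threshold in [0, 1), e.g. 0.9
    magnitude = guidance.abs().sum(dim=1, keepdim=True)       # (batch, 1, h, w)
    magnitude = magnitude.repeat(1, guidance.shape[1], 1, 1)  # broadcast over channels
    cutoff = torch.quantile(magnitude.flatten(start_dim=2).float(), threshold, dim=2)
    mask = (magnitude >= cutoff[:, :, None, None]).to(guidance.dtype)
    return guidance * mask

# quantile_mask(torch.randn(1, 4, 64, 64), 0.9) keeps roughly the top 10% of positions.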
self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_edit_concepts.mean(dim=0, keepdim=False), guidance_rescale=self.guidance_rescale) idx = t_to_idx[int(t)] latents = self.scheduler.step(noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs, return_dict=False)[0] if use_cross_attn_mask: store_step = i in attn_store_steps self.attention_store.between_steps(store_step) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) if i == len(timesteps) - 1 or (i + 1 > 0 and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=None) @torch.no_grad() def encode_image(self, image, dtype=None, height=None, width=None, resize_mode='default', crops_coords=None): image = self.image_processor.preprocess(image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) resized = self.image_processor.postprocess(image=image, output_type='pil') if max(image.shape[-2:]) > self.vae.config['sample_size'] * 1.5: logger.warning('Your input images far exceed the default resolution of the underlying diffusion model. The output images may contain severe artifacts! 
Consider down-sampling the input using the `height` and `width` parameters') image = image.to(self.device, dtype=dtype) needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: image = image.float() self.upcast_vae() x0 = self.vae.encode(image).latent_dist.mode() x0 = x0.to(dtype) if needs_upcasting: self.vae.to(dtype=torch.float16) x0 = self.vae.config.scaling_factor * x0 return (x0, resized) @torch.no_grad() def invert(self, image: PipelineImageInput, source_prompt: str='', source_guidance_scale=3.5, negative_prompt: str=None, negative_prompt_2: str=None, num_inversion_steps: int=50, skip: float=0.15, generator: Optional[torch.Generator]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), num_zero_noise_steps: int=3, cross_attention_kwargs: Optional[Dict[str, Any]]=None): self.unet.set_attn_processor(AttnProcessor()) self.eta = 1.0 self.scheduler.config.timestep_spacing = 'leading' self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip))) self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:] timesteps = self.inversion_steps num_images_per_prompt = 1 device = self._execution_device if source_prompt == '': source_guidance_scale = 0.0 do_classifier_free_guidance = False else: do_classifier_free_guidance = source_guidance_scale > 1.0 (x0, resized) = self.encode_image(image, dtype=self.text_encoder_2.dtype) width = x0.shape[2] * self.vae_scale_factor height = x0.shape[3] * self.vae_scale_factor self.size = (height, width) self.batch_size = x0.shape[0] text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None if isinstance(source_prompt, str): source_prompt = [source_prompt] * self.batch_size (negative_prompt_embeds, prompt_embeds, negative_pooled_prompt_embeds, edit_pooled_prompt_embeds, _) = self.encode_prompt(device=device, num_images_per_prompt=num_images_per_prompt, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, editing_prompt=source_prompt, lora_scale=text_encoder_lora_scale, enable_edit_guidance=do_classifier_free_guidance) if self.text_encoder_2 is None: text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_text_embeds = negative_pooled_prompt_embeds add_time_ids = self._get_add_time_ids(self.size, crops_coords_top_left, self.size, dtype=negative_prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if do_classifier_free_guidance: negative_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([add_text_embeds, edit_pooled_prompt_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) negative_prompt_embeds = negative_prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(self.batch_size * num_images_per_prompt, 1) if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: self.upcast_vae() x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image_rec = self.vae.decode(x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] elif self.vae.config.force_upcast: x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image_rec = self.vae.decode(x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] else: image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, 
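# Illustrative sketch (not part of the pipeline source): the `skip` argument handled in `invert()`
# above works by asking the scheduler for more steps than requested and keeping only the last
# `num_inversion_steps` of them, i.e. the lower-noise tail of the schedule. The arithmetic in
# isolation, with a made-up descending stand-in for `scheduler.timesteps`:
num_inversion_steps, skip = 50, 0.15
total_steps = int(num_inversion_steps * (1 + skip))   # 57 with the defaults above
schedule = list(range(total_steps - 1, -1, -1))       # stand-in for scheduler.timesteps
inversion_steps = schedule[-num_inversion_steps:]     # last 50 (lowest-noise) entries are kept
print(total_steps, len(inversion_steps))              # 57 50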
return_dict=False, generator=generator)[0] image_rec = self.image_processor.postprocess(image_rec, output_type='pil') variance_noise_shape = (num_inversion_steps, *x0.shape) t_to_idx = {int(v): k for (k, v) in enumerate(timesteps)} xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype) for t in reversed(timesteps): idx = num_inversion_steps - t_to_idx[int(t)] - 1 noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) xts[idx] = self.scheduler.add_noise(x0, noise, t.unsqueeze(0)) xts = torch.cat([x0.unsqueeze(0), xts], dim=0) zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype) self.scheduler.set_timesteps(len(self.scheduler.timesteps)) for t in self.progress_bar(timesteps): idx = num_inversion_steps - t_to_idx[int(t)] - 1 xt = xts[idx + 1] latent_model_input = torch.cat([xt] * 2) if do_classifier_free_guidance else xt latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=negative_prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: noise_pred_out = noise_pred.chunk(2) (noise_pred_uncond, noise_pred_text) = (noise_pred_out[0], noise_pred_out[1]) noise_pred = noise_pred_uncond + source_guidance_scale * (noise_pred_text - noise_pred_uncond) xtm1 = xts[idx] (z, xtm1_corrected) = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) zs[idx] = z xts[idx] = xtm1_corrected self.init_latents = xts[-1] zs = zs.flip(0) if num_zero_noise_steps > 0: zs[-num_zero_noise_steps:] = torch.zeros_like(zs[-num_zero_noise_steps:]) self.zs = zs return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps alpha_prod_t = scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 if scheduler.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) variance = scheduler._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** 0.5 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * noise_pred mu_xt = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if variance > 0.0: noise = (prev_latents - mu_xt) / (variance ** 0.5 * eta) else: noise = torch.tensor([0.0]).to(latents.device) return (noise, mu_xt + eta * variance ** 0.5 * noise) def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): def first_order_update(model_output, sample): (sigma_t, sigma_s) = (scheduler.sigmas[scheduler.step_index 
+ 1], scheduler.sigmas[scheduler.step_index]) (alpha_t, sigma_t) = scheduler._sigma_to_alpha_sigma_t(sigma_t) (alpha_s, sigma_s) = scheduler._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s mu_xt = sigma_t / sigma_s * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * model_output mu_xt = scheduler.dpm_solver_first_order_update(model_output=model_output, sample=sample, noise=torch.zeros_like(sample)) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return (noise, prev_sample) def second_order_update(model_output_list, sample): (sigma_t, sigma_s0, sigma_s1) = (scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index], scheduler.sigmas[scheduler.step_index - 1]) (alpha_t, sigma_t) = scheduler._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = scheduler._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = scheduler._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) (m0, m1) = (model_output_list[-1], model_output_list[-2]) (h, h_0) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1) r0 = h_0 / h (D0, D1) = (m0, 1.0 / r0 * (m0 - m1)) mu_xt = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return (noise, prev_sample) if scheduler.step_index is None: scheduler._init_step_index(timestep) model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) for i in range(scheduler.config.solver_order - 1): scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] scheduler.model_outputs[-1] = model_output if scheduler.lower_order_nums < 1: (noise, prev_sample) = first_order_update(model_output, latents) else: (noise, prev_sample) = second_order_update(scheduler.model_outputs, latents) if scheduler.lower_order_nums < scheduler.config.solver_order: scheduler.lower_order_nums += 1 scheduler._step_index += 1 return (noise, prev_sample) def compute_noise(scheduler, *args): if isinstance(scheduler, DDIMScheduler): return compute_noise_ddim(scheduler, *args) elif isinstance(scheduler, DPMSolverMultistepScheduler) and scheduler.config.algorithm_type == 'sde-dpmsolver++' and (scheduler.config.solver_order == 2): return compute_noise_sde_dpm_pp_2nd(scheduler, *args) else: raise NotImplementedError # File: diffusers-main/src/diffusers/pipelines/ledits_pp/pipeline_output.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class LEditsPPDiffusionPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] @dataclass class LEditsPPInversionPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] vae_reconstruction_images: Union[List[PIL.Image.Image], np.ndarray] # File: diffusers-main/src/diffusers/pipelines/lumina/__init__.py from typing import TYPE_CHECKING from ...utils import 
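# Illustrative numeric check (not part of the pipeline source): both `compute_noise_ddim` and
# `compute_noise_sde_dpm_pp_2nd` above rely on the same identity. The scheduler's posterior is
# x_prev = mu + sigma * z, so given consecutive inversion latents the injected noise can be
# recovered as z = (x_prev - mu) / sigma, and re-applying it reproduces x_prev exactly. Values
# below are synthetic.
import torch

mu = torch.randn(1, 4, 8, 8)      # posterior mean for the step t -> t-1
sigma = torch.tensor(0.3)         # posterior std (eta-scaled)
z = torch.randn_like(mu)          # the "variance noise" the pipeline stores in `zs`
x_prev = mu + sigma * z

z_recovered = (x_prev - mu) / sigma
assert torch.allclose(z_recovered, z, atol=1e-6)
assert torch.allclose(mu + sigma * z_recovered, x_prev, atol=1e-6)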
DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_lumina'] = ['LuminaText2ImgPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_lumina import LuminaText2ImgPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/lumina/pipeline_lumina.py import html import inspect import math import re import urllib.parse as ul from typing import List, Optional, Tuple, Union import torch from transformers import AutoModel, AutoTokenizer from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL from ...models.embeddings import get_2d_rotary_pos_embed_lumina from ...models.transformers.lumina_nextdit2d import LuminaNextDiT2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import BACKENDS_MAPPING, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import LuminaText2ImgPipeline\n\n >>> pipe = LuminaText2ImgPipeline.from_pretrained(\n ... "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16\n ... )\n >>> # Enable memory optimizations.\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "Upper body of a young woman in a Victorian-era outfit with brass goggles and leather straps. Background shows an industrial revolution cityscape with smoky skies and tall, metal structures"\n >>> image = pipe(prompt).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class LuminaText2ImgPipeline(DiffusionPipeline): bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = [] model_cpu_offload_seq = 'text_encoder->transformer->vae' def __init__(self, transformer: LuminaNextDiT2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: AutoModel, tokenizer: AutoTokenizer): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.max_sequence_length = 256 self.default_sample_size = self.transformer.config.sample_size if hasattr(self, 'transformer') and self.transformer is not None else 128 self.default_image_size = self.default_sample_size * self.vae_scale_factor def _get_gemma_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None, clean_caption: Optional[bool]=False, max_length: Optional[int]=None): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, pad_to_multiple_of=8, max_length=self.max_sequence_length, truncation=True, padding=True, return_tensors='pt') text_input_ids = text_inputs.input_ids.to(device) untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids.to(device) if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.max_sequence_length - 1:-1]) logger.warning(f'The following part of your input was truncated because Gemma can only handle sequences up to {self.max_sequence_length} tokens: {removed_text}') prompt_attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds.hidden_states[-2] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_attention_mask = 
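# Illustrative sketch (not part of the pipeline source): `retrieve_timesteps` above only forwards
# custom `timesteps` / `sigmas` when the scheduler's `set_timesteps` signature actually accepts
# them, and raises otherwise. The capability check in isolation, with a hypothetical scheduler
# stand-in:
import inspect

def accepts_kwarg(fn, name: str) -> bool:
    return name in set(inspect.signature(fn).parameters.keys())

class DummyScheduler:
    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = list(range(num_inference_steps - 1, -1, -1))

scheduler = DummyScheduler()
print(accepts_kwarg(scheduler.set_timesteps, "timesteps"))  # False -> custom timesteps would raise
print(accepts_kwarg(scheduler.set_timesteps, "sigmas"))     # False -> custom sigmas would raise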
prompt_attention_mask.view(batch_size * num_images_per_prompt, -1) return (prompt_embeds, prompt_attention_mask) def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, negative_prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, device: Optional[torch.device]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, clean_caption: bool=False, **kwargs): if device is None: device = self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: (prompt_embeds, prompt_attention_mask) = self._get_gemma_prompt_embeds(prompt=prompt, num_images_per_prompt=num_images_per_prompt, device=device, clean_caption=clean_caption) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt if negative_prompt is not None else '' negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): negative_prompt = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') prompt_max_length = prompt_embeds.shape[1] negative_text_inputs = self.tokenizer(negative_prompt, padding='max_length', max_length=prompt_max_length, truncation=True, return_tensors='pt') negative_text_input_ids = negative_text_inputs.input_ids.to(device) negative_prompt_attention_mask = negative_text_inputs.attention_mask.to(device) negative_prompt_embeds = self.text_encoder(negative_text_input_ids, attention_mask=negative_prompt_attention_mask, output_hidden_states=True) negative_dtype = self.text_encoder.dtype negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] (_, seq_len, _) = negative_prompt_embeds.shape negative_prompt_embeds = negative_prompt_embeds.to(dtype=negative_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) negative_prompt_attention_mask = negative_prompt_attention_mask.view(batch_size * num_images_per_prompt, -1) return (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None): if height % 
8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError('Must provide `prompt_attention_mask` when specifying `prompt_embeds`.') if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError('Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError(f'`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask` {negative_prompt_attention_mask.shape}.') def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('<person>', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+',
'', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('"?', '', caption) caption = re.sub('&', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
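# Illustrative sketch (not part of the pipeline source): `prepare_latents` above samples latents at
# 1/vae_scale_factor of the pixel resolution. A quick shape check using the default visible in this
# file (vae_scale_factor = 8); the channel count of 4 is an assumption for illustration, the real
# value comes from `transformer.config.in_channels` at runtime.
import torch

batch_size, num_channels_latents = 2, 4
height = width = 1024
vae_scale_factor = 8
shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
latents = torch.randn(shape, generator=torch.Generator().manual_seed(0))
print(latents.shape)  # torch.Size([2, 4, 128, 128])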
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, width: Optional[int]=None, height: Optional[int]=None, num_inference_steps: int=30, timesteps: List[int]=None, guidance_scale: float=4.0, negative_prompt: Union[str, List[str]]=None, sigmas: List[float]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, clean_caption: bool=True, max_sequence_length: int=256, scaling_watershed: Optional[float]=1.0, proportional_attn: Optional[bool]=True) -> Union[ImagePipelineOutput, Tuple]: height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask) cross_attention_kwargs = {} if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if proportional_attn: cross_attention_kwargs['base_sequence_length'] = (self.default_image_size // 16) ** 2 scaling_factor = math.sqrt(width * height / self.default_image_size ** 2) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) = self.encode_prompt(prompt, do_classifier_free_guidance, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, clean_caption=clean_caption, max_sequence_length=max_sequence_length) if do_classifier_free_guidance: prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds], dim=0) prompt_attention_mask = torch.cat([prompt_attention_mask, negative_prompt_attention_mask], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) latent_channels = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, latent_channels, height, width, prompt_embeds.dtype, device, generator, latents) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents current_timestep = t if not torch.is_tensor(current_timestep): is_mps = latent_model_input.device.type == 'mps' if 
isinstance(current_timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) elif len(current_timestep.shape) == 0: current_timestep = current_timestep[None].to(latent_model_input.device) current_timestep = current_timestep.expand(latent_model_input.shape[0]) current_timestep = 1 - current_timestep / self.scheduler.config.num_train_timesteps if current_timestep[0] < scaling_watershed: linear_factor = scaling_factor ntk_factor = 1.0 else: linear_factor = 1.0 ntk_factor = scaling_factor image_rotary_emb = get_2d_rotary_pos_embed_lumina(self.transformer.head_dim, 384, 384, linear_factor=linear_factor, ntk_factor=ntk_factor) noise_pred = self.transformer(hidden_states=latent_model_input, timestep=current_timestep, encoder_hidden_states=prompt_embeds, encoder_mask=prompt_attention_mask, image_rotary_emb=image_rotary_emb, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] noise_pred = noise_pred.chunk(2, dim=1)[0] if do_classifier_free_guidance: (noise_pred_eps, noise_pred_rest) = (noise_pred[:, :3], noise_pred[:, 3:]) (noise_pred_cond_eps, noise_pred_uncond_eps) = torch.split(noise_pred_eps, len(noise_pred_eps) // 2, dim=0) noise_pred_half = noise_pred_uncond_eps + guidance_scale * (noise_pred_cond_eps - noise_pred_uncond_eps) noise_pred_eps = torch.cat([noise_pred_half, noise_pred_half], dim=0) noise_pred = torch.cat([noise_pred_eps, noise_pred_rest], dim=1) (noise_pred, _) = noise_pred.chunk(2, dim=0) latents_dtype = latents.dtype noise_pred = -noise_pred latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) progress_bar.update() if not output_type == 'latent': latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) else: image = latents self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/marigold/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['marigold_image_processing'] = ['MarigoldImageProcessor'] _import_structure['pipeline_marigold_depth'] = ['MarigoldDepthOutput', 'MarigoldDepthPipeline'] _import_structure['pipeline_marigold_normals'] = ['MarigoldNormalsOutput', 'MarigoldNormalsPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .marigold_image_processing import MarigoldImageProcessor from .pipeline_marigold_depth import MarigoldDepthOutput, MarigoldDepthPipeline from .pipeline_marigold_normals import MarigoldNormalsOutput, 
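# Illustrative sketch (not part of the pipeline source): the guidance step in the Lumina `__call__`
# above applies classifier-free guidance to the first 3 ("eps") channels only, duplicates the guided
# half back to full batch, then keeps a single copy. A self-contained version of just that tensor
# manipulation; channel and batch sizes are illustrative.
import torch

def lumina_style_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred: (2 * batch, channels, h, w) with the conditional half first
    eps, rest = noise_pred[:, :3], noise_pred[:, 3:]
    cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
    guided = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    eps = torch.cat([guided, guided], dim=0)
    out = torch.cat([eps, rest], dim=1)
    half, _ = out.chunk(2, dim=0)   # keep a single copy, as the pipeline does
    return half

print(lumina_style_cfg(torch.randn(4, 4, 16, 16), guidance_scale=4.0).shape)  # torch.Size([2, 4, 16, 16])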
MarigoldNormalsPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/marigold/marigold_image_processing.py from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.nn.functional as F from PIL import Image from ... import ConfigMixin from ...configuration_utils import register_to_config from ...image_processor import PipelineImageInput from ...utils import CONFIG_NAME, logging from ...utils.import_utils import is_matplotlib_available logger = logging.get_logger(__name__) class MarigoldImageProcessor(ConfigMixin): config_name = CONFIG_NAME @register_to_config def __init__(self, vae_scale_factor: int=8, do_normalize: bool=True, do_range_check: bool=True): super().__init__() @staticmethod def expand_tensor_or_array(images: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]: if isinstance(images, np.ndarray): if images.ndim == 2: images = images[None, ..., None] if images.ndim == 3: images = images[None] elif isinstance(images, torch.Tensor): if images.ndim == 2: images = images[None, None] elif images.ndim == 3: images = images[None] else: raise ValueError(f'Unexpected input type: {type(images)}') return images @staticmethod def pt_to_numpy(images: torch.Tensor) -> np.ndarray: images = images.cpu().permute(0, 2, 3, 1).float().numpy() return images @staticmethod def numpy_to_pt(images: np.ndarray) -> torch.Tensor: if np.issubdtype(images.dtype, np.integer) and (not np.issubdtype(images.dtype, np.unsignedinteger)): raise ValueError(f'Input image dtype={images.dtype} cannot be a signed integer.') if np.issubdtype(images.dtype, np.complexfloating): raise ValueError(f'Input image dtype={images.dtype} cannot be complex.') if np.issubdtype(images.dtype, bool): raise ValueError(f'Input image dtype={images.dtype} cannot be boolean.') images = torch.from_numpy(images.transpose(0, 3, 1, 2)) return images @staticmethod def resize_antialias(image: torch.Tensor, size: Tuple[int, int], mode: str, is_aa: Optional[bool]=None) -> torch.Tensor: if not torch.is_tensor(image): raise ValueError(f'Invalid input type={type(image)}.') if not torch.is_floating_point(image): raise ValueError(f'Invalid input dtype={image.dtype}.') if image.dim() != 4: raise ValueError(f'Invalid input dimensions; shape={image.shape}.') antialias = is_aa and mode in ('bilinear', 'bicubic') image = F.interpolate(image, size, mode=mode, antialias=antialias) return image @staticmethod def resize_to_max_edge(image: torch.Tensor, max_edge_sz: int, mode: str) -> torch.Tensor: if not torch.is_tensor(image): raise ValueError(f'Invalid input type={type(image)}.') if not torch.is_floating_point(image): raise ValueError(f'Invalid input dtype={image.dtype}.') if image.dim() != 4: raise ValueError(f'Invalid input dimensions; shape={image.shape}.') (h, w) = image.shape[-2:] max_orig = max(h, w) new_h = h * max_edge_sz // max_orig new_w = w * max_edge_sz // max_orig if new_h == 0 or new_w == 0: raise ValueError(f'Extreme aspect ratio of the input image: [{w} x {h}]') image = MarigoldImageProcessor.resize_antialias(image, (new_h, new_w), mode, is_aa=True) return image @staticmethod def pad_image(image: torch.Tensor, align: int) -> Tuple[torch.Tensor, Tuple[int, int]]: if not torch.is_tensor(image): raise ValueError(f'Invalid input type={type(image)}.') if not 
torch.is_floating_point(image): raise ValueError(f'Invalid input dtype={image.dtype}.') if image.dim() != 4: raise ValueError(f'Invalid input dimensions; shape={image.shape}.') (h, w) = image.shape[-2:] (ph, pw) = (-h % align, -w % align) image = F.pad(image, (0, pw, 0, ph), mode='replicate') return (image, (ph, pw)) @staticmethod def unpad_image(image: torch.Tensor, padding: Tuple[int, int]) -> torch.Tensor: if not torch.is_tensor(image): raise ValueError(f'Invalid input type={type(image)}.') if not torch.is_floating_point(image): raise ValueError(f'Invalid input dtype={image.dtype}.') if image.dim() != 4: raise ValueError(f'Invalid input dimensions; shape={image.shape}.') (ph, pw) = padding uh = None if ph == 0 else -ph uw = None if pw == 0 else -pw image = image[:, :, :uh, :uw] return image @staticmethod def load_image_canonical(image: Union[torch.Tensor, np.ndarray, Image.Image], device: torch.device=torch.device('cpu'), dtype: torch.dtype=torch.float32) -> Tuple[torch.Tensor, int]: if isinstance(image, Image.Image): image = np.array(image) image_dtype_max = None if isinstance(image, (np.ndarray, torch.Tensor)): image = MarigoldImageProcessor.expand_tensor_or_array(image) if image.ndim != 4: raise ValueError('Input image is not 2-, 3-, or 4-dimensional.') if isinstance(image, np.ndarray): if np.issubdtype(image.dtype, np.integer) and (not np.issubdtype(image.dtype, np.unsignedinteger)): raise ValueError(f'Input image dtype={image.dtype} cannot be a signed integer.') if np.issubdtype(image.dtype, np.complexfloating): raise ValueError(f'Input image dtype={image.dtype} cannot be complex.') if np.issubdtype(image.dtype, bool): raise ValueError(f'Input image dtype={image.dtype} cannot be boolean.') if np.issubdtype(image.dtype, np.unsignedinteger): image_dtype_max = np.iinfo(image.dtype).max image = image.astype(np.float32) image = MarigoldImageProcessor.numpy_to_pt(image) if torch.is_tensor(image) and (not torch.is_floating_point(image)) and (image_dtype_max is None): if image.dtype != torch.uint8: raise ValueError(f'Image dtype={image.dtype} is not supported.') image_dtype_max = 255 if not torch.is_tensor(image): raise ValueError(f'Input type unsupported: {type(image)}.') if image.shape[1] == 1: image = image.repeat(1, 3, 1, 1) if image.shape[1] != 3: raise ValueError(f'Input image is not 1- or 3-channel: {image.shape}.') image = image.to(device=device, dtype=dtype) if image_dtype_max is not None: image = image / image_dtype_max return image @staticmethod def check_image_values_range(image: torch.Tensor) -> None: if not torch.is_tensor(image): raise ValueError(f'Invalid input type={type(image)}.') if not torch.is_floating_point(image): raise ValueError(f'Invalid input dtype={image.dtype}.') if image.min().item() < 0.0 or image.max().item() > 1.0: raise ValueError('Input image data is partially outside of the [0,1] range.') def preprocess(self, image: PipelineImageInput, processing_resolution: Optional[int]=None, resample_method_input: str='bilinear', device: torch.device=torch.device('cpu'), dtype: torch.dtype=torch.float32): if isinstance(image, list): images = None for (i, img) in enumerate(image): img = self.load_image_canonical(img, device, dtype) if images is None: images = img else: if images.shape[2:] != img.shape[2:]: raise ValueError(f'Input image[{i}] has incompatible dimensions {img.shape[2:]} with the previous images {images.shape[2:]}') images = torch.cat((images, img), dim=0) image = images del images else: image = self.load_image_canonical(image, device, dtype) 
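# Illustrative sketch (not part of the pipeline source): `pad_image` / `unpad_image` above round the
# spatial size up to a multiple of `align` (the VAE scale factor) with replicate padding, and slice
# the padding back off after inference. A minimal round trip:
import torch
import torch.nn.functional as F

def pad_to_multiple(image: torch.Tensor, align: int):
    h, w = image.shape[-2:]
    ph, pw = -h % align, -w % align
    return F.pad(image, (0, pw, 0, ph), mode="replicate"), (ph, pw)

def unpad(image: torch.Tensor, padding):
    ph, pw = padding
    uh = None if ph == 0 else -ph
    uw = None if pw == 0 else -pw
    return image[:, :, :uh, :uw]

x = torch.rand(1, 3, 518, 770)
padded, padding = pad_to_multiple(x, 8)
assert padded.shape[-2:] == (520, 776)
assert unpad(padded, padding).shape == x.shape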
original_resolution = image.shape[2:] if self.config.do_range_check: self.check_image_values_range(image) if self.config.do_normalize: image = image * 2.0 - 1.0 if processing_resolution is not None and processing_resolution > 0: image = self.resize_to_max_edge(image, processing_resolution, resample_method_input) (image, padding) = self.pad_image(image, self.config.vae_scale_factor) return (image, padding, original_resolution) @staticmethod def colormap(image: Union[np.ndarray, torch.Tensor], cmap: str='Spectral', bytes: bool=False, _force_method: Optional[str]=None) -> Union[np.ndarray, torch.Tensor]: if not (torch.is_tensor(image) or isinstance(image, np.ndarray)): raise ValueError('Argument must be a numpy array or torch tensor.') if _force_method not in (None, 'matplotlib', 'custom'): raise ValueError("_force_method must be either `None`, `'matplotlib'` or `'custom'`.") supported_cmaps = {'binary': [(1.0, 1.0, 1.0), (0.0, 0.0, 0.0)], 'Spectral': [(0.6196078431372549, 0.00392156862745098, 0.25882352941176473), (0.8352941176470589, 0.24313725490196078, 0.30980392156862746), (0.9568627450980393, 0.42745098039215684, 0.2627450980392157), (0.9921568627450981, 0.6823529411764706, 0.3803921568627451), (0.996078431372549, 0.8784313725490196, 0.5450980392156862), (1.0, 1.0, 0.7490196078431373), (0.9019607843137255, 0.9607843137254902, 0.596078431372549), (0.6705882352941176, 0.8666666666666667, 0.6431372549019608), (0.4, 0.7607843137254902, 0.6470588235294118), (0.19607843137254902, 0.5333333333333333, 0.7411764705882353), (0.3686274509803922, 0.30980392156862746, 0.6352941176470588)]} def method_matplotlib(image, cmap, bytes=False): if is_matplotlib_available(): import matplotlib else: return None (arg_is_pt, device) = (torch.is_tensor(image), None) if arg_is_pt: (image, device) = (image.cpu().numpy(), image.device) if cmap not in matplotlib.colormaps: raise ValueError(f"Unexpected color map {cmap}; available options are: {', '.join(list(matplotlib.colormaps.keys()))}") cmap = matplotlib.colormaps[cmap] out = cmap(image, bytes=bytes) out = out[..., :3] if arg_is_pt: out = torch.tensor(out, device=device) return out def method_custom(image, cmap, bytes=False): arg_is_np = isinstance(image, np.ndarray) if arg_is_np: image = torch.tensor(image) if image.dtype == torch.uint8: image = image.float() / 255 else: image = image.float() is_cmap_reversed = cmap.endswith('_r') if is_cmap_reversed: cmap = cmap[:-2] if cmap not in supported_cmaps: raise ValueError(f'Only {list(supported_cmaps.keys())} color maps are available without installing matplotlib.') cmap = supported_cmaps[cmap] if is_cmap_reversed: cmap = cmap[::-1] cmap = torch.tensor(cmap, dtype=torch.float, device=image.device) K = cmap.shape[0] pos = image.clamp(min=0, max=1) * (K - 1) left = pos.long() right = (left + 1).clamp(max=K - 1) d = (pos - left.float()).unsqueeze(-1) left_colors = cmap[left] right_colors = cmap[right] out = (1 - d) * left_colors + d * right_colors if bytes: out = (out * 255).to(torch.uint8) if arg_is_np: out = out.numpy() return out if _force_method is None and torch.is_tensor(image) and (cmap == 'Spectral'): return method_custom(image, cmap, bytes) out = None if _force_method != 'custom': out = method_matplotlib(image, cmap, bytes) if _force_method == 'matplotlib' and out is None: raise ImportError("Make sure to install matplotlib if you want to use a color map other than 'Spectral'.") if out is None: out = method_custom(image, cmap, bytes) return out @staticmethod def visualize_depth(depth: Union[PIL.Image.Image, 
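# Illustrative sketch (not part of the pipeline source): `method_custom` inside `colormap` above is a
# piecewise-linear lookup: values in [0, 1] are scaled to the anchor-color index range and the two
# neighbouring anchors are blended. The lookup in isolation, with a hypothetical 2-anchor grayscale map:
import torch

def apply_colormap(values: torch.Tensor, anchors: torch.Tensor) -> torch.Tensor:
    # values in [0, 1], any shape; anchors: (K, 3) RGB rows
    k = anchors.shape[0]
    pos = values.clamp(0, 1) * (k - 1)
    left = pos.long()
    right = (left + 1).clamp(max=k - 1)
    frac = (pos - left.float()).unsqueeze(-1)
    return (1 - frac) * anchors[left] + frac * anchors[right]   # (..., 3)

grayscale = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
print(apply_colormap(torch.linspace(0, 1, 5), grayscale))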
np.ndarray, torch.Tensor, List[PIL.Image.Image], List[np.ndarray], List[torch.Tensor]], val_min: float=0.0, val_max: float=1.0, color_map: str='Spectral') -> Union[PIL.Image.Image, List[PIL.Image.Image]]: if val_max <= val_min: raise ValueError(f'Invalid values range: [{val_min}, {val_max}].') def visualize_depth_one(img, idx=None): prefix = 'Depth' + (f'[{idx}]' if idx else '') if isinstance(img, PIL.Image.Image): if img.mode != 'I;16': raise ValueError(f'{prefix}: invalid PIL mode={img.mode}.') img = np.array(img).astype(np.float32) / (2 ** 16 - 1) if isinstance(img, np.ndarray) or torch.is_tensor(img): if img.ndim != 2: raise ValueError(f'{prefix}: unexpected shape={img.shape}.') if isinstance(img, np.ndarray): img = torch.from_numpy(img) if not torch.is_floating_point(img): raise ValueError(f'{prefix}: unexected dtype={img.dtype}.') else: raise ValueError(f'{prefix}: unexpected type={type(img)}.') if val_min != 0.0 or val_max != 1.0: img = (img - val_min) / (val_max - val_min) img = MarigoldImageProcessor.colormap(img, cmap=color_map, bytes=True) img = PIL.Image.fromarray(img.cpu().numpy()) return img if depth is None or (isinstance(depth, list) and any((o is None for o in depth))): raise ValueError('Input depth is `None`') if isinstance(depth, (np.ndarray, torch.Tensor)): depth = MarigoldImageProcessor.expand_tensor_or_array(depth) if isinstance(depth, np.ndarray): depth = MarigoldImageProcessor.numpy_to_pt(depth) if not (depth.ndim == 4 and depth.shape[1] == 1): raise ValueError(f'Unexpected input shape={depth.shape}, expecting [N,1,H,W].') return [visualize_depth_one(img[0], idx) for (idx, img) in enumerate(depth)] elif isinstance(depth, list): return [visualize_depth_one(img, idx) for (idx, img) in enumerate(depth)] else: raise ValueError(f'Unexpected input type: {type(depth)}') @staticmethod def export_depth_to_16bit_png(depth: Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]], val_min: float=0.0, val_max: float=1.0) -> Union[PIL.Image.Image, List[PIL.Image.Image]]: def export_depth_to_16bit_png_one(img, idx=None): prefix = 'Depth' + (f'[{idx}]' if idx else '') if not isinstance(img, np.ndarray) and (not torch.is_tensor(img)): raise ValueError(f'{prefix}: unexpected type={type(img)}.') if img.ndim != 2: raise ValueError(f'{prefix}: unexpected shape={img.shape}.') if torch.is_tensor(img): img = img.cpu().numpy() if not np.issubdtype(img.dtype, np.floating): raise ValueError(f'{prefix}: unexected dtype={img.dtype}.') if val_min != 0.0 or val_max != 1.0: img = (img - val_min) / (val_max - val_min) img = (img * (2 ** 16 - 1)).astype(np.uint16) img = PIL.Image.fromarray(img, mode='I;16') return img if depth is None or (isinstance(depth, list) and any((o is None for o in depth))): raise ValueError('Input depth is `None`') if isinstance(depth, (np.ndarray, torch.Tensor)): depth = MarigoldImageProcessor.expand_tensor_or_array(depth) if isinstance(depth, np.ndarray): depth = MarigoldImageProcessor.numpy_to_pt(depth) if not (depth.ndim == 4 and depth.shape[1] == 1): raise ValueError(f'Unexpected input shape={depth.shape}, expecting [N,1,H,W].') return [export_depth_to_16bit_png_one(img[0], idx) for (idx, img) in enumerate(depth)] elif isinstance(depth, list): return [export_depth_to_16bit_png_one(img, idx) for (idx, img) in enumerate(depth)] else: raise ValueError(f'Unexpected input type: {type(depth)}') @staticmethod def visualize_normals(normals: Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]], flip_x: bool=False, flip_y: bool=False, flip_z: 
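# Illustrative round trip (not part of the pipeline source): `export_depth_to_16bit_png` above
# quantizes float depth in [0, 1] to uint16 and wraps it in a mode "I;16" PIL image, and
# `visualize_depth` reads such images back by dividing by 2**16 - 1. A standalone check that the
# round trip only loses quantization precision; the depth map is random:
import numpy as np
from PIL import Image

depth = np.random.rand(64, 64).astype(np.float32)
png = Image.fromarray((depth * (2**16 - 1)).astype(np.uint16), mode="I;16")
restored = np.array(png).astype(np.float32) / (2**16 - 1)
assert np.abs(restored - depth).max() <= 1.0 / (2**16 - 1) + 1e-7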
bool=False) -> Union[PIL.Image.Image, List[PIL.Image.Image]]: flip_vec = None if any((flip_x, flip_y, flip_z)): flip_vec = torch.tensor([(-1) ** flip_x, (-1) ** flip_y, (-1) ** flip_z], dtype=torch.float32) def visualize_normals_one(img, idx=None): img = img.permute(1, 2, 0) if flip_vec is not None: img *= flip_vec.to(img.device) img = (img + 1.0) * 0.5 img = (img * 255).to(dtype=torch.uint8, device='cpu').numpy() img = PIL.Image.fromarray(img) return img if normals is None or (isinstance(normals, list) and any((o is None for o in normals))): raise ValueError('Input normals is `None`') if isinstance(normals, (np.ndarray, torch.Tensor)): normals = MarigoldImageProcessor.expand_tensor_or_array(normals) if isinstance(normals, np.ndarray): normals = MarigoldImageProcessor.numpy_to_pt(normals) if not (normals.ndim == 4 and normals.shape[1] == 3): raise ValueError(f'Unexpected input shape={normals.shape}, expecting [N,3,H,W].') return [visualize_normals_one(img, idx) for (idx, img) in enumerate(normals)] elif isinstance(normals, list): return [visualize_normals_one(img, idx) for (idx, img) in enumerate(normals)] else: raise ValueError(f'Unexpected input type: {type(normals)}') @staticmethod def visualize_uncertainty(uncertainty: Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]], saturation_percentile=95) -> Union[PIL.Image.Image, List[PIL.Image.Image]]: def visualize_uncertainty_one(img, idx=None): prefix = 'Uncertainty' + (f'[{idx}]' if idx else '') if img.min() < 0: raise ValueError(f'{prefix}: unexpected data range, min={img.min()}.') img = img.squeeze(0).cpu().numpy() saturation_value = np.percentile(img, saturation_percentile) img = np.clip(img * 255 / saturation_value, 0, 255) img = img.astype(np.uint8) img = PIL.Image.fromarray(img) return img if uncertainty is None or (isinstance(uncertainty, list) and any((o is None for o in uncertainty))): raise ValueError('Input uncertainty is `None`') if isinstance(uncertainty, (np.ndarray, torch.Tensor)): uncertainty = MarigoldImageProcessor.expand_tensor_or_array(uncertainty) if isinstance(uncertainty, np.ndarray): uncertainty = MarigoldImageProcessor.numpy_to_pt(uncertainty) if not (uncertainty.ndim == 4 and uncertainty.shape[1] == 1): raise ValueError(f'Unexpected input shape={uncertainty.shape}, expecting [N,1,H,W].') return [visualize_uncertainty_one(img, idx) for (idx, img) in enumerate(uncertainty)] elif isinstance(uncertainty, list): return [visualize_uncertainty_one(img, idx) for (idx, img) in enumerate(uncertainty)] else: raise ValueError(f'Unexpected input type: {type(uncertainty)}') # File: diffusers-main/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py from dataclasses import dataclass from functools import partial from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch from PIL import Image from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from ...image_processor import PipelineImageInput from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import DDIMScheduler, LCMScheduler from ...utils import BaseOutput, logging, replace_example_docstring from ...utils.import_utils import is_scipy_available from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .marigold_image_processing import MarigoldImageProcessor logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\nExamples:\n```py\n>>> import diffusers\n>>> import torch\n\n>>> pipe = 
diffusers.MarigoldDepthPipeline.from_pretrained(\n... "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16\n... ).to("cuda")\n\n>>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")\n>>> depth = pipe(image)\n\n>>> vis = pipe.image_processor.visualize_depth(depth.prediction)\n>>> vis[0].save("einstein_depth.png")\n\n>>> depth_16bit = pipe.image_processor.export_depth_to_16bit_png(depth.prediction)\n>>> depth_16bit[0].save("einstein_depth_16bit.png")\n```\n' @dataclass class MarigoldDepthOutput(BaseOutput): prediction: Union[np.ndarray, torch.Tensor] uncertainty: Union[None, np.ndarray, torch.Tensor] latent: Union[None, torch.Tensor] class MarigoldDepthPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->unet->vae' supported_prediction_types = ('depth', 'disparity') def __init__(self, unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: Union[DDIMScheduler, LCMScheduler], text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, prediction_type: Optional[str]=None, scale_invariant: Optional[bool]=True, shift_invariant: Optional[bool]=True, default_denoising_steps: Optional[int]=None, default_processing_resolution: Optional[int]=None): super().__init__() if prediction_type not in self.supported_prediction_types: logger.warning(f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: {self.supported_prediction_types}.") self.register_modules(unet=unet, vae=vae, scheduler=scheduler, text_encoder=text_encoder, tokenizer=tokenizer) self.register_to_config(prediction_type=prediction_type, scale_invariant=scale_invariant, shift_invariant=shift_invariant, default_denoising_steps=default_denoising_steps, default_processing_resolution=default_processing_resolution) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.scale_invariant = scale_invariant self.shift_invariant = shift_invariant self.default_denoising_steps = default_denoising_steps self.default_processing_resolution = default_processing_resolution self.empty_text_embedding = None self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor) def check_inputs(self, image: PipelineImageInput, num_inference_steps: int, ensemble_size: int, processing_resolution: int, resample_method_input: str, resample_method_output: str, batch_size: int, ensembling_kwargs: Optional[Dict[str, Any]], latents: Optional[torch.Tensor], generator: Optional[Union[torch.Generator, List[torch.Generator]]], output_type: str, output_uncertainty: bool) -> int: if num_inference_steps is None: raise ValueError('`num_inference_steps` is not specified and could not be resolved from the model config.') if num_inference_steps < 1: raise ValueError('`num_inference_steps` must be positive.') if ensemble_size < 1: raise ValueError('`ensemble_size` must be positive.') if ensemble_size == 2: logger.warning('`ensemble_size` == 2 results are similar to no ensembling (1); consider increasing the value to at least 3.') if ensemble_size > 1 and (self.scale_invariant or self.shift_invariant) and (not is_scipy_available()): raise ImportError('Make sure to install scipy if you want to use ensembling.') if ensemble_size == 1 and output_uncertainty: raise ValueError('Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` greater than 1.') if processing_resolution is None: raise ValueError('`processing_resolution` is not specified and could not be resolved from the model 
config.') if processing_resolution < 0: raise ValueError('`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for downsampled processing.') if processing_resolution % self.vae_scale_factor != 0: raise ValueError(f'`processing_resolution` must be a multiple of {self.vae_scale_factor}.') if resample_method_input not in ('nearest', 'nearest-exact', 'bilinear', 'bicubic', 'area'): raise ValueError('`resample_method_input` takes string values compatible with PIL library: nearest, nearest-exact, bilinear, bicubic, area.') if resample_method_output not in ('nearest', 'nearest-exact', 'bilinear', 'bicubic', 'area'): raise ValueError('`resample_method_output` takes string values compatible with PIL library: nearest, nearest-exact, bilinear, bicubic, area.') if batch_size < 1: raise ValueError('`batch_size` must be positive.') if output_type not in ['pt', 'np']: raise ValueError('`output_type` must be one of `pt` or `np`.') if latents is not None and generator is not None: raise ValueError('`latents` and `generator` cannot be used together.') if ensembling_kwargs is not None: if not isinstance(ensembling_kwargs, dict): raise ValueError('`ensembling_kwargs` must be a dictionary.') if 'reduction' in ensembling_kwargs and ensembling_kwargs['reduction'] not in ('mean', 'median'): raise ValueError("`ensembling_kwargs['reduction']` can be either `'mean'` or `'median'`.") num_images = 0 (W, H) = (None, None) if not isinstance(image, list): image = [image] for (i, img) in enumerate(image): if isinstance(img, np.ndarray) or torch.is_tensor(img): if img.ndim not in (2, 3, 4): raise ValueError(f'`image[{i}]` has unsupported dimensions or shape: {img.shape}.') (H_i, W_i) = img.shape[-2:] N_i = 1 if img.ndim == 4: N_i = img.shape[0] elif isinstance(img, Image.Image): (W_i, H_i) = img.size N_i = 1 else: raise ValueError(f'Unsupported `image[{i}]` type: {type(img)}.') if W is None: (W, H) = (W_i, H_i) elif (W, H) != (W_i, H_i): raise ValueError(f'Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}') num_images += N_i if latents is not None: if not torch.is_tensor(latents): raise ValueError('`latents` must be a torch.Tensor.') if latents.dim() != 4: raise ValueError(f'`latents` has unsupported dimensions or shape: {latents.shape}.') if processing_resolution > 0: max_orig = max(H, W) new_H = H * processing_resolution // max_orig new_W = W * processing_resolution // max_orig if new_H == 0 or new_W == 0: raise ValueError(f'Extreme aspect ratio of the input image: [{W} x {H}]') (W, H) = (new_W, new_H) w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w) if latents.shape != shape_expected: raise ValueError(f'`latents` has unexpected shape={latents.shape} expected={shape_expected}.') if generator is not None: if isinstance(generator, list): if len(generator) != num_images * ensemble_size: raise ValueError('The number of generators must match the total number of ensemble members for all input images.') if not all((g.device.type == generator[0].device.type for g in generator)): raise ValueError('`generator` device placement is not consistent in the list.') elif not isinstance(generator, torch.Generator): raise ValueError(f'Unsupported generator type: {type(generator)}.') return num_images def progress_bar(self, iterable=None, total=None, desc=None, leave=True): if not hasattr(self, 
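# Hedged worked example, not library code: check_inputs above validates a
# user-supplied `latents` tensor against a shape derived from the processing
# resolution and the VAE downscale factor. The numbers below (768x512 input,
# processing_resolution=768, vae_scale_factor=8, 4 latent channels) and the
# function name are illustrative assumptions.
def _example_expected_latent_shape(W=768, H=512, processing_resolution=768,
                                   vae_scale_factor=8, num_images=1,
                                   ensemble_size=1, latent_channels=4):
    max_orig = max(H, W)
    new_H = H * processing_resolution // max_orig             # 512
    new_W = W * processing_resolution // max_orig             # 768
    h = (new_H + vae_scale_factor - 1) // vae_scale_factor    # ceil(512 / 8) = 64
    w = (new_W + vae_scale_factor - 1) // vae_scale_factor    # ceil(768 / 8) = 96
    return (num_images * ensemble_size, latent_channels, h, w)  # (1, 4, 64, 96)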
'_progress_bar_config'): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError(f'`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}.') progress_bar_config = dict(**self._progress_bar_config) progress_bar_config['desc'] = progress_bar_config.get('desc', desc) progress_bar_config['leave'] = progress_bar_config.get('leave', leave) if iterable is not None: return tqdm(iterable, **progress_bar_config) elif total is not None: return tqdm(total=total, **progress_bar_config) else: raise ValueError('Either `total` or `iterable` has to be defined.') @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image: PipelineImageInput, num_inference_steps: Optional[int]=None, ensemble_size: int=1, processing_resolution: Optional[int]=None, match_input_resolution: bool=True, resample_method_input: str='bilinear', resample_method_output: str='bilinear', batch_size: int=1, ensembling_kwargs: Optional[Dict[str, Any]]=None, latents: Optional[Union[torch.Tensor, List[torch.Tensor]]]=None, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: str='np', output_uncertainty: bool=False, output_latent: bool=False, return_dict: bool=True): device = self._execution_device dtype = self.dtype if num_inference_steps is None: num_inference_steps = self.default_denoising_steps if processing_resolution is None: processing_resolution = self.default_processing_resolution num_images = self.check_inputs(image, num_inference_steps, ensemble_size, processing_resolution, resample_method_input, resample_method_output, batch_size, ensembling_kwargs, latents, generator, output_type, output_uncertainty) if self.empty_text_embedding is None: prompt = '' text_inputs = self.tokenizer(prompt, padding='do_not_pad', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids.to(device) self.empty_text_embedding = self.text_encoder(text_input_ids)[0] (image, padding, original_resolution) = self.image_processor.preprocess(image, processing_resolution, resample_method_input, device, dtype) (image_latent, pred_latent) = self.prepare_latents(image, latents, generator, ensemble_size, batch_size) del image batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat(batch_size, 1, 1) pred_latents = [] for i in self.progress_bar(range(0, num_images * ensemble_size, batch_size), leave=True, desc='Marigold predictions...'): batch_image_latent = image_latent[i:i + batch_size] batch_pred_latent = pred_latent[i:i + batch_size] effective_batch_size = batch_image_latent.shape[0] text = batch_empty_text_embedding[:effective_batch_size] self.scheduler.set_timesteps(num_inference_steps, device=device) for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc='Diffusion steps...'): batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1) noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0] batch_pred_latent = self.scheduler.step(noise, t, batch_pred_latent, generator=generator).prev_sample pred_latents.append(batch_pred_latent) pred_latent = torch.cat(pred_latents, dim=0) del (pred_latents, image_latent, batch_empty_text_embedding, batch_image_latent, batch_pred_latent, text, batch_latent, noise) prediction = torch.cat([self.decode_prediction(pred_latent[i:i + batch_size]) for i in range(0, pred_latent.shape[0], batch_size)], dim=0) if not output_latent: 
pred_latent = None prediction = self.image_processor.unpad_image(prediction, padding) uncertainty = None if ensemble_size > 1: prediction = prediction.reshape(num_images, ensemble_size, *prediction.shape[1:]) prediction = [self.ensemble_depth(prediction[i], self.scale_invariant, self.shift_invariant, output_uncertainty, **ensembling_kwargs or {}) for i in range(num_images)] (prediction, uncertainty) = zip(*prediction) prediction = torch.cat(prediction, dim=0) if output_uncertainty: uncertainty = torch.cat(uncertainty, dim=0) else: uncertainty = None if match_input_resolution: prediction = self.image_processor.resize_antialias(prediction, original_resolution, resample_method_output, is_aa=False) if uncertainty is not None and output_uncertainty: uncertainty = self.image_processor.resize_antialias(uncertainty, original_resolution, resample_method_output, is_aa=False) if output_type == 'np': prediction = self.image_processor.pt_to_numpy(prediction) if uncertainty is not None and output_uncertainty: uncertainty = self.image_processor.pt_to_numpy(uncertainty) self.maybe_free_model_hooks() if not return_dict: return (prediction, uncertainty, pred_latent) return MarigoldDepthOutput(prediction=prediction, uncertainty=uncertainty, latent=pred_latent) def prepare_latents(self, image: torch.Tensor, latents: Optional[torch.Tensor], generator: Optional[torch.Generator], ensemble_size: int, batch_size: int) -> Tuple[torch.Tensor, torch.Tensor]: def retrieve_latents(encoder_output): if hasattr(encoder_output, 'latent_dist'): return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') image_latent = torch.cat([retrieve_latents(self.vae.encode(image[i:i + batch_size])) for i in range(0, image.shape[0], batch_size)], dim=0) image_latent = image_latent * self.vae.config.scaling_factor image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) pred_latent = latents if pred_latent is None: pred_latent = randn_tensor(image_latent.shape, generator=generator, device=image_latent.device, dtype=image_latent.dtype) return (image_latent, pred_latent) def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor: if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels: raise ValueError(f'Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}.') prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] prediction = prediction.mean(dim=1, keepdim=True) prediction = torch.clip(prediction, -1.0, 1.0) prediction = (prediction + 1.0) / 2.0 return prediction @staticmethod def ensemble_depth(depth: torch.Tensor, scale_invariant: bool=True, shift_invariant: bool=True, output_uncertainty: bool=False, reduction: str='median', regularizer_strength: float=0.02, max_iter: int=2, tol: float=0.001, max_res: int=1024) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if depth.dim() != 4 or depth.shape[1] != 1: raise ValueError(f'Expecting 4D tensor of shape [B,1,H,W]; got {depth.shape}.') if reduction not in ('mean', 'median'): raise ValueError(f'Unrecognized reduction method: {reduction}.') if not scale_invariant and shift_invariant: raise ValueError('Pure shift-invariant ensembling is not supported.') def init_param(depth: torch.Tensor): init_min = depth.reshape(ensemble_size, -1).min(dim=1).values init_max = depth.reshape(ensemble_size, -1).max(dim=1).values if scale_invariant 
and shift_invariant: init_s = 1.0 / (init_max - init_min).clamp(min=1e-06) init_t = -init_s * init_min param = torch.cat((init_s, init_t)).cpu().numpy() elif scale_invariant: init_s = 1.0 / init_max.clamp(min=1e-06) param = init_s.cpu().numpy() else: raise ValueError('Unrecognized alignment.') return param def align(depth: torch.Tensor, param: np.ndarray) -> torch.Tensor: if scale_invariant and shift_invariant: (s, t) = np.split(param, 2) s = torch.from_numpy(s).to(depth).view(ensemble_size, 1, 1, 1) t = torch.from_numpy(t).to(depth).view(ensemble_size, 1, 1, 1) out = depth * s + t elif scale_invariant: s = torch.from_numpy(param).to(depth).view(ensemble_size, 1, 1, 1) out = depth * s else: raise ValueError('Unrecognized alignment.') return out def ensemble(depth_aligned: torch.Tensor, return_uncertainty: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: uncertainty = None if reduction == 'mean': prediction = torch.mean(depth_aligned, dim=0, keepdim=True) if return_uncertainty: uncertainty = torch.std(depth_aligned, dim=0, keepdim=True) elif reduction == 'median': prediction = torch.median(depth_aligned, dim=0, keepdim=True).values if return_uncertainty: uncertainty = torch.median(torch.abs(depth_aligned - prediction), dim=0, keepdim=True).values else: raise ValueError(f'Unrecognized reduction method: {reduction}.') return (prediction, uncertainty) def cost_fn(param: np.ndarray, depth: torch.Tensor) -> float: cost = 0.0 depth_aligned = align(depth, param) for (i, j) in torch.combinations(torch.arange(ensemble_size)): diff = depth_aligned[i] - depth_aligned[j] cost += (diff ** 2).mean().sqrt().item() if regularizer_strength > 0: (prediction, _) = ensemble(depth_aligned, return_uncertainty=False) err_near = (0.0 - prediction.min()).abs().item() err_far = (1.0 - prediction.max()).abs().item() cost += (err_near + err_far) * regularizer_strength return cost def compute_param(depth: torch.Tensor): import scipy depth_to_align = depth.to(torch.float32) if max_res is not None and max(depth_to_align.shape[2:]) > max_res: depth_to_align = MarigoldImageProcessor.resize_to_max_edge(depth_to_align, max_res, 'nearest-exact') param = init_param(depth_to_align) res = scipy.optimize.minimize(partial(cost_fn, depth=depth_to_align), param, method='BFGS', tol=tol, options={'maxiter': max_iter, 'disp': False}) return res.x requires_aligning = scale_invariant or shift_invariant ensemble_size = depth.shape[0] if requires_aligning: param = compute_param(depth) depth = align(depth, param) (depth, uncertainty) = ensemble(depth, return_uncertainty=output_uncertainty) depth_max = depth.max() if scale_invariant and shift_invariant: depth_min = depth.min() elif scale_invariant: depth_min = 0 else: raise ValueError('Unrecognized alignment.') depth_range = (depth_max - depth_min).clamp(min=1e-06) depth = (depth - depth_min) / depth_range if output_uncertainty: uncertainty /= depth_range return (depth, uncertainty) # File: diffusers-main/src/diffusers/pipelines/marigold/pipeline_marigold_normals.py from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch from PIL import Image from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from ...image_processor import PipelineImageInput from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import DDIMScheduler, LCMScheduler from ...utils import BaseOutput, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from 
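# Hedged sketch, not part of the diffusers sources: ensemble_depth above fits a
# per-member scale (and optional shift) with scipy's BFGS, aligns the members,
# and reduces them with a mean or median. The standalone toy example below shows
# only the closed-form initialization of that alignment plus the median
# reduction; the function name and shapes are illustrative.
def _example_align_and_reduce_depth():
    import torch

    depth = torch.rand(3, 1, 8, 8)                               # 3 ensemble members
    flat = depth.reshape(3, -1)
    s = 1.0 / (flat.max(dim=1).values - flat.min(dim=1).values).clamp(min=1e-6)
    t = -s * flat.min(dim=1).values
    aligned = depth * s.view(-1, 1, 1, 1) + t.view(-1, 1, 1, 1)  # each member spans roughly [0, 1]
    return aligned.median(dim=0, keepdim=True).values            # 'median' reduction, [1, 1, 8, 8]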
..pipeline_utils import DiffusionPipeline from .marigold_image_processing import MarigoldImageProcessor logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\nExamples:\n```py\n>>> import diffusers\n>>> import torch\n\n>>> pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(\n... "prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16\n... ).to("cuda")\n\n>>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")\n>>> normals = pipe(image)\n\n>>> vis = pipe.image_processor.visualize_normals(normals.prediction)\n>>> vis[0].save("einstein_normals.png")\n```\n' @dataclass class MarigoldNormalsOutput(BaseOutput): prediction: Union[np.ndarray, torch.Tensor] uncertainty: Union[None, np.ndarray, torch.Tensor] latent: Union[None, torch.Tensor] class MarigoldNormalsPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->unet->vae' supported_prediction_types = ('normals',) def __init__(self, unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: Union[DDIMScheduler, LCMScheduler], text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, prediction_type: Optional[str]=None, use_full_z_range: Optional[bool]=True, default_denoising_steps: Optional[int]=None, default_processing_resolution: Optional[int]=None): super().__init__() if prediction_type not in self.supported_prediction_types: logger.warning(f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: {self.supported_prediction_types}.") self.register_modules(unet=unet, vae=vae, scheduler=scheduler, text_encoder=text_encoder, tokenizer=tokenizer) self.register_to_config(use_full_z_range=use_full_z_range, default_denoising_steps=default_denoising_steps, default_processing_resolution=default_processing_resolution) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.use_full_z_range = use_full_z_range self.default_denoising_steps = default_denoising_steps self.default_processing_resolution = default_processing_resolution self.empty_text_embedding = None self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor) def check_inputs(self, image: PipelineImageInput, num_inference_steps: int, ensemble_size: int, processing_resolution: int, resample_method_input: str, resample_method_output: str, batch_size: int, ensembling_kwargs: Optional[Dict[str, Any]], latents: Optional[torch.Tensor], generator: Optional[Union[torch.Generator, List[torch.Generator]]], output_type: str, output_uncertainty: bool) -> int: if num_inference_steps is None: raise ValueError('`num_inference_steps` is not specified and could not be resolved from the model config.') if num_inference_steps < 1: raise ValueError('`num_inference_steps` must be positive.') if ensemble_size < 1: raise ValueError('`ensemble_size` must be positive.') if ensemble_size == 2: logger.warning('`ensemble_size` == 2 results are similar to no ensembling (1); consider increasing the value to at least 3.') if ensemble_size == 1 and output_uncertainty: raise ValueError('Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` greater than 1.') if processing_resolution is None: raise ValueError('`processing_resolution` is not specified and could not be resolved from the model config.') if processing_resolution < 0: raise ValueError('`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for downsampled processing.') if processing_resolution % 
self.vae_scale_factor != 0: raise ValueError(f'`processing_resolution` must be a multiple of {self.vae_scale_factor}.') if resample_method_input not in ('nearest', 'nearest-exact', 'bilinear', 'bicubic', 'area'): raise ValueError('`resample_method_input` takes string values compatible with PIL library: nearest, nearest-exact, bilinear, bicubic, area.') if resample_method_output not in ('nearest', 'nearest-exact', 'bilinear', 'bicubic', 'area'): raise ValueError('`resample_method_output` takes string values compatible with PIL library: nearest, nearest-exact, bilinear, bicubic, area.') if batch_size < 1: raise ValueError('`batch_size` must be positive.') if output_type not in ['pt', 'np']: raise ValueError('`output_type` must be one of `pt` or `np`.') if latents is not None and generator is not None: raise ValueError('`latents` and `generator` cannot be used together.') if ensembling_kwargs is not None: if not isinstance(ensembling_kwargs, dict): raise ValueError('`ensembling_kwargs` must be a dictionary.') if 'reduction' in ensembling_kwargs and ensembling_kwargs['reduction'] not in ('closest', 'mean'): raise ValueError("`ensembling_kwargs['reduction']` can be either `'closest'` or `'mean'`.") num_images = 0 (W, H) = (None, None) if not isinstance(image, list): image = [image] for (i, img) in enumerate(image): if isinstance(img, np.ndarray) or torch.is_tensor(img): if img.ndim not in (2, 3, 4): raise ValueError(f'`image[{i}]` has unsupported dimensions or shape: {img.shape}.') (H_i, W_i) = img.shape[-2:] N_i = 1 if img.ndim == 4: N_i = img.shape[0] elif isinstance(img, Image.Image): (W_i, H_i) = img.size N_i = 1 else: raise ValueError(f'Unsupported `image[{i}]` type: {type(img)}.') if W is None: (W, H) = (W_i, H_i) elif (W, H) != (W_i, H_i): raise ValueError(f'Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}') num_images += N_i if latents is not None: if not torch.is_tensor(latents): raise ValueError('`latents` must be a torch.Tensor.') if latents.dim() != 4: raise ValueError(f'`latents` has unsupported dimensions or shape: {latents.shape}.') if processing_resolution > 0: max_orig = max(H, W) new_H = H * processing_resolution // max_orig new_W = W * processing_resolution // max_orig if new_H == 0 or new_W == 0: raise ValueError(f'Extreme aspect ratio of the input image: [{W} x {H}]') (W, H) = (new_W, new_H) w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w) if latents.shape != shape_expected: raise ValueError(f'`latents` has unexpected shape={latents.shape} expected={shape_expected}.') if generator is not None: if isinstance(generator, list): if len(generator) != num_images * ensemble_size: raise ValueError('The number of generators must match the total number of ensemble members for all input images.') if not all((g.device.type == generator[0].device.type for g in generator)): raise ValueError('`generator` device placement is not consistent in the list.') elif not isinstance(generator, torch.Generator): raise ValueError(f'Unsupported generator type: {type(generator)}.') return num_images def progress_bar(self, iterable=None, total=None, desc=None, leave=True): if not hasattr(self, '_progress_bar_config'): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError(f'`self._progress_bar_config` should be of type `dict`, but is 
{type(self._progress_bar_config)}.') progress_bar_config = dict(**self._progress_bar_config) progress_bar_config['desc'] = progress_bar_config.get('desc', desc) progress_bar_config['leave'] = progress_bar_config.get('leave', leave) if iterable is not None: return tqdm(iterable, **progress_bar_config) elif total is not None: return tqdm(total=total, **progress_bar_config) else: raise ValueError('Either `total` or `iterable` has to be defined.') @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image: PipelineImageInput, num_inference_steps: Optional[int]=None, ensemble_size: int=1, processing_resolution: Optional[int]=None, match_input_resolution: bool=True, resample_method_input: str='bilinear', resample_method_output: str='bilinear', batch_size: int=1, ensembling_kwargs: Optional[Dict[str, Any]]=None, latents: Optional[Union[torch.Tensor, List[torch.Tensor]]]=None, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: str='np', output_uncertainty: bool=False, output_latent: bool=False, return_dict: bool=True): device = self._execution_device dtype = self.dtype if num_inference_steps is None: num_inference_steps = self.default_denoising_steps if processing_resolution is None: processing_resolution = self.default_processing_resolution num_images = self.check_inputs(image, num_inference_steps, ensemble_size, processing_resolution, resample_method_input, resample_method_output, batch_size, ensembling_kwargs, latents, generator, output_type, output_uncertainty) if self.empty_text_embedding is None: prompt = '' text_inputs = self.tokenizer(prompt, padding='do_not_pad', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids.to(device) self.empty_text_embedding = self.text_encoder(text_input_ids)[0] (image, padding, original_resolution) = self.image_processor.preprocess(image, processing_resolution, resample_method_input, device, dtype) (image_latent, pred_latent) = self.prepare_latents(image, latents, generator, ensemble_size, batch_size) del image batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat(batch_size, 1, 1) pred_latents = [] for i in self.progress_bar(range(0, num_images * ensemble_size, batch_size), leave=True, desc='Marigold predictions...'): batch_image_latent = image_latent[i:i + batch_size] batch_pred_latent = pred_latent[i:i + batch_size] effective_batch_size = batch_image_latent.shape[0] text = batch_empty_text_embedding[:effective_batch_size] self.scheduler.set_timesteps(num_inference_steps, device=device) for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc='Diffusion steps...'): batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1) noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0] batch_pred_latent = self.scheduler.step(noise, t, batch_pred_latent, generator=generator).prev_sample pred_latents.append(batch_pred_latent) pred_latent = torch.cat(pred_latents, dim=0) del (pred_latents, image_latent, batch_empty_text_embedding, batch_image_latent, batch_pred_latent, text, batch_latent, noise) prediction = torch.cat([self.decode_prediction(pred_latent[i:i + batch_size]) for i in range(0, pred_latent.shape[0], batch_size)], dim=0) if not output_latent: pred_latent = None prediction = self.image_processor.unpad_image(prediction, padding) uncertainty = None if ensemble_size > 1: prediction = prediction.reshape(num_images, ensemble_size, 
*prediction.shape[1:]) prediction = [self.ensemble_normals(prediction[i], output_uncertainty, **ensembling_kwargs or {}) for i in range(num_images)] (prediction, uncertainty) = zip(*prediction) prediction = torch.cat(prediction, dim=0) if output_uncertainty: uncertainty = torch.cat(uncertainty, dim=0) else: uncertainty = None if match_input_resolution: prediction = self.image_processor.resize_antialias(prediction, original_resolution, resample_method_output, is_aa=False) prediction = self.normalize_normals(prediction) if uncertainty is not None and output_uncertainty: uncertainty = self.image_processor.resize_antialias(uncertainty, original_resolution, resample_method_output, is_aa=False) if output_type == 'np': prediction = self.image_processor.pt_to_numpy(prediction) if uncertainty is not None and output_uncertainty: uncertainty = self.image_processor.pt_to_numpy(uncertainty) self.maybe_free_model_hooks() if not return_dict: return (prediction, uncertainty, pred_latent) return MarigoldNormalsOutput(prediction=prediction, uncertainty=uncertainty, latent=pred_latent) def prepare_latents(self, image: torch.Tensor, latents: Optional[torch.Tensor], generator: Optional[torch.Generator], ensemble_size: int, batch_size: int) -> Tuple[torch.Tensor, torch.Tensor]: def retrieve_latents(encoder_output): if hasattr(encoder_output, 'latent_dist'): return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') image_latent = torch.cat([retrieve_latents(self.vae.encode(image[i:i + batch_size])) for i in range(0, image.shape[0], batch_size)], dim=0) image_latent = image_latent * self.vae.config.scaling_factor image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) pred_latent = latents if pred_latent is None: pred_latent = randn_tensor(image_latent.shape, generator=generator, device=image_latent.device, dtype=image_latent.dtype) return (image_latent, pred_latent) def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor: if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels: raise ValueError(f'Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}.') prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] prediction = torch.clip(prediction, -1.0, 1.0) if not self.use_full_z_range: prediction[:, 2, :, :] *= 0.5 prediction[:, 2, :, :] += 0.5 prediction = self.normalize_normals(prediction) return prediction @staticmethod def normalize_normals(normals: torch.Tensor, eps: float=1e-06) -> torch.Tensor: if normals.dim() != 4 or normals.shape[1] != 3: raise ValueError(f'Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.') norm = torch.norm(normals, dim=1, keepdim=True) normals /= norm.clamp(min=eps) return normals @staticmethod def ensemble_normals(normals: torch.Tensor, output_uncertainty: bool, reduction: str='closest') -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if normals.dim() != 4 or normals.shape[1] != 3: raise ValueError(f'Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.') if reduction not in ('closest', 'mean'): raise ValueError(f'Unrecognized reduction method: {reduction}.') mean_normals = normals.mean(dim=0, keepdim=True) mean_normals = MarigoldNormalsPipeline.normalize_normals(mean_normals) sim_cos = (mean_normals * normals).sum(dim=1, keepdim=True) sim_cos = sim_cos.clamp(-1, 1) uncertainty = None if 
output_uncertainty: uncertainty = sim_cos.arccos() uncertainty = uncertainty.mean(dim=0, keepdim=True) / np.pi if reduction == 'mean': return (mean_normals, uncertainty) closest_indices = sim_cos.argmax(dim=0, keepdim=True) closest_indices = closest_indices.repeat(1, 3, 1, 1) closest_normals = torch.gather(normals, 0, closest_indices) return (closest_normals, uncertainty) # File: diffusers-main/src/diffusers/pipelines/musicldm/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, is_transformers_version _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.27.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_musicldm'] = ['MusicLDMPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.27.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_musicldm import MusicLDMPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/musicldm/pipeline_musicldm.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import ClapFeatureExtractor, ClapModel, ClapTextModelWithProjection, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import is_accelerate_available, is_accelerate_version, is_librosa_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin if is_librosa_available(): import librosa logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import MusicLDMPipeline\n >>> import torch\n >>> import scipy\n\n >>> repo_id = "ucsd-reach/musicldm"\n >>> pipe = MusicLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs"\n >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0]\n\n >>> # save the audio sample as a .wav file\n >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio)\n ```\n' class MusicLDMPipeline(DiffusionPipeline, StableDiffusionMixin): def __init__(self, vae: AutoencoderKL, text_encoder: Union[ClapTextModelWithProjection, ClapModel], tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], feature_extractor: Optional[ClapFeatureExtractor], unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, vocoder: SpeechT5HifiGan): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=feature_extractor, unet=unet, scheduler=scheduler, 
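# Hedged sketch, not library code: ensemble_normals above reduces an ensemble
# with the 'closest' rule, keeping per pixel the member most cosine-similar to
# the normalized mean normal. A small self-contained illustration with toy
# shapes; the function name is an assumption.
def _example_closest_normal_reduction():
    import torch
    import torch.nn.functional as F

    normals = F.normalize(torch.randn(4, 3, 8, 8), dim=1)          # 4 unit-normal maps
    mean_n = F.normalize(normals.mean(dim=0, keepdim=True), dim=1)
    sim = (mean_n * normals).sum(dim=1, keepdim=True)              # cosine similarity per pixel
    idx = sim.argmax(dim=0, keepdim=True).repeat(1, 3, 1, 1)
    return torch.gather(normals, 0, idx)                           # closest normals, [1, 3, 8, 8]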
vocoder=vocoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def _encode_prompt(self, prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLAP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder.get_text_features(text_input_ids.to(device), attention_mask=attention_mask.to(device)) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device) (bs_embed, seq_len) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') uncond_input_ids = uncond_input.input_ids.to(device) attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder.get_text_features(uncond_input_ids, attention_mask=attention_mask) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def mel_spectrogram_to_waveform(self, mel_spectrogram): if mel_spectrogram.dim() == 4: mel_spectrogram = mel_spectrogram.squeeze(1) waveform = self.vocoder(mel_spectrogram) waveform = waveform.cpu().float() return waveform def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype): if not is_librosa_available(): logger.info('Automatic scoring of the generated audio waveforms against the input prompt text requires the `librosa` package to resample the generated waveforms. Returning the audios in the order they were generated. To enable automatic scoring, install `librosa` with: `pip install librosa`.') return audio inputs = self.tokenizer(text, return_tensors='pt', padding=True) resampled_audio = librosa.resample(audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate) inputs['input_features'] = self.feature_extractor(list(resampled_audio), return_tensors='pt', sampling_rate=self.feature_extractor.sampling_rate).input_features.type(dtype) inputs = inputs.to(device) logits_per_text = self.text_encoder(**inputs).logits_per_text indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt] audio = torch.index_select(audio, 0, indices.reshape(-1).cpu()) return audio def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor if audio_length_in_s < min_audio_length_in_s: raise ValueError(f'`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but is {audio_length_in_s}.') if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: raise ValueError(f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of {self.vae_scale_factor}.") if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is 
{callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(self.vocoder.config.model_in_dim) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def enable_model_cpu_offload(self, gpu_id=0): if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.') device = torch.device(f'cuda:{gpu_id}') if self.device.type != 'cpu': self.to('cpu', silence_dtype_warnings=True) torch.cuda.empty_cache() model_sequence = [self.text_encoder.text_model, self.text_encoder.text_projection, self.unet, self.vae, self.vocoder, self.text_encoder] hook = None for cpu_offloaded_model in model_sequence: (_, hook) = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) self.final_offload_hook = hook @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, audio_length_in_s: Optional[float]=None, num_inference_steps: int=200, guidance_scale: float=2.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_waveforms_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, output_type: Optional[str]='np'): vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate if audio_length_in_s is None: audio_length_in_s = 
self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor height = int(audio_length_in_s / vocoder_upsample_factor) original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) if height % self.vae_scale_factor != 0: height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor logger.info(f'Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} so that it can be handled by the model. It will be cut to {audio_length_in_s} after the denoising process.') self.check_inputs(prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_waveforms_per_prompt, num_channels_latents, height, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=None, class_labels=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) self.maybe_free_model_hooks() if not output_type == 'latent': latents = 1 / self.vae.config.scaling_factor * latents mel_spectrogram = self.vae.decode(latents).sample else: return AudioPipelineOutput(audios=latents) audio = self.mel_spectrogram_to_waveform(mel_spectrogram) audio = audio[:, :original_waveform_length] if num_waveforms_per_prompt > 1 and prompt is not None: audio = self.score_waveforms(text=prompt, audio=audio, num_waveforms_per_prompt=num_waveforms_per_prompt, device=device, dtype=prompt_embeds.dtype) if output_type == 'np': audio = audio.numpy() if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio) # File: diffusers-main/src/diffusers/pipelines/onnx_utils.py import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from huggingface_hub.utils import validate_hf_hub_args from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, 
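# Hedged sketch, not library code: the MusicLDM denoising loop above duplicates
# the latents, predicts noise for the unconditional and text-conditioned halves,
# and blends them with the classifier-free guidance formula. A toy numeric
# illustration; shapes, names, and the guidance scale are illustrative.
def _example_classifier_free_guidance(guidance_scale=2.0):
    import torch

    noise_pred = torch.randn(2, 8, 16, 16)             # [uncond, text] stacked along dim 0
    uncond, text = noise_pred.chunk(2)
    return uncond + guidance_scale * (text - uncond)   # guided prediction, [1, 8, 16, 16]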
ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort logger = logging.get_logger(__name__) ORT_TO_NP_TYPE = {'tensor(bool)': np.bool_, 'tensor(int8)': np.int8, 'tensor(uint8)': np.uint8, 'tensor(int16)': np.int16, 'tensor(uint16)': np.uint16, 'tensor(int32)': np.int32, 'tensor(uint32)': np.uint32, 'tensor(int64)': np.int64, 'tensor(uint64)': np.uint64, 'tensor(float16)': np.float16, 'tensor(float)': np.float32, 'tensor(double)': np.float64} class OnnxRuntimeModel: def __init__(self, model=None, **kwargs): logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.') self.model = model self.model_save_dir = kwargs.get('model_save_dir', None) self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME) def __call__(self, **kwargs): inputs = {k: np.array(v) for (k, v) in kwargs.items()} return self.model.run(None, inputs) @staticmethod def load_model(path: Union[str, Path], provider=None, sess_options=None): if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider') provider = 'CPUExecutionProvider' return ort.InferenceSession(path, providers=[provider], sess_options=sess_options) def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str]=None, **kwargs): model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME src_path = self.model_save_dir.joinpath(self.latest_model_name) dst_path = Path(save_directory).joinpath(model_file_name) try: shutil.copyfile(src_path, dst_path) except shutil.SameFileError: pass src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME) if src_path.exists(): dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME) try: shutil.copyfile(src_path, dst_path) except shutil.SameFileError: pass def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs): if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return os.makedirs(save_directory, exist_ok=True) self._save_pretrained(save_directory, **kwargs) @classmethod @validate_hf_hub_args def _from_pretrained(cls, model_id: Union[str, Path], token: Optional[Union[bool, str, None]]=None, revision: Optional[Union[str, None]]=None, force_download: bool=False, cache_dir: Optional[str]=None, file_name: Optional[str]=None, provider: Optional[str]=None, sess_options: Optional['ort.SessionOptions']=None, **kwargs): model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME if os.path.isdir(model_id): model = OnnxRuntimeModel.load_model(Path(model_id, model_file_name).as_posix(), provider=provider, sess_options=sess_options) kwargs['model_save_dir'] = Path(model_id) else: model_cache_path = hf_hub_download(repo_id=model_id, filename=model_file_name, token=token, revision=revision, cache_dir=cache_dir, force_download=force_download) kwargs['model_save_dir'] = Path(model_cache_path).parent kwargs['latest_model_name'] = Path(model_cache_path).name model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options) return cls(model=model, **kwargs) @classmethod @validate_hf_hub_args def from_pretrained(cls, model_id: Union[str, Path], force_download: bool=True, token: Optional[str]=None, cache_dir: Optional[str]=None, **model_kwargs): revision = None if len(str(model_id).split('@')) == 2: (model_id, revision) = model_id.split('@') return cls._from_pretrained(model_id=model_id, revision=revision, cache_dir=cache_dir, 
force_download=force_download, token=token, **model_kwargs) # File: diffusers-main/src/diffusers/pipelines/pag/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_pag_controlnet_sd'] = ['StableDiffusionControlNetPAGPipeline'] _import_structure['pipeline_pag_controlnet_sd_xl'] = ['StableDiffusionXLControlNetPAGPipeline'] _import_structure['pipeline_pag_controlnet_sd_xl_img2img'] = ['StableDiffusionXLControlNetPAGImg2ImgPipeline'] _import_structure['pipeline_pag_hunyuandit'] = ['HunyuanDiTPAGPipeline'] _import_structure['pipeline_pag_kolors'] = ['KolorsPAGPipeline'] _import_structure['pipeline_pag_pixart_sigma'] = ['PixArtSigmaPAGPipeline'] _import_structure['pipeline_pag_sd'] = ['StableDiffusionPAGPipeline'] _import_structure['pipeline_pag_sd_3'] = ['StableDiffusion3PAGPipeline'] _import_structure['pipeline_pag_sd_animatediff'] = ['AnimateDiffPAGPipeline'] _import_structure['pipeline_pag_sd_xl'] = ['StableDiffusionXLPAGPipeline'] _import_structure['pipeline_pag_sd_xl_img2img'] = ['StableDiffusionXLPAGImg2ImgPipeline'] _import_structure['pipeline_pag_sd_xl_inpaint'] = ['StableDiffusionXLPAGInpaintPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_pag_controlnet_sd import StableDiffusionControlNetPAGPipeline from .pipeline_pag_controlnet_sd_xl import StableDiffusionXLControlNetPAGPipeline from .pipeline_pag_controlnet_sd_xl_img2img import StableDiffusionXLControlNetPAGImg2ImgPipeline from .pipeline_pag_hunyuandit import HunyuanDiTPAGPipeline from .pipeline_pag_kolors import KolorsPAGPipeline from .pipeline_pag_pixart_sigma import PixArtSigmaPAGPipeline from .pipeline_pag_sd import StableDiffusionPAGPipeline from .pipeline_pag_sd_3 import StableDiffusion3PAGPipeline from .pipeline_pag_sd_animatediff import AnimateDiffPAGPipeline from .pipeline_pag_sd_xl import StableDiffusionXLPAGPipeline from .pipeline_pag_sd_xl_img2img import StableDiffusionXLPAGImg2ImgPipeline from .pipeline_pag_sd_xl_inpaint import StableDiffusionXLPAGInpaintPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/pag/pag_utils.py import re from typing import Dict, List, Tuple, Union import torch import torch.nn as nn from ...models.attention_processor import Attention, AttentionProcessor, PAGCFGIdentitySelfAttnProcessor2_0, PAGIdentitySelfAttnProcessor2_0 from ...utils import logging logger = logging.get_logger(__name__) class PAGMixin: def _set_pag_attn_processor(self, pag_applied_layers, do_classifier_free_guidance): pag_attn_processors = self._pag_attn_processors if pag_attn_processors is None: raise ValueError('No PAG attention processors have been set. 
Set the attention processors by calling `set_pag_applied_layers` and passing the relevant parameters.') pag_attn_proc = pag_attn_processors[0] if do_classifier_free_guidance else pag_attn_processors[1] if hasattr(self, 'unet'): model: nn.Module = self.unet else: model: nn.Module = self.transformer def is_self_attn(module: nn.Module) -> bool: return isinstance(module, Attention) and (not module.is_cross_attention) def is_fake_integral_match(layer_id, name): layer_id = layer_id.split('.')[-1] name = name.split('.')[-1] return layer_id.isnumeric() and name.isnumeric() and (layer_id == name) for layer_id in pag_applied_layers: target_modules = [] for (name, module) in model.named_modules(): if is_self_attn(module) and re.search(layer_id, name) is not None and (not is_fake_integral_match(layer_id, name)): logger.debug(f'Applying PAG to layer: {name}') target_modules.append(module) if len(target_modules) == 0: raise ValueError(f'Cannot find PAG layer to set attention processor for: {layer_id}') for module in target_modules: module.processor = pag_attn_proc def _get_pag_scale(self, t): if self.do_pag_adaptive_scaling: signal_scale = self.pag_scale - self.pag_adaptive_scale * (1000 - t) if signal_scale < 0: signal_scale = 0 return signal_scale else: return self.pag_scale def _apply_perturbed_attention_guidance(self, noise_pred, do_classifier_free_guidance, guidance_scale, t): pag_scale = self._get_pag_scale(t) if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text, noise_pred_perturb) = noise_pred.chunk(3) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + pag_scale * (noise_pred_text - noise_pred_perturb) else: (noise_pred_text, noise_pred_perturb) = noise_pred.chunk(2) noise_pred = noise_pred_text + pag_scale * (noise_pred_text - noise_pred_perturb) return noise_pred def _prepare_perturbed_attention_guidance(self, cond, uncond, do_classifier_free_guidance): cond = torch.cat([cond] * 2, dim=0) if do_classifier_free_guidance: cond = torch.cat([uncond, cond], dim=0) return cond def set_pag_applied_layers(self, pag_applied_layers: Union[str, List[str]], pag_attn_processors: Tuple[AttentionProcessor, AttentionProcessor]=(PAGCFGIdentitySelfAttnProcessor2_0(), PAGIdentitySelfAttnProcessor2_0())): if not hasattr(self, '_pag_attn_processors'): self._pag_attn_processors = None if not isinstance(pag_applied_layers, list): pag_applied_layers = [pag_applied_layers] if pag_attn_processors is not None: if not isinstance(pag_attn_processors, tuple) or len(pag_attn_processors) != 2: raise ValueError('Expected a tuple of two attention processors') for i in range(len(pag_applied_layers)): if not isinstance(pag_applied_layers[i], str): raise ValueError(f'Expected either a string or a list of string but got type {type(pag_applied_layers[i])}') self.pag_applied_layers = pag_applied_layers self._pag_attn_processors = pag_attn_processors @property def pag_scale(self) -> float: return self._pag_scale @property def pag_adaptive_scale(self) -> float: return self._pag_adaptive_scale @property def do_pag_adaptive_scaling(self) -> bool: return self._pag_adaptive_scale > 0 and self._pag_scale > 0 and (len(self.pag_applied_layers) > 0) @property def do_perturbed_attention_guidance(self) -> bool: return self._pag_scale > 0 and len(self.pag_applied_layers) > 0 @property def pag_attn_processors(self) -> Dict[str, AttentionProcessor]: if self._pag_attn_processors is None: return {} valid_attn_processors = {x.__class__ for x in self._pag_attn_processors} processors = {} if 
hasattr(self, 'unet'): denoiser_module = self.unet elif hasattr(self, 'transformer'): denoiser_module = self.transformer else: raise ValueError('No denoiser module found.') for (name, proc) in denoiser_module.attn_processors.items(): if proc.__class__ in valid_attn_processors: processors[name] = proc return processors # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor from ..controlnet.multicontrolnet import MultiControlNetModel from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .pag_utils import PAGMixin logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install opencv-python transformers accelerate\n >>> from diffusers import AutoPipelineForText2Image, ControlNetModel, UniPCMultistepScheduler\n >>> from diffusers.utils import load_image\n >>> import numpy as np\n >>> import torch\n\n >>> import cv2\n >>> from PIL import Image\n\n >>> # download an image\n >>> image = load_image(\n ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"\n ... )\n >>> image = np.array(image)\n\n >>> # get canny image\n >>> image = cv2.Canny(image, 100, 200)\n >>> image = image[:, :, None]\n >>> image = np.concatenate([image, image, image], axis=2)\n >>> canny_image = Image.fromarray(image)\n\n >>> # load control net and stable diffusion v1-5\n >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, enable_pag=True\n ... )\n\n >>> # speed up diffusion process with faster scheduler and memory optimization\n >>> # remove following line if xformers is not installed\n >>> pipe.enable_xformers_memory_efficient_attention()\n\n >>> pipe.enable_model_cpu_offload()\n\n >>> # generate image\n >>> generator = torch.manual_seed(0)\n >>> image = pipe(\n ... "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting",\n ... guidance_scale=7.5,\n ... generator=generator,\n ... image=canny_image,\n ... pag_scale=10,\n ... 
).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionControlNetPAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True, pag_applied_layers: Union[str, List[str]]='mid'): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) self.register_to_config(requires_safety_checker=requires_safety_checker) self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size 
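# NOTE (illustrative sketch, not part of the original file): the embeddings assembled by
# `encode_prompt` above are later combined for perturbed-attention guidance. With classifier-free
# guidance enabled, `PAGMixin._prepare_perturbed_attention_guidance` stacks the batch as
# [uncond, cond, cond], and `_apply_perturbed_attention_guidance` chunks the noise prediction into
# three parts before recombining them. Shapes and scales below are illustrative only:
# >>> import torch
# >>> cond = torch.randn(1, 77, 768)
# >>> uncond = torch.randn(1, 77, 768)
# >>> stacked = torch.cat([uncond, torch.cat([cond] * 2, dim=0)], dim=0)  # shape (3, 77, 768)
# >>> noise_uncond, noise_text, noise_perturb = torch.randn(3, 4, 64, 64).chunk(3)
# >>> guidance_scale, pag_scale = 7.5, 3.0
# >>> guided = noise_uncond + guidance_scale * (noise_text - noise_uncond) + pag_scale * (noise_text - noise_perturb)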
elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): transposed_image = [list(t) for t in zip(*image)] if len(transposed_image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: if you pass`image` as a list of list, each sublist must have the same length as the number of controlnets, but the sublists in `image` got {len(transposed_image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in transposed_image: self.check_image(image_, prompt, prompt_embeds) elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') else: for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. 
The conditioning scale must be fixed across the batch.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=1.0, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], 
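# NOTE (illustrative sketch, not part of the original signature): the two PAG arguments that follow
# control the guidance weight. `pag_adaptive_scale` linearly decays the effective scale over the
# denoising trajectory via `PAGMixin._get_pag_scale`, i.e. max(pag_scale - pag_adaptive_scale * (1000 - t), 0):
# >>> pag_scale, pag_adaptive_scale, t = 3.0, 0.002, 500
# >>> max(pag_scale - pag_adaptive_scale * (1000 - t), 0)
# 2.0
# `control_guidance_start` / `control_guidance_end` above bound the fraction of sampling steps during
# which the ControlNet residuals are applied; the denoising loop turns them into a per-step keep mask:
# >>> num_steps, start, end = 10, 0.0, 0.5
# >>> [1.0 - float(i / num_steps < start or (i + 1) / num_steps > end) for i in range(num_steps)]
# [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]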
pag_scale: float=3.0, pag_adaptive_scale: float=0.0): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, image, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if isinstance(controlnet, ControlNetModel): image = self.prepare_image(image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) (height, width) = image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): images = [] if isinstance(image[0], list): image = [list(t) for t in zip(*image)] for image_ in image: image_ = self.prepare_image(image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode) images.append(image_) image = images (height, width) = 
image[0].shape[-2:] else: assert False (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) self._num_timesteps = len(timesteps) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, negative_image_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds added_cond_kwargs = {'image_embeds': ip_adapter_image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None controlnet_prompt_embeds = prompt_embeds controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) images = image if isinstance(image, list) else [image] for (i, single_image) in enumerate(images): if self.do_classifier_free_guidance: single_image = single_image.chunk(2)[0] if self.do_perturbed_attention_guidance: single_image = self._prepare_perturbed_attention_guidance(single_image, single_image, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: single_image = torch.cat([single_image] * 2) single_image = single_image.to(device) images[i] = single_image image = images if isinstance(image, list) else images[0] if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version('>=', '2.1') with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) control_model_input = latent_model_input if 
isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=image, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False) if guess_mode and self.do_classifier_free_guidance: down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') self.controlnet.to('cpu') torch.cuda.empty_cache() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.utils.import_utils import is_invisible_watermark_available from ...callbacks 
import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from .pag_utils import PAGMixin if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker from ..controlnet.multicontrolnet import MultiControlNetModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # !pip install opencv-python transformers accelerate\n >>> from diffusers import AutoPipelineForText2Image, ControlNetModel, AutoencoderKL\n >>> from diffusers.utils import load_image\n >>> import numpy as np\n >>> import torch\n\n >>> import cv2\n >>> from PIL import Image\n\n >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"\n >>> negative_prompt = "low quality, bad quality, sketches"\n\n >>> # download an image\n >>> image = load_image(\n ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"\n ... )\n\n >>> # initialize the models and pipeline\n >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization\n >>> controlnet = ControlNetModel.from_pretrained(\n ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16\n ... )\n >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0",\n ... controlnet=controlnet,\n ... vae=vae,\n ... torch_dtype=torch.float16,\n ... enable_pag=True,\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> # get canny image\n >>> image = np.array(image)\n >>> image = cv2.Canny(image, 100, 200)\n >>> image = image[:, :, None]\n >>> image = np.concatenate([image, image, image], axis=2)\n >>> canny_image = Image.fromarray(image)\n\n >>> # generate image\n >>> image = pipe(\n ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image, pag_scale=0.3\n ... ).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLControlNetPAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'feature_extractor', 'image_encoder'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None, pag_applied_layers: Union[str, List[str]]='mid'): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and 
isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, 
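# NOTE (illustrative sketch, not part of the original file): the second element returned here is the
# "unconditional" IP-Adapter branch, obtained either by encoding an all-zeros image (hidden-states
# path) or, as in this branch, by zeroing the image embedding. During classifier-free guidance the
# two are concatenated along the batch dimension, roughly:
# >>> import torch
# >>> image_embeds = torch.randn(1, 1024)  # hypothetical CLIP image-embedding size
# >>> uncond_image_embeds = torch.zeros_like(image_embeds)
# >>> torch.cat([uncond_image_embeds, image_embeds], dim=0).shape
# torch.Size([2, 1024])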
uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, image, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, negative_pooled_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. 
The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, 
List[float]]=1.0, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], pag_scale: float=3.0, pag_adaptive_scale: float=0.0): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, prompt_2, image, None, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, negative_pooled_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt, prompt_2, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) if isinstance(controlnet, ControlNetModel): image = self.prepare_image(image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, 
num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) (height, width) = image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): images = [] for image_ in image: image_ = self.prepare_image(image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) images.append(image_) image = images (height, width) = image[0].shape[-2:] else: assert False (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) self._num_timesteps = len(timesteps) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) if isinstance(image, list): original_size = original_size or image[0].shape[-2:] else: original_size = original_size or image.shape[-2:] target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids images = image if isinstance(image, list) else [image] for (i, single_image) in enumerate(images): if self.do_classifier_free_guidance: single_image = single_image.chunk(2)[0] if self.do_perturbed_attention_guidance: single_image = self._prepare_perturbed_attention_guidance(single_image, single_image, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: single_image = torch.cat([single_image] * 2) single_image = single_image.to(device) images[i] = single_image image = images if isinstance(image, list) else images[0] if ip_adapter_image_embeds is not None: for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, negative_image_embeds, self.do_classifier_free_guidance) elif 
self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) add_text_embeds = self._prepare_perturbed_attention_guidance(add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance) add_time_ids = self._prepare_perturbed_attention_guidance(add_time_ids, negative_add_time_ids, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order if self.denoising_end is not None and isinstance(self.denoising_end, float) and (self.denoising_end > 0) and (self.denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version('>=', '2.1') with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) control_model_input = latent_model_input if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=image, conditioning_scale=cond_scale, guess_mode=False, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False) if ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = ip_adapter_image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, 
added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) negative_add_time_ids = callback_outputs.pop('negative_add_time_ids', negative_add_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.utils.import_utils import is_invisible_watermark_available from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ControlNetModel, ImageProjection, 
UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import is_compiled_module, randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from .pag_utils import PAGMixin if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker from ..controlnet.multicontrolnet import MultiControlNetModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> # pip install accelerate transformers safetensors diffusers\n\n >>> import torch\n >>> import numpy as np\n >>> from PIL import Image\n\n >>> from transformers import DPTFeatureExtractor, DPTForDepthEstimation\n >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetPAGImg2ImgPipeline, AutoencoderKL\n >>> from diffusers.utils import load_image\n\n\n >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda")\n >>> feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-hybrid-midas")\n >>> controlnet = ControlNetModel.from_pretrained(\n ... "diffusers/controlnet-depth-sdxl-1.0-small",\n ... variant="fp16",\n ... use_safetensors=True,\n ... torch_dtype=torch.float16,\n ... )\n >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)\n >>> pipe = StableDiffusionXLControlNetPAGImg2ImgPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0",\n ... controlnet=controlnet,\n ... vae=vae,\n ... variant="fp16",\n ... use_safetensors=True,\n ... torch_dtype=torch.float16,\n ... enable_pag=True,\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n\n >>> def get_depth_map(image):\n ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda")\n ... with torch.no_grad(), torch.autocast("cuda"):\n ... depth_map = depth_estimator(image).predicted_depth\n\n ... depth_map = torch.nn.functional.interpolate(\n ... depth_map.unsqueeze(1),\n ... size=(1024, 1024),\n ... mode="bicubic",\n ... align_corners=False,\n ... )\n ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)\n ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)\n ... depth_map = (depth_map - depth_min) / (depth_max - depth_min)\n ... image = torch.cat([depth_map] * 3, dim=1)\n ... image = image.permute(0, 2, 3, 1).cpu().numpy()[0]\n ... image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))\n ... return image\n\n\n >>> prompt = "A robot, 4k photo"\n >>> image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((1024, 1024))\n >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization\n >>> depth_image = get_depth_map(image)\n\n >>> images = pipe(\n ... prompt,\n ... image=image,\n ... control_image=depth_image,\n ... strength=0.99,\n ... num_inference_steps=50,\n ... controlnet_conditioning_scale=controlnet_conditioning_scale,\n ... 
).images\n >>> images[0].save(f"robot_cat.png")\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') class StableDiffusionXLControlNetPAGImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, IPAdapterMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'feature_extractor', 'image_encoder'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'add_neg_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, requires_aesthetics_score: bool=False, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None, pag_applied_layers: Union[str, List[str]]='mid'): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not 
USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, 
uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, image, strength, num_inference_steps, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if num_inference_steps is None: raise ValueError('`num_inference_steps` cannot be None.') elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError(f'`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type {type(num_inference_steps)}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): 
raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning(f'You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)} prompts. 
The conditionings will be fixed across the prompts.') is_compiled = hasattr(F, 'scaled_dot_product_attention') and isinstance(self.controlnet, torch._dynamo.eval_frame.OptimizedModule) if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): self.check_image(image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if not isinstance(image, list): raise TypeError('For multiple controlnets: `image` must be type `list`') elif any((isinstance(i, list) for i in image)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif len(image) != len(self.controlnet.nets): raise ValueError(f'For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets.') for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False if isinstance(self.controlnet, ControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel)): if not isinstance(controlnet_conditioning_scale, float): raise TypeError('For single controlnet: `controlnet_conditioning_scale` must be type `float`.') elif isinstance(self.controlnet, MultiControlNetModel) or (is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel)): if isinstance(controlnet_conditioning_scale, list): if any((isinstance(i, list) for i in controlnet_conditioning_scale)): raise ValueError('A single batch of multiple conditionings are supported at the moment.') elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(self.controlnet.nets): raise ValueError('For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have the same length as the number of controlnets') else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError(f'`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list.') if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError(f'`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}.') for (start, end) in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError(f'control guidance start: {start} cannot be larger or equal to control guidance end: {end}.') if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if not image_is_pil and (not image_is_tensor) and (not image_is_np) and (not image_is_pil_list) and (not image_is_tensor_list) and (not image_is_np_list): raise TypeError(f'image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}') if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError(f'If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_control_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and (not guess_mode): image = torch.cat([image] * 2) return image def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') latents_mean = latents_std = None if hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: 
self.text_encoder_2.to('cpu') torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list(negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim > passed_add_embed_dim and expected_add_embed_dim - passed_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.') elif expected_add_embed_dim < passed_add_embed_dim and passed_add_embed_dim - expected_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.') elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return (add_time_ids, add_neg_time_ids) def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, control_image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, strength: float=0.8, num_inference_steps: int=50, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, controlnet_conditioning_scale: Union[float, List[float]]=0.8, guess_mode: bool=False, control_guidance_start: Union[float, List[float]]=0.0, control_guidance_end: Union[float, List[float]]=1.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, aesthetic_score: float=6.0, negative_aesthetic_score: 
float=2.5, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], pag_scale: float=3.0, pag_adaptive_scale: float=0.0): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and (not isinstance(control_guidance_end, list)): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 (control_guidance_start, control_guidance_end) = (mult * [control_guidance_start], mult * [control_guidance_end]) self.check_inputs(prompt, prompt_2, control_image, strength, num_inference_steps, None, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt, prompt_2, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image(image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) (height, width) = control_image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): control_images = [] 
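# For a MultiControlNetModel, each entry of `control_image` is preprocessed below to the requested height/width and collected into `control_images`;
# the final (height, width) is then read back from the first prepared image.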
for control_image_ in control_image: control_image_ = self.prepare_control_image(image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=False) control_images.append(control_image_) control_image = control_images (height, width) = control_image[0].shape[-2:] else: assert False self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) if latents is None: latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, True) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) controlnet_keep = [] for i in range(len(timesteps)): keeps = [1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for (s, e) in zip(control_guidance_start, control_guidance_end)] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) if isinstance(control_image, list): original_size = original_size or control_image[0].shape[-2:] else: original_size = original_size or control_image.shape[-2:] target_size = target_size or (height, width) if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim (add_time_ids, add_neg_time_ids) = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) control_images = control_image if isinstance(control_image, list) else [control_image] for (i, single_image) in enumerate(control_images): if self.do_classifier_free_guidance: single_image = single_image.chunk(2)[0] if self.do_perturbed_attention_guidance: single_image = self._prepare_perturbed_attention_guidance(single_image, single_image, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: single_image = torch.cat([single_image] * 2) single_image = single_image.to(device) control_images[i] = single_image control_image = control_images if isinstance(control_image, list) else control_images[0] if ip_adapter_image_embeds is not None: for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, negative_image_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds if self.do_perturbed_attention_guidance: prompt_embeds = 
self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) add_text_embeds = self._prepare_perturbed_attention_guidance(add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance) add_time_ids = self._prepare_perturbed_attention_guidance(add_time_ids, add_neg_time_ids, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) control_model_input = latent_model_input if isinstance(controlnet_keep[i], list): cond_scale = [c * s for (c, s) in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] (down_block_res_samples, mid_block_res_sample) = self.controlnet(control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=False, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False) if ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = ip_adapter_image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = 
callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) add_neg_time_ids = callback_outputs.pop('add_neg_time_ids', add_neg_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') self.controlnet.to('cpu') torch.cuda.empty_cache() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_hunyuandit.py import inspect from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import BertModel, BertTokenizer, CLIPImageProcessor, MT5Tokenizer, T5EncoderModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, HunyuanDiT2DModel from ...models.attention_processor import PAGCFGHunyuanAttnProcessor2_0, PAGHunyuanAttnProcessor2_0 from ...models.embeddings import get_2d_rotary_pos_embed from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDPMScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pag_utils import PAGMixin if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```python\n >>> import torch\n >>> from diffusers import AutoPipelineForText2Image\n\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers",\n ... torch_dtype=torch.float16,\n ... enable_pag=True,\n ... pag_applied_layers=[14],\n ... 
).to("cuda")\n\n >>> # prompt = "an astronaut riding a horse"\n >>> prompt = "一个宇航员在骑马"\n >>> image = pipe(prompt, guidance_scale=4, pag_scale=3).images[0]\n ```\n' STANDARD_RATIO = np.array([1.0, 4.0 / 3.0, 3.0 / 4.0, 16.0 / 9.0, 9.0 / 16.0]) STANDARD_SHAPE = [[(1024, 1024), (1280, 1280)], [(1024, 768), (1152, 864), (1280, 960)], [(768, 1024), (864, 1152), (960, 1280)], [(1280, 768)], [(768, 1280)]] STANDARD_AREA = [np.array([w * h for (w, h) in shapes]) for shapes in STANDARD_SHAPE] SUPPORTED_SHAPE = [(1024, 1024), (1280, 1280), (1024, 768), (1152, 864), (1280, 960), (768, 1024), (864, 1152), (960, 1280), (1280, 768), (768, 1280)] def map_to_standard_shapes(target_width, target_height): target_ratio = target_width / target_height closest_ratio_idx = np.argmin(np.abs(STANDARD_RATIO - target_ratio)) closest_area_idx = np.argmin(np.abs(STANDARD_AREA[closest_ratio_idx] - target_width * target_height)) (width, height) = STANDARD_SHAPE[closest_ratio_idx][closest_area_idx] return (width, height) def get_resize_crop_region_for_grid(src, tgt_size): th = tw = tgt_size (h, w) = src r = h / w if r > 1: resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return ((crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)) def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class HunyuanDiTPAGPipeline(DiffusionPipeline, PAGMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->transformer->vae' _optional_components = ['safety_checker', 'feature_extractor', 'text_encoder_2', 'tokenizer_2', 'text_encoder', 'tokenizer'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'prompt_embeds_2', 'negative_prompt_embeds_2'] def __init__(self, vae: AutoencoderKL, text_encoder: BertModel, tokenizer: BertTokenizer, transformer: HunyuanDiT2DModel, scheduler: DDPMScheduler, safety_checker: Optional[StableDiffusionSafetyChecker]=None, feature_extractor: Optional[CLIPImageProcessor]=None, requires_safety_checker: bool=True, text_encoder_2: Optional[T5EncoderModel]=None, tokenizer_2: Optional[MT5Tokenizer]=None, pag_applied_layers: Union[str, List[str]]='blocks.1'): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, text_encoder_2=text_encoder_2) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.default_sample_size = self.transformer.config.sample_size if hasattr(self, 'transformer') and self.transformer is not None else 128 self.set_pag_applied_layers(pag_applied_layers, pag_attn_processors=(PAGCFGHunyuanAttnProcessor2_0(), PAGHunyuanAttnProcessor2_0())) def encode_prompt(self, prompt: str, device: torch.device=None, dtype: torch.dtype=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, max_sequence_length: Optional[int]=None, text_encoder_index: int=0): if dtype is None: if self.text_encoder_2 is not None: dtype = self.text_encoder_2.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None if device is None: device = self._execution_device tokenizers = [self.tokenizer, self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = tokenizers[text_encoder_index] text_encoder = text_encoders[text_encoder_index] if max_sequence_length is None: if text_encoder_index == 0: max_length = 77 if text_encoder_index == 1: max_length = 256 else: max_length = max_sequence_length if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0] prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and
type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return (prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, prompt_embeds_2=None, negative_prompt_embeds_2=None, prompt_attention_mask_2=None, negative_prompt_attention_mask_2=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is None and prompt_embeds_2 is None: raise ValueError('Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError('Must provide `prompt_attention_mask` when specifying `prompt_embeds`.') if prompt_embeds_2 is not None and prompt_attention_mask_2 is None: raise ValueError('Must provide `prompt_attention_mask_2` when specifying `prompt_embeds_2`.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError('Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.') if negative_prompt_embeds_2 is not None and negative_prompt_attention_mask_2 is None: raise ValueError('Must provide `negative_prompt_attention_mask_2` when specifying `negative_prompt_embeds_2`.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds_2 is not None and negative_prompt_embeds_2 is not None: if prompt_embeds_2.shape != negative_prompt_embeds_2.shape: raise ValueError(f'`prompt_embeds_2` and `negative_prompt_embeds_2` must have the same shape when passed directly, but got: `prompt_embeds_2` {prompt_embeds_2.shape} != `negative_prompt_embeds_2` {negative_prompt_embeds_2.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_embeds_2: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds_2: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, prompt_attention_mask_2: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask_2: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=(1024, 1024), target_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), use_resolution_binning: bool=True, pag_scale: float=3.0, pag_adaptive_scale: float=0.0): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor height = int(height // 16 * 16) width = int(width // 16 * 16) if use_resolution_binning and (height, width) not in SUPPORTED_SHAPE: (width, height) = map_to_standard_shapes(width, height) height = int(height) width = int(width) logger.warning(f'Reshaped to (height, width)=({height}, {width}), Supported shapes are {SUPPORTED_SHAPE}') self.check_inputs(prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._interrupt = False self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) = self.encode_prompt(prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, 
do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=77, text_encoder_index=0) (prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2) = self.encode_prompt(prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds_2, negative_prompt_embeds=negative_prompt_embeds_2, prompt_attention_mask=prompt_attention_mask_2, negative_prompt_attention_mask=negative_prompt_attention_mask_2, max_sequence_length=256, text_encoder_index=1) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) grid_height = height // 8 // self.transformer.config.patch_size grid_width = width // 8 // self.transformer.config.patch_size base_size = 512 // 8 // self.transformer.config.patch_size grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size) image_rotary_emb = get_2d_rotary_pos_embed(self.transformer.inner_dim // self.transformer.num_heads, grid_crops_coords, (grid_height, grid_width)) style = torch.tensor([0], device=device) target_size = target_size or (height, width) add_time_ids = list(original_size + target_size + crops_coords_top_left) add_time_ids = torch.tensor([add_time_ids], dtype=prompt_embeds.dtype) if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) prompt_attention_mask = self._prepare_perturbed_attention_guidance(prompt_attention_mask, negative_prompt_attention_mask, self.do_classifier_free_guidance) prompt_embeds_2 = self._prepare_perturbed_attention_guidance(prompt_embeds_2, negative_prompt_embeds_2, self.do_classifier_free_guidance) prompt_attention_mask_2 = self._prepare_perturbed_attention_guidance(prompt_attention_mask_2, negative_prompt_attention_mask_2, self.do_classifier_free_guidance) add_time_ids = torch.cat([add_time_ids] * 3, dim=0) style = torch.cat([style] * 3, dim=0) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) prompt_attention_mask_2 = torch.cat([negative_prompt_attention_mask_2, prompt_attention_mask_2]) add_time_ids = torch.cat([add_time_ids] * 2, dim=0) style = torch.cat([style] * 2, dim=0) prompt_embeds = prompt_embeds.to(device=device) prompt_attention_mask = prompt_attention_mask.to(device=device) prompt_embeds_2 = prompt_embeds_2.to(device=device) prompt_attention_mask_2 = prompt_attention_mask_2.to(device=device) add_time_ids = add_time_ids.to(dtype=prompt_embeds.dtype, device=device).repeat(batch_size * num_images_per_prompt, 1) style = style.to(device=device).repeat(batch_size * num_images_per_prompt) num_warmup_steps = 
len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) if self.do_perturbed_attention_guidance: original_attn_proc = self.transformer.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to(dtype=latent_model_input.dtype) noise_pred = self.transformer(latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, text_embedding_mask=prompt_attention_mask, encoder_hidden_states_t5=prompt_embeds_2, text_embedding_mask_t5=prompt_attention_mask_2, image_meta_size=add_time_ids, style=style, image_rotary_emb=image_rotary_emb, return_dict=False)[0] (noise_pred, _) = noise_pred.chunk(2, dim=1) if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) prompt_embeds_2 = callback_outputs.pop('prompt_embeds_2', prompt_embeds_2) negative_prompt_embeds_2 = callback_outputs.pop('negative_prompt_embeds_2', negative_prompt_embeds_2) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.transformer.set_attn_processor(original_attn_proc) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_kolors.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from 
...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...schedulers import KarrasDiffusionSchedulers from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..kolors.pipeline_output import KolorsPipelineOutput from ..kolors.text_encoder import ChatGLMModel from ..kolors.tokenizer import ChatGLMTokenizer from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pag_utils import PAGMixin if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AutoPipelineForText2Image\n\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "Kwai-Kolors/Kolors-diffusers",\n ... variant="fp16",\n ... torch_dtype=torch.float16,\n ... enable_pag=True,\n ... pag_applied_layers=["down.block_2.attentions_1", "up.block_0.attentions_1"],\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = (\n ... "A photo of a ladybug, macro, zoom, high quality, film, holding a wooden sign with the text \'KOLORS\'"\n ... )\n >>> image = pipe(prompt, guidance_scale=5.5, pag_scale=1.5).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class KolorsPAGPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: ChatGLMModel, tokenizer: ChatGLMTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, force_zeros_for_empty_prompt: bool=False, pag_applied_layers: Union[str, List[str]]='mid'): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, max_sequence_length: int=256): device = device or self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer] text_encoders = [self.text_encoder] if prompt_embeds is None: prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): text_inputs = tokenizer(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, return_tensors='pt').to(device) output = text_encoder(input_ids=text_inputs['input_ids'], attention_mask=text_inputs['attention_mask'], position_ids=text_inputs['position_ids'], output_hidden_states=True) prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds_list.append(prompt_embeds) prompt_embeds = prompt_embeds_list[0] zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds 
is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt negative_prompt_embeds_list = [] for (tokenizer, text_encoder) in zip(tokenizers, text_encoders): uncond_input = tokenizer(uncond_tokens, padding='max_length', max_length=max_sequence_length, truncation=True, return_tensors='pt').to(device) output = text_encoder(input_ids=uncond_input['input_ids'], attention_mask=uncond_input['attention_mask'], position_ids=uncond_input['position_ids'], output_hidden_states=True) negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = negative_prompt_embeds_list[0] bs_embed = pooled_prompt_embeds.shape[0] pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if 
len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, num_inference_steps, height, width, negative_prompt=None, prompt_embeds=None, pooled_prompt_embeds=None, negative_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError(f'`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type {type(num_inference_steps)}.') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') if max_sequence_length is not None and max_sequence_length > 256: raise ValueError(f'`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], pag_scale: float=3.0, pag_adaptive_scale: float=0.0, max_sequence_length: int=256): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, num_inference_steps, height, width, negative_prompt, 
prompt_embeds, pooled_prompt_embeds, negative_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._interrupt = False self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) add_text_embeds = pooled_prompt_embeds text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) add_text_embeds = self._prepare_perturbed_attention_guidance(add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance) add_time_ids = self._prepare_perturbed_attention_guidance(add_time_ids, negative_add_time_ids, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, 
negative_image_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and isinstance(self.denoising_end, float) and (self.denoising_end > 0) and (self.denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) negative_add_time_ids = callback_outputs.pop('negative_add_time_ids', negative_add_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): 
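# Progress-bar cadence: update only on the final timestep or, once past the warmup steps, every
# `self.scheduler.order` iterations, so the bar stays aligned with `num_inference_steps` for
# higher-order schedulers.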
progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return KolorsPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py import html import inspect import re import urllib.parse as ul from typing import Callable, List, Optional, Tuple, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...image_processor import PixArtImageProcessor from ...models import AutoencoderKL, PixArtTransformer2DModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import BACKENDS_MAPPING, deprecate, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from ..pixart_alpha.pipeline_pixart_alpha import ASPECT_RATIO_256_BIN, ASPECT_RATIO_512_BIN, ASPECT_RATIO_1024_BIN from ..pixart_alpha.pipeline_pixart_sigma import ASPECT_RATIO_2048_BIN from .pag_utils import PAGMixin logger = logging.get_logger(__name__) if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AutoPipelineForText2Image\n\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",\n ... torch_dtype=torch.float16,\n ... pag_applied_layers=["blocks.14"],\n ... enable_pag=True,\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "A small cactus with a happy face in the Sahara desert"\n >>> image = pipe(prompt, pag_scale=4.0, guidance_scale=1.0).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class PixArtSigmaPAGPipeline(DiffusionPipeline, PAGMixin): bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = ['tokenizer', 'text_encoder'] model_cpu_offload_seq = 'text_encoder->transformer->vae' def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKL, transformer: PixArtTransformer2DModel, scheduler: KarrasDiffusionSchedulers, pag_applied_layers: Union[str, List[str]]='blocks.1'): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor) self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, negative_prompt: str='', num_images_per_prompt: int=1, device: Optional[torch.device]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, clean_caption: bool=False, max_sequence_length: int=300, **kwargs): if 'mask_feature' in kwargs: deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." 
deprecate('mask_feature', '1.0.0', deprecation_message, standard_warn=False) if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = max_sequence_length if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because T5 can only handle sequences up to {max_length} tokens: {removed_text}') prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') negative_prompt_attention_mask = uncond_input.attention_mask negative_prompt_attention_mask = negative_prompt_attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None return (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator 
= 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt, callback_steps, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError('Must provide `prompt_attention_mask` when specifying `prompt_embeds`.') if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError('Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError(f'`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask` {negative_prompt_attention_mask.shape}.') def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): 
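# Descriptive summary of the cleaning steps below: strip URLs and @handles, unescape HTML
# (BeautifulSoup/ftfy), drop CJK character ranges, normalize dashes and quotes, and collapse
# repeated punctuation and whitespace before the caption is tokenized.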
caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('<person>', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('&quot;?', '', caption) caption = re.sub('&amp', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, negative_prompt: str='', num_inference_steps: int=20, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=4.5, num_images_per_prompt: Optional[int]=1, height: Optional[int]=None, width: Optional[int]=None, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, clean_caption: bool=True, use_resolution_binning: bool=True, max_sequence_length: int=300, pag_scale: float=3.0, pag_adaptive_scale: float=0.0) -> Union[ImagePipelineOutput, Tuple]: height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor if use_resolution_binning: if self.transformer.config.sample_size == 256: aspect_ratio_bin = ASPECT_RATIO_2048_BIN elif self.transformer.config.sample_size == 128: aspect_ratio_bin = ASPECT_RATIO_1024_BIN elif self.transformer.config.sample_size == 64: aspect_ratio_bin = ASPECT_RATIO_512_BIN elif self.transformer.config.sample_size == 32: aspect_ratio_bin = ASPECT_RATIO_256_BIN else: raise ValueError('Invalid sample size') (orig_height, orig_width) = (height, width) (height, width) = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) self.check_inputs(prompt, height, width, negative_prompt, callback_steps, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) = self.encode_prompt(prompt, do_classifier_free_guidance, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, clean_caption=clean_caption, max_sequence_length=max_sequence_length) if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, do_classifier_free_guidance) prompt_attention_mask = self._prepare_perturbed_attention_guidance(prompt_attention_mask, negative_prompt_attention_mask, do_classifier_free_guidance) elif do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) 
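# Rough sketch of the batch layout produced by the PAGMixin helpers (their implementation is not
# shown in this file): with CFG + PAG the embeddings are ordered [negative, positive,
# positive-for-perturbed-attention] (3x batch), with CFG only [negative, positive] (2x), and with
# PAG only [positive, positive] (2x). That multiple is what later sizes `latent_model_input` via
# torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])).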
(timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) latent_channels = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, latent_channels, height, width, prompt_embeds.dtype, device, generator, latents) if self.do_perturbed_attention_guidance: original_attn_proc = self.transformer.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=do_classifier_free_guidance) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'resolution': None, 'aspect_ratio': None} num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) current_timestep = t if not torch.is_tensor(current_timestep): is_mps = latent_model_input.device.type == 'mps' if isinstance(current_timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) elif len(current_timestep.shape) == 0: current_timestep = current_timestep[None].to(latent_model_input.device) current_timestep = current_timestep.expand(latent_model_input.shape[0]) noise_pred = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, timestep=current_timestep, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, do_classifier_free_guidance, guidance_scale, current_timestep) elif do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.transformer.config.out_channels // 2 == latent_channels: noise_pred = noise_pred.chunk(2, dim=1)[0] else: noise_pred = noise_pred latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if use_resolution_binning: image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height) else: image = latents if not output_type == 'latent': image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.transformer.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_sd.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...configuration_utils import FrozenDict from 
...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .pag_utils import PAGMixin logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AutoPipelineForText2Image\n\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, enable_pag=True\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt, pag_scale=0.3).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionPAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True, pag_applied_layers: Union[str, List[str]]='mid'): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part 
of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], pag_scale: float=3.0, pag_adaptive_scale: float=0.0): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, None, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, 
lora_scale=lora_scale, clip_skip=self.clip_skip) if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, negative_image_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': ip_adapter_image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k 
in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_sd_3.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor import VaeImageProcessor from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin from ...models.attention_processor import PAGCFGJointAttnProcessor2_0, PAGJointAttnProcessor2_0 from ...models.autoencoders import AutoencoderKL from ...models.transformers import SD3Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from ..stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput from .pag_utils import PAGMixin if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AutoPipelineForText2Image\n\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "stabilityai/stable-diffusion-3-medium-diffusers",\n ... torch_dtype=torch.float16,\n ... enable_pag=True,\n ... pag_applied_layers=["blocks.13"],\n ... )\n >>> pipe.to("cuda")\n >>> prompt = "A cat holding a sign that says hello world"\n >>> image = pipe(prompt, guidance_scale=5.0, pag_scale=0.7).images[0]\n >>> image.save("sd3_pag.png")\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusion3PAGPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->text_encoder_3->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'negative_pooled_prompt_embeds'] def __init__(self, transformer: SD3Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_encoder_2: CLIPTextModelWithProjection, tokenizer_2: CLIPTokenizer, text_encoder_3: T5EncoderModel, tokenizer_3: T5TokenizerFast, pag_applied_layers: Union[str, List[str]]='blocks.1'): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, text_encoder_3=text_encoder_3, tokenizer=tokenizer, tokenizer_2=tokenizer_2, tokenizer_3=tokenizer_3, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.tokenizer_max_length = self.tokenizer.model_max_length if hasattr(self, 'tokenizer') and self.tokenizer is not None else 77 self.default_sample_size = self.transformer.config.sample_size if hasattr(self, 'transformer') and self.transformer is not None else 128 self.set_pag_applied_layers(pag_applied_layers, pag_attn_processors=(PAGCFGJointAttnProcessor2_0(), PAGJointAttnProcessor2_0())) def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=256, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if self.text_encoder_3 is None: return torch.zeros((batch_size * num_images_per_prompt, self.tokenizer_max_length, self.transformer.config.joint_attention_dim), device=device, dtype=dtype) text_inputs = self.tokenizer_3(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_3(prompt, padding='longest', return_tensors='pt').input_ids 
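# The comparison below detects prompts that exceed `max_sequence_length`: the "longest"-padded encoding is only
# longer (and unequal) when tokens were cut off, in which case the dropped text is decoded for the warning.
# A compact sketch of the same check, assuming a generic `tokenizer` and `prompt` (illustrative names only,
# not part of this file):
#
#     kept = tokenizer(prompt, padding="max_length", max_length=256, truncation=True,
#                      return_tensors="pt").input_ids
#     full = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
#     if full.shape[-1] >= kept.shape[-1] and not torch.equal(kept, full):
#         dropped = tokenizer.batch_decode(full[:, kept.shape[-1] - 1 : -1])
#         print(f"Truncated: {dropped}")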
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] dtype = self.text_encoder_3.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None, clip_skip: Optional[int]=None, clip_model_index: int=0): device = device or self._execution_device clip_tokenizers = [self.tokenizer, self.tokenizer_2] clip_text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = clip_tokenizers[clip_model_index] text_encoder = clip_text_encoders[clip_model_index] prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) return (prompt_embeds, pooled_prompt_embeds) def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], prompt_3: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, clip_skip: Optional[int]=None, max_sequence_length: int=256, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: 
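# When a LoRA scale is passed in, the text encoders are scaled here before encoding and symmetrically
# unscaled by the `unscale_lora_layers` calls at the end of `encode_prompt`. A compact sketch of that
# scale/encode/unscale pattern, assuming a PEFT-backed `text_encoder` and `input_ids` (illustrative, not
# part of this file):
#
#     scale_lora_layers(text_encoder, lora_scale)        # temporarily weight the LoRA adapters
#     try:
#         embeds = text_encoder(input_ids)[0]
#     finally:
#         unscale_lora_layers(text_encoder, lora_scale)  # restore the original weighting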
scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_3 = prompt_3 or prompt prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 (prompt_embed, pooled_prompt_embed) = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=0) (prompt_2_embed, pooled_prompt_2_embed) = self._get_clip_prompt_embeds(prompt=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=1) clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) t5_prompt_embed = self._get_t5_prompt_embeds(prompt=prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) clip_prompt_embeds = torch.nn.functional.pad(clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])) prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt_3 = negative_prompt_3 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 negative_prompt_3 = batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') (negative_prompt_embed, negative_pooled_prompt_embed) = self._get_clip_prompt_embeds(negative_prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=0) (negative_prompt_2_embed, negative_pooled_prompt_2_embed) = self._get_clip_prompt_embeds(negative_prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=1) negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) t5_negative_prompt_embed = self._get_t5_prompt_embeds(prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) negative_clip_prompt_embeds = torch.nn.functional.pad(negative_clip_prompt_embeds, (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1])) negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) negative_pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1) if self.text_encoder is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt, prompt_2, prompt_3, height, width, negative_prompt=None, negative_prompt_2=None, negative_prompt_3=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_3 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') elif prompt_3 is not None and (not isinstance(prompt_3, str) and (not isinstance(prompt_3, list))): raise ValueError(f'`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_3 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if latents is not None: return latents.to(device=device, dtype=dtype) shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, prompt_3: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=28, timesteps: List[int]=None, guidance_scale: float=7.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, joint_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=256, pag_scale: float=3.0, pag_adaptive_scale: float=0.0): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, prompt_3, height, width, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.joint_attention_kwargs.get('scale', None) if self.joint_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_3=prompt_3, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, clip_skip=self.clip_skip, 
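# encode_prompt (whose arguments continue below) yields four tensors: sequence-level prompt embeddings, their
# negatives, and the pooled CLIP projections plus their negatives. The branch that follows batches them for PAG
# and/or classifier-free guidance exactly as in the other PAG pipelines. A rough, hedged sketch of the resulting
# batch layout (illustrative only):
#
#     # CFG only:   [negative, positive]                          -> 2 * batch_size
#     # PAG only:   [positive, positive (perturbed attention)]    -> 2 * batch_size
#     # CFG + PAG:  [negative, positive, positive (perturbed)]    -> 3 * batch_size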
num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale) if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) pooled_prompt_embeds = self._prepare_perturbed_attention_guidance(pooled_prompt_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) if self.do_perturbed_attention_guidance: original_attn_proc = self.transformer.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer(hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=pooled_prompt_embeds, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.transformer.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return 
StableDiffusion3PipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel from ...models.lora import adjust_lora_scale_text_encoder from ...models.unets.unet_motion_model import MotionAdapter from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..animatediff.pipeline_output import AnimateDiffPipelineOutput from ..free_init_utils import FreeInitMixin from ..free_noise_utils import AnimateDiffFreeNoiseMixin from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pag_utils import PAGMixin logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AnimateDiffPAGPipeline, MotionAdapter, DDIMScheduler\n >>> from diffusers.utils import export_to_gif\n\n >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"\n >>> motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-2"\n >>> motion_adapter = MotionAdapter.from_pretrained(motion_adapter_id)\n >>> scheduler = DDIMScheduler.from_pretrained(\n ... model_id, subfolder="scheduler", beta_schedule="linear", steps_offset=1, clip_sample=False\n ... )\n >>> pipe = AnimateDiffPAGPipeline.from_pretrained(\n ... model_id,\n ... motion_adapter=motion_adapter,\n ... scheduler=scheduler,\n ... pag_applied_layers=["mid"],\n ... torch_dtype=torch.float16,\n ... ).to("cuda")\n\n >>> video = pipe(\n ... prompt="car, futuristic cityscape with neon lights, street, no human",\n ... negative_prompt="low quality, bad quality",\n ... num_inference_steps=25,\n ... guidance_scale=6.0,\n ... pag_scale=3.0,\n ... generator=torch.Generator().manual_seed(42),\n ... 
).frames[0]\n\n >>> export_to_gif(video, "animatediff_pag.gif")\n ```\n' class AnimateDiffPAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FreeInitMixin, AnimateDiffFreeNoiseMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['feature_extractor', 'image_encoder', 'motion_adapter'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: Union[UNet2DConditionModel, UNetMotionModel], motion_adapter: MotionAdapter, scheduler: KarrasDiffusionSchedulers, feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None, pag_applied_layers: Union[str, List[str]]='mid_block.*attn1'): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: 
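# The dtype for the prompt embeddings is resolved below with a simple fallback chain: prefer the text encoder's
# dtype, else the UNet's, else whatever the precomputed embeddings already use. A compact sketch of the same idea
# (function and argument names are illustrative, not part of this file):
#
#     def resolve_dtype(text_encoder, unet, embeds):
#         if text_encoder is not None:
#             return text_encoder.dtype
#         if unet is not None:
#             return unet.dtype
#         return embeds.dtype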
prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, 
do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def decode_latents(self, latents, decode_chunk_size: int=16): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) video = [] for i in range(0, latents.shape[0], decode_chunk_size): batch_latents = latents[i:i + decode_chunk_size] batch_latents = self.vae.decode(batch_latents).sample video.append(batch_latents) video = torch.cat(video) video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not 
None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): if self.free_noise_enabled: latents = self._prepare_latents_free_noise(batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, num_frames: Optional[int]=16, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], decode_chunk_size: int=16, pag_scale: float=3.0, pag_adaptive_scale: float=0.0): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 self.check_inputs(prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, 
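# prepare_ip_adapter_image_embeds (whose arguments continue below) returns one tensor per loaded IP-Adapter; with
# classifier-free guidance enabled each tensor already stacks [negative, positive] image embeddings along dim 0,
# which is why the loop right after it chunks them in two before re-batching for PAG. A hedged sketch of that
# post-processing with a placeholder tensor (shape is illustrative only):
#
#     embeds = torch.randn(2, 1, 4, 1024)                              # [negative, positive] stacked on dim 0
#     negative, positive = embeds.chunk(2)                             # split the CFG halves
#     pag_embeds = torch.cat([negative, positive, positive], dim=0)    # CFG + PAG layout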
self.do_classifier_free_guidance) for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, negative_image_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': ip_adapter_image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: (latents, timesteps) = self._apply_free_init(latents, free_init_iter, num_inference_steps, device, latents.dtype, generator) self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // num_frames // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs).sample if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents, decode_chunk_size) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (video,) return 
AnimateDiffPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_sd_xl.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from .pag_utils import PAGMixin if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AutoPipelineForText2Image\n\n >>> pipe = AutoPipelineForText2Image.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0",\n ... torch_dtype=torch.float16,\n ... enable_pag=True,\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt, pag_scale=0.3).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLPAGPipeline(DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, pag_applied_layers: Union[str, List[str]]='mid'): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else 
[self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, 
uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', 
return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], pag_scale: float=3.0, pag_adaptive_scale: float=0.0): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, prompt_2, height, width, None, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._interrupt = False self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, 
negative_prompt_embeds, self.do_classifier_free_guidance) add_text_embeds = self._prepare_perturbed_attention_guidance(add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance) add_time_ids = self._prepare_perturbed_attention_guidance(add_time_ids, negative_add_time_ids, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, negative_image_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and isinstance(self.denoising_end, float) and (self.denoising_end > 0) and (self.denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = ip_adapter_image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif 
self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) negative_add_time_ids = callback_outputs.pop('negative_add_time_ids', negative_add_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, 
TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from .pag_utils import PAGMixin if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AutoPipelineForImage2Image\n >>> from diffusers.utils import load_image\n\n >>> pipe = AutoPipelineForImage2Image.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-refiner-1.0",\n ... torch_dtype=torch.float16,\n ... enable_pag=True,\n ... )\n >>> pipe = pipe.to("cuda")\n >>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"\n\n >>> init_image = load_image(url).convert("RGB")\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt, image=init_image, pag_scale=0.3).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLPAGImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'add_neg_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, requires_aesthetics_score: bool=False, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, pag_applied_layers: Union[str, List[str]]='mid'): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.set_pag_applied_layers(pag_applied_layers) def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if 
self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, strength, num_inference_steps, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if num_inference_steps is None: raise ValueError('`num_inference_steps` cannot be None.') elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError(f'`num_inference_steps` has to be a positive integer but is 
{num_inference_steps} of type {type(num_inference_steps)}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) else: discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_start * self.scheduler.config.num_train_timesteps)) num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: num_inference_steps = num_inference_steps + 1 t_start = len(self.scheduler.timesteps) - num_inference_steps timesteps = self.scheduler.timesteps[t_start:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start) return (timesteps, num_inference_steps) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') latents_mean = latents_std = None if hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.text_encoder_2.to('cpu') torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list(negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim > passed_add_embed_dim and expected_add_embed_dim - passed_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.') elif expected_add_embed_dim < passed_add_embed_dim and passed_add_embed_dim - expected_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.') elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return (add_time_ids, add_neg_time_ids) def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, strength: float=0.3, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_start: Optional[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, aesthetic_score: float=6.0, negative_aesthetic_score: float=2.5, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], pag_scale: float=3.0, pag_adaptive_scale: float=0.0): if 
isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs self.check_inputs(prompt, prompt_2, strength, num_inference_steps, None, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) image = self.image_processor.preprocess(image) def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) add_noise = True if self.denoising_start is None else False if latents is None: latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, add_noise) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) (height, width) = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim (add_time_ids, add_neg_time_ids) = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_perturbed_attention_guidance: prompt_embeds = 
self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) add_text_embeds = self._prepare_perturbed_attention_guidance(add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance) add_time_ids = self._prepare_perturbed_attention_guidance(add_time_ids, add_neg_time_ids, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, negative_image_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and (self.denoising_start >= self.denoising_end): raise ValueError(f'`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: ' + f' {self.denoising_end} when using type float.') elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = ip_adapter_image_embeds noise_pred = self.unet(latent_model_input, t, 
encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) add_neg_time_ids = callback_outputs.pop('add_neg_time_ids', add_neg_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, 
CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from .pag_utils import PAGMixin if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import AutoPipelineForInpainting\n >>> from diffusers.utils import load_image\n\n >>> pipe = AutoPipelineForInpainting.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0",\n ... torch_dtype=torch.float16,\n ... variant="fp16",\n ... enable_pag=True,\n ... )\n >>> pipe.to("cuda")\n\n >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"\n >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"\n\n >>> init_image = load_image(img_url).convert("RGB")\n >>> mask_image = load_image(mask_url).convert("RGB")\n\n >>> prompt = "A majestic tiger sitting on a bench"\n >>> image = pipe(\n ... prompt=prompt,\n ... image=init_image,\n ... mask_image=mask_image,\n ... num_inference_steps=50,\n ... strength=0.80,\n ... pag_scale=0.3,\n ... ).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLPAGInpaintPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, IPAdapterMixin, PAGMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'add_neg_time_ids', 'mask', 'masked_image_latents'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, requires_aesthetics_score: bool=False, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, pag_applied_layers: Union[str, List[str]]='mid'): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.set_pag_applied_layers(pag_applied_layers) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = 
self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: 
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, image, mask_image, height, width, strength, callback_steps, output_type, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, padding_mask_crop=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) 
or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if padding_mask_crop is not None: if not isinstance(image, PIL.Image.Image): raise ValueError(f'The image should be a PIL image when inpainting mask crop, but is of type {type(image)}.') if not isinstance(mask_image, PIL.Image.Image): raise ValueError(f'The mask image should be a PIL image when inpainting mask crop, but is of type {type(mask_image)}.') if output_type != 'pil': raise ValueError(f'The output type should be PIL when inpainting mask crop, but is {output_type}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, add_noise=True, return_noise=False, return_image_latents=False): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if (image is None or timestep is None) and (not is_strength_max): raise ValueError('Since strength < 1, initial latents are to be initialised as a combination of Image + Noise. However, either the image or the noise timestep has not been provided.') if image.shape[1] == 4: image_latents = image.to(device=device, dtype=dtype) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) elif return_image_latents or (latents is None and (not is_strength_max)): image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents(self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance): mask = torch.nn.functional.interpolate(mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask = mask.to(device=device, dtype=dtype) if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError(f"The passed mask and the required batch size don't match. 
Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size.") mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError(f"The passed images and the required batch size don't match. Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. Make sure the number of images that you pass is divisible by the total requested batch size.") masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) masked_image_latents = torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return (mask, masked_image_latents) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) else: discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_start * self.scheduler.config.num_train_timesteps)) num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: num_inference_steps = num_inference_steps + 1 t_start = len(self.scheduler.timesteps) - num_inference_steps timesteps = self.scheduler.timesteps[t_start:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start) return (timesteps, num_inference_steps) def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list(negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim > passed_add_embed_dim and expected_add_embed_dim - passed_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a 
vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.') elif expected_add_embed_dim < passed_add_embed_dim and passed_add_embed_dim - expected_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.') elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return (add_time_ids, add_neg_time_ids) def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, mask_image: PipelineImageInput=None, masked_image_latents: torch.Tensor=None, height: Optional[int]=None, width: Optional[int]=None, padding_mask_crop: Optional[int]=None, strength: float=0.9999, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_start: Optional[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: 
Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, aesthetic_score: float=6.0, negative_aesthetic_score: float=2.5, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], pag_scale: float=3.0, pag_adaptive_scale: float=0.0): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, image, mask_image, height, width, strength, None, output_type, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, padding_mask_crop) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False self._pag_scale = pag_scale self._pag_adaptive_scale = pag_adaptive_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None) if num_inference_steps < 1: raise ValueError(f'After adjusting the num_inference_steps by strength parameter: 
{strength}, the number of pipeline steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.') latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 if padding_mask_crop is not None: crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = 'fill' else: crops_coords = None resize_mode = 'default' original_image = image init_image = self.image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode) init_image = init_image.to(dtype=torch.float32) mask = self.mask_processor.preprocess(mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) if masked_image_latents is not None: masked_image = masked_image_latents elif init_image.shape[1] == 4: masked_image = None else: masked_image = init_image * (mask < 0.5) num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 add_noise = True if self.denoising_start is None else False latents_outputs = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, image=init_image, timestep=latent_timestep, is_strength_max=is_strength_max, add_noise=add_noise, return_noise=True, return_image_latents=return_image_latents) if return_image_latents: (latents, noise, image_latents) = latents_outputs else: (latents, noise) = latents_outputs (mask, masked_image_latents) = self.prepare_mask_latents(mask, masked_image, batch_size * num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance) if self.do_perturbed_attention_guidance: if self.do_classifier_free_guidance: (mask, _) = mask.chunk(2) (masked_image_latents, _) = masked_image_latents.chunk(2) mask = self._prepare_perturbed_attention_guidance(mask, mask, self.do_classifier_free_guidance) masked_image_latents = self._prepare_perturbed_attention_guidance(masked_image_latents, masked_image_latents, self.do_classifier_free_guidance) if num_channels_unet == 9: num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. 
Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') elif num_channels_unet != 4: raise ValueError(f'The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.') extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) (height, width) = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim (add_time_ids, add_neg_time_ids) = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_perturbed_attention_guidance: prompt_embeds = self._prepare_perturbed_attention_guidance(prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance) add_text_embeds = self._prepare_perturbed_attention_guidance(add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance) add_time_ids = self._prepare_perturbed_attention_guidance(add_time_ids, add_neg_time_ids, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) for (i, image_embeds) in enumerate(ip_adapter_image_embeds): negative_image_embeds = None if self.do_classifier_free_guidance: (negative_image_embeds, image_embeds) = image_embeds.chunk(2) if self.do_perturbed_attention_guidance: image_embeds = self._prepare_perturbed_attention_guidance(image_embeds, negative_image_embeds, self.do_classifier_free_guidance) elif self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) image_embeds = image_embeds.to(device) ip_adapter_image_embeds[i] = image_embeds num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and (self.denoising_start >= self.denoising_end): raise ValueError(f'`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: ' + f' {self.denoising_end} when using type float.') elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): 
discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) if self.do_perturbed_attention_guidance: original_attn_proc = self.unet.attn_processors self._set_pag_attn_processor(pag_applied_layers=self.pag_applied_layers, do_classifier_free_guidance=self.do_classifier_free_guidance) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = ip_adapter_image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_perturbed_attention_guidance: noise_pred = self._apply_perturbed_attention_guidance(noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t) elif self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if num_channels_unet == 4: init_latents_proper = image_latents if self.do_perturbed_attention_guidance: (init_mask, *_) = mask.chunk(3) if self.do_classifier_free_guidance else mask.chunk(2) else: (init_mask, *_) = mask.chunk(2) if self.do_classifier_free_guidance else mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise(init_latents_proper, noise, torch.tensor([noise_timestep])) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = 
callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) add_neg_time_ids = callback_outputs.pop('add_neg_time_ids', add_neg_time_ids) mask = callback_outputs.pop('mask', mask) masked_image_latents = callback_outputs.pop('masked_image_latents', masked_image_latents) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: return StableDiffusionXLPipelineOutput(images=latents) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] self.maybe_free_model_hooks() if self.do_perturbed_attention_guidance: self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/paint_by_example/__init__.py from dataclasses import dataclass from typing import TYPE_CHECKING, List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['image_encoder'] = ['PaintByExampleImageEncoder'] _import_structure['pipeline_paint_by_example'] = ['PaintByExamplePipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .image_encoder import PaintByExampleImageEncoder from .pipeline_paint_by_example import PaintByExamplePipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): 
setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/paint_by_example/image_encoder.py import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging logger = logging.get_logger(__name__) class PaintByExampleImageEncoder(CLIPPreTrainedModel): def __init__(self, config, proj_size=None): super().__init__(config) self.proj_size = proj_size or getattr(config, 'projection_dim', 768) self.model = CLIPVisionModel(config) self.mapper = PaintByExampleMapper(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size) self.proj_out = nn.Linear(config.hidden_size, self.proj_size) self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size))) def forward(self, pixel_values, return_uncond_vector=False): clip_output = self.model(pixel_values=pixel_values) latent_states = clip_output.pooler_output latent_states = self.mapper(latent_states[:, None]) latent_states = self.final_layer_norm(latent_states) latent_states = self.proj_out(latent_states) if return_uncond_vector: return (latent_states, self.uncond_vector) return latent_states class PaintByExampleMapper(nn.Module): def __init__(self, config): super().__init__() num_layers = (config.num_hidden_layers + 1) // 5 hid_size = config.hidden_size num_heads = 1 self.blocks = nn.ModuleList([BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn='gelu', attention_bias=True) for _ in range(num_layers)]) def forward(self, hidden_states): for block in self.blocks: hidden_states = block(hidden_states) return hidden_states # File: diffusers-main/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .image_encoder import PaintByExampleImageEncoder logger = logging.get_logger(__name__) def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def prepare_mask_and_masked_image(image, mask): if isinstance(image, torch.Tensor): if not isinstance(mask, torch.Tensor): raise TypeError(f'`image` is a torch.Tensor but `mask` (type: {type(mask)} is not') if image.ndim == 3: assert image.shape[0] == 3, 'Image outside a batch should be of shape (3, H, W)' image = image.unsqueeze(0) if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) if mask.ndim == 3: if mask.shape[0] == image.shape[0]: mask = mask.unsqueeze(1) else: mask = mask.unsqueeze(0) assert image.ndim == 4 and mask.ndim == 4, 'Image and Mask must have 4 
dimensions' assert image.shape[-2:] == mask.shape[-2:], 'Image and Mask must have the same spatial dimensions' assert image.shape[0] == mask.shape[0], 'Image and Mask must have the same batch size' assert mask.shape[1] == 1, 'Mask image must have a single channel' if image.min() < -1 or image.max() > 1: raise ValueError('Image should be in [-1, 1] range') if mask.min() < 0 or mask.max() > 1: raise ValueError('Mask should be in [0, 1] range') mask = 1 - mask mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 image = image.to(dtype=torch.float32) elif isinstance(mask, torch.Tensor): raise TypeError(f'`mask` is a torch.Tensor but `image` (type: {type(image)} is not') else: if isinstance(image, PIL.Image.Image): image = [image] image = np.concatenate([np.array(i.convert('RGB'))[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 if isinstance(mask, PIL.Image.Image): mask = [mask] mask = np.concatenate([np.array(m.convert('L'))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 mask = 1 - mask mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) masked_image = image * mask return (mask, masked_image) class PaintByExamplePipeline(DiffusionPipeline, StableDiffusionMixin): model_cpu_offload_seq = 'unet->vae' _exclude_from_cpu_offload = ['image_encoder'] _optional_components = ['safety_checker'] def __init__(self, vae: AutoencoderKL, image_encoder: PaintByExampleImageEncoder, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=False): super().__init__() self.register_modules(vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
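Aside: `prepare_mask_and_masked_image` above inverts the mask, so white (1.0) pixels in `mask_image` mark the region to repaint and are zeroed out of `masked_image`. A toy check of that convention, assuming torch and transformers are installed:

```py
# Toy tensors demonstrating the mask convention of prepare_mask_and_masked_image.
import torch
from diffusers.pipelines.paint_by_example.pipeline_paint_by_example import prepare_mask_and_masked_image

image = torch.ones(1, 3, 4, 4) * 0.5           # pixel values already in [-1, 1]
mask_image = torch.zeros(1, 1, 4, 4)
mask_image[..., :2, :2] = 1.0                  # top-left quadrant should be repainted

mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
print(mask[0, 0])                              # 0 where the edit happens, 1 elsewhere (inverted mask)
print(masked_image[0, 0, 0, 0].item())         # 0.0: the edit region is blanked out
print(masked_image[0, 0, 3, 3].item())         # 0.5: the untouched region keeps its pixels
```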
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs(self, image, height, width, callback_steps): if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is {type(image)}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_mask_latents(self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance): mask = torch.nn.functional.interpolate(mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask = mask.to(device=device, dtype=dtype) masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError(f"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size.") mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError(f"The passed images and the required batch size don't match. Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. 
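Aside: `prepare_latents` above allocates noise at the VAE's downsampled resolution, using the `vae_scale_factor` computed in `__init__`. A back-of-the-envelope check with an assumed Stable-Diffusion-style VAE config:

```py
# Latent shape arithmetic; block_out_channels is an assumed SD-style VAE config, not read from this file.
block_out_channels = (128, 256, 512, 512)
vae_scale_factor = 2 ** (len(block_out_channels) - 1)   # 8
batch_size, num_channels_latents, height, width = 1, 4, 512, 512
shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
print(shape)   # (1, 4, 64, 64): the tensor the scheduler and UNet actually operate on
```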
Make sure the number of images that you pass is divisible by the total requested batch size.") masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return (mask, masked_image_latents) def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(images=image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) (image_embeddings, negative_prompt_embeds) = self.image_encoder(image, return_uncond_vector=True) (bs_embed, seq_len, _) = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_prompt_embeds = negative_prompt_embeds.repeat(1, image_embeddings.shape[0], 1) negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, 1, -1) image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings @torch.no_grad() def __call__(self, example_image: Union[torch.Tensor, PIL.Image.Image], image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1): if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) else: batch_size = image.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (mask, masked_image) = prepare_mask_and_masked_image(image, mask_image) (height, width) = masked_image.shape[-2:] self.check_inputs(example_image, height, width, callback_steps) image_embeddings = self._encode_image(example_image, device, num_images_per_prompt, do_classifier_free_guidance) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents) (mask, masked_image_latents) = self.prepare_mask_latents(mask, masked_image, batch_size * num_images_per_prompt, height, width, image_embeddings.dtype, device, generator, 
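Aside: when guidance is enabled, `_encode_image` above prepends the encoder's learned unconditional vector to the image embeddings, and the denoising loop (continuing below) combines the two UNet predictions with the usual classifier-free-guidance update. A scalar stand-in, purely illustrative:

```py
# Classifier-free guidance update with scalar stand-ins for the two UNet outputs
# (real predictions are tensors of shape (batch, channels, h, w); numbers are illustrative).
guidance_scale = 5.0                         # default in PaintByExamplePipeline.__call__
noise_pred_uncond, noise_pred_cond = 0.25, 0.75
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(noise_pred)                            # 2.75: prediction pushed toward the conditioned direction
```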
do_classifier_free_guidance) num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, masked_image_latents, mask], dim=1) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) self.maybe_free_model_hooks() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, image_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/pia/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_pia'] = ['PIAPipeline', 'PIAPipelineOutput'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_pia import PIAPipeline, PIAPipelineOutput else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], 
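Aside: with the full `__call__` above in view, here is a hedged end-to-end usage sketch before the PIA files begin. The checkpoint id and file paths are placeholders chosen for illustration, not prescribed by this module.

```py
# Hedged usage sketch of PaintByExamplePipeline; checkpoint id and asset paths are assumptions.
import torch
from diffusers import PaintByExamplePipeline
from diffusers.utils import load_image

pipe = PaintByExamplePipeline.from_pretrained(
    "Fantasy-Studio/Paint-by-Example",  # assumed checkpoint id
    torch_dtype=torch.float16,
).to("cuda")

init_image = load_image("path/or/url/to/scene.png")         # image to edit
mask_image = load_image("path/or/url/to/mask.png")          # white = region to repaint
example_image = load_image("path/or/url/to/reference.png")  # reference object to paste in

result = pipe(example_image=example_image, image=init_image, mask_image=mask_image).images[0]
result.save("paint_by_example.png")
```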
_import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/pia/pipeline_pia.py import inspect from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel from ...models.lora import adjust_lora_scale_text_encoder from ...models.unets.unet_motion_model import MotionAdapter from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..free_init_utils import FreeInitMixin from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import EulerDiscreteScheduler, MotionAdapter, PIAPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> adapter = MotionAdapter.from_pretrained("openmmlab/PIA-condition-adapter")\n >>> pipe = PIAPipeline.from_pretrained(\n ... "SG161222/Realistic_Vision_V6.0_B1_noVAE", motion_adapter=adapter, torch_dtype=torch.float16\n ... )\n\n >>> pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)\n >>> image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true"\n ... 
)\n >>> image = image.resize((512, 512))\n >>> prompt = "cat in a hat"\n >>> negative_prompt = "wrong white balance, dark, sketches, worst quality, low quality, deformed, distorted"\n >>> generator = torch.Generator("cpu").manual_seed(0)\n >>> output = pipe(image=image, prompt=prompt, negative_prompt=negative_prompt, generator=generator)\n >>> frames = output.frames[0]\n >>> export_to_gif(frames, "pia-animation.gif")\n ```\n' RANGE_LIST = [[1.0, 0.9, 0.85, 0.85, 0.85, 0.8], [1.0, 0.8, 0.8, 0.8, 0.79, 0.78, 0.75], [1.0, 0.8, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.6, 0.5, 0.5], [1.0, 0.9, 0.85, 0.85, 0.85, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.85, 0.85, 0.9, 1.0], [1.0, 0.8, 0.8, 0.8, 0.79, 0.78, 0.75, 0.75, 0.75, 0.75, 0.75, 0.78, 0.79, 0.8, 0.8, 1.0], [1.0, 0.8, 0.7, 0.7, 0.7, 0.7, 0.6, 0.5, 0.5, 0.6, 0.7, 0.7, 0.7, 0.7, 0.8, 1.0], [0.5, 0.4, 0.4, 0.4, 0.35, 0.3], [0.5, 0.4, 0.4, 0.4, 0.35, 0.35, 0.3, 0.25, 0.2], [0.5, 0.2]] def prepare_mask_coef_by_statistics(num_frames: int, cond_frame: int, motion_scale: int): assert num_frames > 0, 'video_length should be greater than 0' assert num_frames > cond_frame, 'video_length should be greater than cond_frame' range_list = RANGE_LIST assert motion_scale < len(range_list), f'motion_scale type{motion_scale} not implemented' coef = range_list[motion_scale] coef = coef + [coef[-1]] * (num_frames - len(coef)) order = [abs(i - cond_frame) for i in range(num_frames)] coef = [coef[order[i]] for i in range(num_frames)] return coef @dataclass class PIAPipelineOutput(BaseOutput): frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] class PIAPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin, FreeInitMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['feature_extractor', 'image_encoder', 'motion_adapter'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: Union[UNet2DConditionModel, UNetMotionModel], scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler], motion_adapter: Optional[MotionAdapter]=None, feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None): super().__init__() if isinstance(unet, UNet2DConditionModel): unet = UNetMotionModel.from_unet2d(unet, motion_adapter) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, motion_adapter=motion_adapter, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif 
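Aside: `RANGE_LIST` and `prepare_mask_coef_by_statistics` above control how strongly each frame stays tied to the conditioning image. A quick call shows the decay pattern around the conditioning frame:

```py
# Per-frame mask coefficients for motion_scale=0; values come from RANGE_LIST above.
from diffusers.pipelines.pia.pipeline_pia import prepare_mask_coef_by_statistics

coef = prepare_mask_coef_by_statistics(num_frames=8, cond_frame=0, motion_scale=0)
print(coef)
# [1.0, 0.9, 0.85, 0.85, 0.85, 0.8, 0.8, 0.8]: strongest conditioning at frame 0,
# decaying with distance and clamped to the last listed value for the remaining frames.
```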
prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if 
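Aside: PIA's `decode_latents` above folds the frame axis into the batch so the 2D VAE can decode a video, then folds it back. A pure-shape walk-through in which the actual `vae.decode` call is replaced by a stand-in tensor (the 8x spatial upscale is the usual VAE factor, assumed here):

```py
# Shape walk-through of PIA's decode_latents; vae.decode is replaced by a random stand-in.
import torch

batch_size, channels, num_frames, height, width = 1, 4, 8, 64, 64
latents = torch.randn(batch_size, channels, num_frames, height, width)

flat = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
print(flat.shape)      # torch.Size([8, 4, 64, 64]) -> what the 2D VAE decoder actually sees

decoded = torch.randn(flat.shape[0], 3, height * 8, width * 8)   # stand-in for vae.decode(flat).sample
video = decoded[None, :].reshape((batch_size, num_frames, -1) + decoded.shape[2:]).permute(0, 2, 1, 3, 4)
print(video.shape)     # torch.Size([1, 3, 8, 512, 512]): (B, C, F, H, W) video tensor
```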
callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_masked_condition(self, image, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, motion_scale=0): shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) (_, _, _, scaled_height, scaled_width) = shape image = self.video_processor.preprocess(image) image = image.to(device, dtype) if isinstance(generator, list): image_latent = [self.vae.encode(image[k:k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size)] image_latent = torch.cat(image_latent, dim=0) else: image_latent = self.vae.encode(image).latent_dist.sample(generator) image_latent = image_latent.to(device=device, dtype=dtype) image_latent = torch.nn.functional.interpolate(image_latent, size=[scaled_height, scaled_width]) image_latent_padding = image_latent.clone() * self.vae.config.scaling_factor mask = torch.zeros((batch_size, 1, num_frames, scaled_height, scaled_width)).to(device=device, dtype=dtype) mask_coef = prepare_mask_coef_by_statistics(num_frames, 0, motion_scale) masked_image = torch.zeros(batch_size, 4, num_frames, scaled_height, scaled_width).to(device=device, dtype=self.unet.dtype) for f in range(num_frames): mask[:, :, f, :, :] = mask_coef[f] masked_image[:, :, f, :, :] = image_latent_padding.clone() mask = torch.cat([mask] * 2) if self.do_classifier_free_guidance else mask masked_image = torch.cat([masked_image] * 2) if self.do_classifier_free_guidance else masked_image return (mask, masked_image) def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image: PipelineImageInput, prompt: Union[str, List[str]]=None, strength: float=1.0, num_frames: Optional[int]=16, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, motion_scale: int=0, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, 
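Aside: `get_timesteps` above applies the usual img2img-style truncation so that `strength < 1.0` skips the earliest timesteps. The arithmetic with example numbers (first-order scheduler assumed):

```py
# Arithmetic behind get_timesteps for an illustrative strength value.
num_inference_steps, strength = 50, 0.6
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)   # 30
t_start = max(num_inference_steps - init_timestep, 0)                           # 20
print(init_timestep, t_start)         # 30 20 -> the first 20 of 50 timesteps are skipped
print(num_inference_steps - t_start)  # 30 denoising steps actually run
```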
callback_on_step_end_tensor_inputs: List[str]=['latents']): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_videos_per_prompt = 1 self.check_inputs(prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_videos_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) self._num_timesteps = len(timesteps) latents = self.prepare_latents(batch_size * num_videos_per_prompt, 4, num_frames, height, width, prompt_embeds.dtype, device, generator, latents=latents) (mask, masked_image) = self.prepare_masked_condition(image, batch_size * num_videos_per_prompt, 4, num_frames=num_frames, height=height, width=width, dtype=self.unet.dtype, device=device, generator=generator, motion_scale=motion_scale) if strength < 1.0: noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) latents = self.scheduler.add_noise(masked_image[0], noise, latent_timestep) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 for free_init_iter in range(num_free_init_iters): if self.free_init_enabled: (latents, timesteps) = self._apply_free_init(latents, free_init_iter, num_inference_steps, device, latents.dtype, generator) self._num_timesteps = len(timesteps) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=self._num_timesteps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, mask, masked_image], dim=1) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, 
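Aside: the loop continuing below honours `callback_on_step_end`, passing it the pipeline, step index, timestep, and the tensors named in `callback_on_step_end_tensor_inputs`, and popping any returned tensors back into the loop. A hedged sketch of a compatible hook (the function name and logging are illustrative only); note that the hook is invoked with four arguments and its return value is consumed, despite the narrower `Callable[[int, int, Dict], None]` annotation in the signature above.

```py
# Illustrative callback_on_step_end hook compatible with the PIA denoising loop.
def log_and_passthrough(pipe, step_index, timestep, callback_kwargs):
    latents = callback_kwargs["latents"]
    print(f"step {step_index:03d} | t={int(timestep)} | latents std={latents.std().item():.3f}")
    return callback_kwargs   # returned dict is .pop()'d back into the loop's locals

# output = pipe(image=image, prompt=prompt, callback_on_step_end=log_and_passthrough)
```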
added_cond_kwargs=added_cond_kwargs).sample if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return PIAPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/pipeline_flax_utils.py import importlib import inspect import os from typing import Any, Dict, List, Optional, Union import flax import numpy as np import PIL.Image from flax.core.frozen_dict import FrozenDict from huggingface_hub import create_repo, snapshot_download from huggingface_hub.utils import validate_hf_hub_args from PIL import Image from tqdm.auto import tqdm from ..configuration_utils import ConfigMixin from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin from ..utils import CONFIG_NAME, BaseOutput, PushToHubMixin, http_user_agent, is_transformers_available, logging if is_transformers_available(): from transformers import FlaxPreTrainedModel INDEX_FILE = 'diffusion_flax_model.bin' logger = logging.get_logger(__name__) LOADABLE_CLASSES = {'diffusers': {'FlaxModelMixin': ['save_pretrained', 'from_pretrained'], 'FlaxSchedulerMixin': ['save_pretrained', 'from_pretrained'], 'FlaxDiffusionPipeline': ['save_pretrained', 'from_pretrained']}, 'transformers': {'PreTrainedTokenizer': ['save_pretrained', 'from_pretrained'], 'PreTrainedTokenizerFast': ['save_pretrained', 'from_pretrained'], 'FlaxPreTrainedModel': ['save_pretrained', 'from_pretrained'], 'FeatureExtractionMixin': ['save_pretrained', 'from_pretrained'], 'ProcessorMixin': ['save_pretrained', 'from_pretrained'], 'ImageProcessingMixin': ['save_pretrained', 'from_pretrained']}} ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: class_obj = getattr(module, 'Flax' + class_name) except AttributeError: class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f'Neither Flax{class_name} nor {class_name} exist in {module}') return class_obj @flax.struct.dataclass class FlaxImagePipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin): config_name = 'model_index.json' def register_modules(self, **kwargs): from diffusers import pipelines for (name, module) in kwargs.items(): if module is None: register_dict = {name: (None, None)} else: library = module.__module__.split('.')[0] pipeline_dir = module.__module__.split('.')[-2] path = 
module.__module__.split('.') is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) if library not in LOADABLE_CLASSES or is_pipeline_module: library = pipeline_dir class_name = module.__class__.__name__ register_dict = {name: (library, class_name)} self.register_to_config(**register_dict) setattr(self, name, module) def save_pretrained(self, save_directory: Union[str, os.PathLike], params: Union[Dict, FrozenDict], push_to_hub: bool=False, **kwargs): self.save_config(save_directory) model_index_dict = dict(self.config) model_index_dict.pop('_class_name') model_index_dict.pop('_diffusers_version') model_index_dict.pop('_module', None) if push_to_hub: commit_message = kwargs.pop('commit_message', None) private = kwargs.pop('private', False) create_pr = kwargs.pop('create_pr', False) token = kwargs.pop('token', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id for pipeline_component_name in model_index_dict.keys(): sub_model = getattr(self, pipeline_component_name) if sub_model is None: continue model_cls = sub_model.__class__ save_method_name = None for (library_name, library_classes) in LOADABLE_CLASSES.items(): library = importlib.import_module(library_name) for (base_class, save_load_methods) in library_classes.items(): class_candidate = getattr(library, base_class, None) if class_candidate is not None and issubclass(model_cls, class_candidate): save_method_name = save_load_methods[0] break if save_method_name is not None: break save_method = getattr(sub_model, save_method_name) expects_params = 'params' in set(inspect.signature(save_method).parameters.keys()) if expects_params: save_method(os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name]) else: save_method(os.path.join(save_directory, pipeline_component_name)) if push_to_hub: self._upload_folder(save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr) @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): cache_dir = kwargs.pop('cache_dir', None) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', False) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) from_pt = kwargs.pop('from_pt', False) use_memory_efficient_attention = kwargs.pop('use_memory_efficient_attention', False) split_head_dim = kwargs.pop('split_head_dim', False) dtype = kwargs.pop('dtype', None) if not os.path.isdir(pretrained_model_name_or_path): config_dict = cls.load_config(pretrained_model_name_or_path, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision) folder_names = [k for k in config_dict.keys() if not k.startswith('_')] allow_patterns = [os.path.join(k, '*') for k in folder_names] allow_patterns += [FLAX_WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name] ignore_patterns = ['*.bin', '*.safetensors'] if not from_pt else [] ignore_patterns += ['*.onnx', '*.onnx_data', '*.xml', '*.pb'] if cls != FlaxDiffusionPipeline: requested_pipeline_class = cls.__name__ else: requested_pipeline_class = config_dict.get('_class_name', cls.__name__) requested_pipeline_class = requested_pipeline_class if requested_pipeline_class.startswith('Flax') else 'Flax' + requested_pipeline_class user_agent = {'pipeline_class': requested_pipeline_class} user_agent = 
http_user_agent(user_agent) cached_folder = snapshot_download(pretrained_model_name_or_path, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, user_agent=user_agent) else: cached_folder = pretrained_model_name_or_path config_dict = cls.load_config(cached_folder) if cls != FlaxDiffusionPipeline: pipeline_class = cls else: diffusers_module = importlib.import_module(cls.__module__.split('.')[0]) class_name = config_dict['_class_name'] if config_dict['_class_name'].startswith('Flax') else 'Flax' + config_dict['_class_name'] pipeline_class = getattr(diffusers_module, class_name) (expected_modules, optional_kwargs) = cls._get_signature_keys(pipeline_class) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} (init_dict, unused_kwargs, _) = pipeline_class.extract_init_dict(config_dict, **kwargs) init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict} init_kwargs = {**init_kwargs, **passed_pipe_kwargs} def load_module(name, value): if value[0] is None: return False if name in passed_class_obj and passed_class_obj[name] is None: return False return True init_dict = {k: v for (k, v) in init_dict.items() if load_module(k, v)} if len(unused_kwargs) > 0: logger.warning(f'Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored.') params = {} from diffusers import pipelines for (name, (library_name, class_name)) in init_dict.items(): if class_name is None: init_kwargs[name] = None continue is_pipeline_module = hasattr(pipelines, library_name) loaded_sub_model = None sub_model_should_be_defined = True if name in passed_class_obj: if not is_pipeline_module: library = importlib.import_module(library_name) class_obj = getattr(library, class_name) importable_classes = LOADABLE_CLASSES[library_name] class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} expected_class_obj = None for (class_name, class_candidate) in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): expected_class_obj = class_candidate if not issubclass(passed_class_obj[name].__class__, expected_class_obj): raise ValueError(f'{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be {expected_class_obj}') elif passed_class_obj[name] is None: logger.warning(f'You have passed `None` for {name} to disable its functionality in {pipeline_class}. Note that this might lead to problems when using {pipeline_class} and is not recommended.') sub_model_should_be_defined = False else: logger.warning(f'You have passed a non-standard module {passed_class_obj[name]}. 
We cannot verify whether it has the correct type') loaded_sub_model = passed_class_obj[name] elif is_pipeline_module: pipeline_module = getattr(pipelines, library_name) class_obj = import_flax_or_no_model(pipeline_module, class_name) importable_classes = ALL_IMPORTABLE_CLASSES class_candidates = {c: class_obj for c in importable_classes.keys()} else: library = importlib.import_module(library_name) class_obj = import_flax_or_no_model(library, class_name) importable_classes = LOADABLE_CLASSES[library_name] class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} if loaded_sub_model is None and sub_model_should_be_defined: load_method_name = None for (class_name, class_candidate) in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): load_method_name = importable_classes[class_name][1] load_method = getattr(class_obj, load_method_name) if os.path.isdir(os.path.join(cached_folder, name)): loadable_folder = os.path.join(cached_folder, name) else: loaded_sub_model = cached_folder if issubclass(class_obj, FlaxModelMixin): (loaded_sub_model, loaded_params) = load_method(loadable_folder, from_pt=from_pt, use_memory_efficient_attention=use_memory_efficient_attention, split_head_dim=split_head_dim, dtype=dtype) params[name] = loaded_params elif is_transformers_available() and issubclass(class_obj, FlaxPreTrainedModel): if from_pt: loaded_sub_model = load_method(loadable_folder, from_pt=from_pt) loaded_params = loaded_sub_model.params del loaded_sub_model._params else: (loaded_sub_model, loaded_params) = load_method(loadable_folder, _do_init=False) params[name] = loaded_params elif issubclass(class_obj, FlaxSchedulerMixin): (loaded_sub_model, scheduler_state) = load_method(loadable_folder) params[name] = scheduler_state else: loaded_sub_model = load_method(loadable_folder) init_kwargs[name] = loaded_sub_model missing_modules = set(expected_modules) - set(init_kwargs.keys()) passed_modules = list(passed_class_obj.keys()) if len(missing_modules) > 0 and missing_modules <= set(passed_modules): for module in missing_modules: init_kwargs[module] = passed_class_obj.get(module, None) elif len(missing_modules) > 0: passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs raise ValueError(f'Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed.') model = pipeline_class(**init_kwargs, dtype=dtype) return (model, params) @classmethod def _get_signature_keys(cls, obj): parameters = inspect.signature(obj.__init__).parameters required_parameters = {k: v for (k, v) in parameters.items() if v.default == inspect._empty} optional_parameters = set({k for (k, v) in parameters.items() if v.default != inspect._empty}) expected_modules = set(required_parameters.keys()) - {'self'} return (expected_modules, optional_parameters) @property def components(self) -> Dict[str, Any]: (expected_modules, optional_parameters) = self._get_signature_keys(self) components = {k: getattr(self, k) for k in self.config.keys() if not k.startswith('_') and k not in optional_parameters} if set(components.keys()) != expected_modules: raise ValueError(f'{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected {expected_modules} to be defined, but {components} are defined.') return components @staticmethod def numpy_to_pil(images): if images.ndim == 3: images = images[None, ...] 
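Aside: unlike the PyTorch pipelines, `FlaxDiffusionPipeline.from_pretrained` above returns the pipeline object together with a separate `params` pytree, and `save_pretrained` expects that pytree back. A hedged usage sketch; the model id, revision, and dtype are assumptions, not something this file prescribes, and Flax plus Flax-format weights must be available.

```py
# Hedged sketch of the (pipeline, params) convention of the Flax pipelines.
import jax.numpy as jnp
from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",   # assumed checkpoint providing Flax weights
    revision="bf16",                    # assumed branch carrying bfloat16 weights
    dtype=jnp.bfloat16,
)
# Weights live in the separate `params` pytree, which is what gets replicated or
# sharded across devices, and which must be passed back when saving:
# pipeline.save_pretrained("my-flax-pipeline", params=params)
```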
images = (images * 255).round().astype('uint8') if images.shape[-1] == 1: pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images] else: pil_images = [Image.fromarray(image) for image in images] return pil_images def progress_bar(self, iterable): if not hasattr(self, '_progress_bar_config'): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError(f'`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}.') return tqdm(iterable, **self._progress_bar_config) def set_progress_bar_config(self, **kwargs): self._progress_bar_config = kwargs # File: diffusers-main/src/diffusers/pipelines/pipeline_loading_utils.py import importlib import os import re import warnings from pathlib import Path from typing import Any, Dict, List, Optional, Union import torch from huggingface_hub import ModelCard, model_info from huggingface_hub.utils import validate_hf_hub_args from packaging import version from .. import __version__ from ..utils import FLAX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, deprecate, get_class_from_dynamic_module, is_accelerate_available, is_peft_available, is_transformers_available, logging from ..utils.torch_utils import is_compiled_module if is_transformers_available(): import transformers from transformers import PreTrainedModel from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME if is_accelerate_available(): import accelerate from accelerate import dispatch_model from accelerate.hooks import remove_hook_from_module from accelerate.utils import compute_module_sizes, get_max_memory INDEX_FILE = 'diffusion_pytorch_model.bin' CUSTOM_PIPELINE_FILE_NAME = 'pipeline.py' DUMMY_MODULES_FOLDER = 'diffusers.utils' TRANSFORMERS_DUMMY_MODULES_FOLDER = 'transformers.utils' CONNECTED_PIPES_KEYS = ['prior'] logger = logging.get_logger(__name__) LOADABLE_CLASSES = {'diffusers': {'ModelMixin': ['save_pretrained', 'from_pretrained'], 'SchedulerMixin': ['save_pretrained', 'from_pretrained'], 'DiffusionPipeline': ['save_pretrained', 'from_pretrained'], 'OnnxRuntimeModel': ['save_pretrained', 'from_pretrained']}, 'transformers': {'PreTrainedTokenizer': ['save_pretrained', 'from_pretrained'], 'PreTrainedTokenizerFast': ['save_pretrained', 'from_pretrained'], 'PreTrainedModel': ['save_pretrained', 'from_pretrained'], 'FeatureExtractionMixin': ['save_pretrained', 'from_pretrained'], 'ProcessorMixin': ['save_pretrained', 'from_pretrained'], 'ImageProcessingMixin': ['save_pretrained', 'from_pretrained']}, 'onnxruntime.training': {'ORTModule': ['save_pretrained', 'from_pretrained']}} ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def is_safetensors_compatible(filenames, passed_components=None, folder_names=None) -> bool: passed_components = passed_components or [] if folder_names is not None: filenames = {f for f in filenames if os.path.split(f)[0] in folder_names} components = {} for filename in filenames: if not len(filename.split('/')) == 2: continue (component, component_filename) = filename.split('/') if component in passed_components: continue components.setdefault(component, []) components[component].append(component_filename) for (component, component_filenames) in components.items(): 
matches = [] for component_filename in component_filenames: (filename, extension) = os.path.splitext(component_filename) match_exists = extension == '.safetensors' matches.append(match_exists) if not any(matches): return False return True def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]: weight_names = [WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME, FLAX_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME] if is_transformers_available(): weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] weight_prefixes = [w.split('.')[0] for w in weight_names] weight_suffixs = [w.split('.')[-1] for w in weight_names] transformers_index_format = '\\d{5}-of-\\d{5}' if variant is not None: variant_file_re = re.compile(f"({'|'.join(weight_prefixes)})\\.({variant}|{variant}-{transformers_index_format})\\.({'|'.join(weight_suffixs)})$") variant_index_re = re.compile(f"({'|'.join(weight_prefixes)})\\.({'|'.join(weight_suffixs)})\\.index\\.{variant}\\.json$") non_variant_file_re = re.compile(f"({'|'.join(weight_prefixes)})(-{transformers_index_format})?\\.({'|'.join(weight_suffixs)})$") non_variant_index_re = re.compile(f"({'|'.join(weight_prefixes)})\\.({'|'.join(weight_suffixs)})\\.index\\.json") if variant is not None: variant_weights = {f for f in filenames if variant_file_re.match(f.split('/')[-1]) is not None} variant_indexes = {f for f in filenames if variant_index_re.match(f.split('/')[-1]) is not None} variant_filenames = variant_weights | variant_indexes else: variant_filenames = set() non_variant_weights = {f for f in filenames if non_variant_file_re.match(f.split('/')[-1]) is not None} non_variant_indexes = {f for f in filenames if non_variant_index_re.match(f.split('/')[-1]) is not None} non_variant_filenames = non_variant_weights | non_variant_indexes usable_filenames = set(variant_filenames) def convert_to_variant(filename): if 'index' in filename: variant_filename = filename.replace('index', f'index.{variant}') elif re.compile(f'^(.*?){transformers_index_format}').match(filename) is not None: variant_filename = f"{filename.split('-')[0]}.{variant}-{'-'.join(filename.split('-')[1:])}" else: variant_filename = f"{filename.split('.')[0]}.{variant}.{filename.split('.')[1]}" return variant_filename for f in non_variant_filenames: variant_filename = convert_to_variant(f) if variant_filename not in usable_filenames: usable_filenames.add(f) return (usable_filenames, variant_filenames) @validate_hf_hub_args def warn_deprecated_model_variant(pretrained_model_name_or_path, token, variant, revision, model_filenames): info = model_info(pretrained_model_name_or_path, token=token, revision=None) filenames = {sibling.rfilename for sibling in info.siblings} (comp_model_filenames, _) = variant_compatible_siblings(filenames, variant=revision) comp_model_filenames = ['.'.join(f.split('.')[:1] + f.split('.')[2:]) for f in comp_model_filenames] if set(model_filenames).issubset(set(comp_model_filenames)): warnings.warn(f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning) else: warnings.warn(f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. 
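Aside: `variant_compatible_siblings` above decides which repository files to fetch for a given `variant`: variant files win, and a non-variant file is only kept when no variant counterpart exists. A toy run with a hand-made filename set:

```py
# Toy run of variant selection with variant="fp16"; filenames are made up for illustration.
from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings

filenames = {
    "unet/diffusion_pytorch_model.fp16.safetensors",
    "unet/diffusion_pytorch_model.safetensors",
    "text_encoder/model.safetensors",
}
usable, variant_only = variant_compatible_siblings(filenames, variant="fp16")
print(sorted(usable))
# ['text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.fp16.safetensors']
#   -> the fp16 unet shadows its non-variant twin; the text encoder falls back to the plain file
print(sorted(variant_only))
# ['unet/diffusion_pytorch_model.fp16.safetensors']
```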
This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.", FutureWarning) def _unwrap_model(model): if is_compiled_module(model): model = model._orig_mod if is_peft_available(): from peft import PeftModel if isinstance(model, PeftModel): model = model.base_model.model return model def maybe_raise_or_warn(library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module): if not is_pipeline_module: library = importlib.import_module(library_name) class_obj = getattr(library, class_name) class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} expected_class_obj = None for (class_name, class_candidate) in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): expected_class_obj = class_candidate sub_model = passed_class_obj[name] unwrapped_sub_model = _unwrap_model(sub_model) model_cls = unwrapped_sub_model.__class__ if not issubclass(model_cls, expected_class_obj): raise ValueError(f'{passed_class_obj[name]} is of type: {model_cls}, but should be {expected_class_obj}') else: logger.warning(f'You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it has the correct type') def get_class_obj_and_candidates(library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=None, cache_dir=None): component_folder = os.path.join(cache_dir, component_name) if is_pipeline_module: pipeline_module = getattr(pipelines, library_name) class_obj = getattr(pipeline_module, class_name) class_candidates = {c: class_obj for c in importable_classes.keys()} elif os.path.isfile(os.path.join(component_folder, library_name + '.py')): class_obj = get_class_from_dynamic_module(component_folder, module_file=library_name + '.py', class_name=class_name) class_candidates = {c: class_obj for c in importable_classes.keys()} else: library = importlib.import_module(library_name) class_obj = getattr(library, class_name) class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} return (class_obj, class_candidates) def _get_custom_pipeline_class(custom_pipeline, repo_id=None, hub_revision=None, class_name=None, cache_dir=None, revision=None): if custom_pipeline.endswith('.py'): path = Path(custom_pipeline) file_name = path.name custom_pipeline = path.parent.absolute() elif repo_id is not None: file_name = f'{custom_pipeline}.py' custom_pipeline = repo_id else: file_name = CUSTOM_PIPELINE_FILE_NAME if repo_id is not None and hub_revision is not None: revision = hub_revision return get_class_from_dynamic_module(custom_pipeline, module_file=file_name, class_name=class_name, cache_dir=cache_dir, revision=revision) def _get_pipeline_class(class_obj, config=None, load_connected_pipeline=False, custom_pipeline=None, repo_id=None, hub_revision=None, class_name=None, cache_dir=None, revision=None): if custom_pipeline is not None: return _get_custom_pipeline_class(custom_pipeline, repo_id=repo_id, hub_revision=hub_revision, class_name=class_name, cache_dir=cache_dir, revision=revision) if 
class_obj.__name__ != 'DiffusionPipeline': return class_obj diffusers_module = importlib.import_module(class_obj.__module__.split('.')[0]) class_name = class_name or config['_class_name'] if not class_name: raise ValueError('The class name could not be found in the configuration file. Please make sure to pass the correct `class_name`.') class_name = class_name[4:] if class_name.startswith('Flax') else class_name pipeline_cls = getattr(diffusers_module, class_name) if load_connected_pipeline: from .auto_pipeline import _get_connected_pipeline connected_pipeline_cls = _get_connected_pipeline(pipeline_cls) if connected_pipeline_cls is not None: logger.info(f'Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`') else: logger.info(f'{pipeline_cls.__name__} has no connected pipeline class. Loading {pipeline_cls.__name__}.') pipeline_cls = connected_pipeline_cls or pipeline_cls return pipeline_cls def _load_empty_model(library_name: str, class_name: str, importable_classes: List[Any], pipelines: Any, is_pipeline_module: bool, name: str, torch_dtype: Union[str, torch.dtype], cached_folder: Union[str, os.PathLike], **kwargs): (class_obj, _) = get_class_obj_and_candidates(library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=name, cache_dir=cached_folder) if is_transformers_available(): transformers_version = version.parse(version.parse(transformers.__version__).base_version) else: transformers_version = 'N/A' is_transformers_model = is_transformers_available() and issubclass(class_obj, PreTrainedModel) and (transformers_version >= version.parse('4.20.0')) diffusers_module = importlib.import_module(__name__.split('.')[0]) is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) model = None config_path = cached_folder user_agent = {'diffusers': __version__, 'file_type': 'model', 'framework': 'pytorch'} if is_diffusers_model: (config, unused_kwargs, commit_hash) = class_obj.load_config(os.path.join(config_path, name), cache_dir=cached_folder, return_unused_kwargs=True, return_commit_hash=True, force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), local_files_only=kwargs.pop('local_files_only', False), token=kwargs.pop('token', None), revision=kwargs.pop('revision', None), subfolder=kwargs.pop('subfolder', None), user_agent=user_agent) with accelerate.init_empty_weights(): model = class_obj.from_config(config, **unused_kwargs) elif is_transformers_model: config_class = getattr(class_obj, 'config_class', None) if config_class is None: raise ValueError('`config_class` cannot be None. 
Please double-check the model.') config = config_class.from_pretrained(cached_folder, subfolder=name, force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), local_files_only=kwargs.pop('local_files_only', False), token=kwargs.pop('token', None), revision=kwargs.pop('revision', None), user_agent=user_agent) with accelerate.init_empty_weights(): model = class_obj(config) if model is not None: model = model.to(dtype=torch_dtype) return model def _assign_components_to_devices(module_sizes: Dict[str, float], device_memory: Dict[str, float], device_mapping_strategy: str='balanced'): device_ids = list(device_memory.keys()) device_cycle = device_ids + device_ids[::-1] device_memory = device_memory.copy() device_id_component_mapping = {} current_device_index = 0 for component in module_sizes: device_id = device_cycle[current_device_index % len(device_cycle)] component_memory = module_sizes[component] curr_device_memory = device_memory[device_id] if component_memory > curr_device_memory: device_id_component_mapping['cpu'] = [component] else: if device_id not in device_id_component_mapping: device_id_component_mapping[device_id] = [component] else: device_id_component_mapping[device_id].append(component) device_memory[device_id] -= component_memory current_device_index += 1 return device_id_component_mapping def _get_final_device_map(device_map, pipeline_class, passed_class_obj, init_dict, library, max_memory, **kwargs): from diffusers import pipelines torch_dtype = kwargs.get('torch_dtype', torch.float32) init_empty_modules = {} for (name, (library_name, class_name)) in init_dict.items(): if class_name.startswith('Flax'): raise ValueError('Flax pipelines are not supported with `device_map`.') is_pipeline_module = hasattr(pipelines, library_name) importable_classes = ALL_IMPORTABLE_CLASSES loaded_sub_model = None if name in passed_class_obj: maybe_raise_or_warn(library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module) with accelerate.init_empty_weights(): loaded_sub_model = passed_class_obj[name] else: loaded_sub_model = _load_empty_model(library_name=library_name, class_name=class_name, importable_classes=importable_classes, pipelines=pipelines, is_pipeline_module=is_pipeline_module, pipeline_class=pipeline_class, name=name, torch_dtype=torch_dtype, cached_folder=kwargs.get('cached_folder', None), force_download=kwargs.get('force_download', None), proxies=kwargs.get('proxies', None), local_files_only=kwargs.get('local_files_only', None), token=kwargs.get('token', None), revision=kwargs.get('revision', None)) if loaded_sub_model is not None: init_empty_modules[name] = loaded_sub_model module_sizes = {module_name: compute_module_sizes(module, dtype=torch_dtype)[''] for (module_name, module) in init_empty_modules.items() if isinstance(module, torch.nn.Module)} module_sizes = dict(sorted(module_sizes.items(), key=lambda item: item[1], reverse=True)) max_memory = get_max_memory(max_memory) max_memory = dict(sorted(max_memory.items(), key=lambda item: item[1], reverse=True)) max_memory = {k: v for (k, v) in max_memory.items() if k != 'cpu'} final_device_map = None if len(max_memory) > 0: device_id_component_mapping = _assign_components_to_devices(module_sizes, max_memory, device_mapping_strategy=device_map) final_device_map = {} for (device_id, components) in device_id_component_mapping.items(): for component in components: final_device_map[component] = device_id return final_device_map def load_sub_model(library_name: str, 
class_name: str, importable_classes: List[Any], pipelines: Any, is_pipeline_module: bool, pipeline_class: Any, torch_dtype: torch.dtype, provider: Any, sess_options: Any, device_map: Optional[Union[Dict[str, torch.device], str]], max_memory: Optional[Dict[Union[int, str], Union[int, str]]], offload_folder: Optional[Union[str, os.PathLike]], offload_state_dict: bool, model_variants: Dict[str, str], name: str, from_flax: bool, variant: str, low_cpu_mem_usage: bool, cached_folder: Union[str, os.PathLike]): (class_obj, class_candidates) = get_class_obj_and_candidates(library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=name, cache_dir=cached_folder) load_method_name = None for (class_name, class_candidate) in class_candidates.items(): if class_candidate is not None and issubclass(class_obj, class_candidate): load_method_name = importable_classes[class_name][1] if load_method_name is None: none_module = class_obj.__module__ is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith(TRANSFORMERS_DUMMY_MODULES_FOLDER) if is_dummy_path and 'dummy' in none_module: class_obj() raise ValueError(f'The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}.') load_method = getattr(class_obj, load_method_name) diffusers_module = importlib.import_module(__name__.split('.')[0]) loading_kwargs = {} if issubclass(class_obj, torch.nn.Module): loading_kwargs['torch_dtype'] = torch_dtype if issubclass(class_obj, diffusers_module.OnnxRuntimeModel): loading_kwargs['provider'] = provider loading_kwargs['sess_options'] = sess_options is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) if is_transformers_available(): transformers_version = version.parse(version.parse(transformers.__version__).base_version) else: transformers_version = 'N/A' is_transformers_model = is_transformers_available() and issubclass(class_obj, PreTrainedModel) and (transformers_version >= version.parse('4.20.0')) if is_diffusers_model or is_transformers_model: loading_kwargs['device_map'] = device_map loading_kwargs['max_memory'] = max_memory loading_kwargs['offload_folder'] = offload_folder loading_kwargs['offload_state_dict'] = offload_state_dict loading_kwargs['variant'] = model_variants.pop(name, None) if from_flax: loading_kwargs['from_flax'] = True if is_transformers_model and loading_kwargs['variant'] is not None and (transformers_version < version.parse('4.27.0')): raise ImportError(f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0") elif is_transformers_model and loading_kwargs['variant'] is None: loading_kwargs.pop('variant') if not (from_flax and is_transformers_model): loading_kwargs['low_cpu_mem_usage'] = low_cpu_mem_usage else: loading_kwargs['low_cpu_mem_usage'] = False if os.path.isdir(os.path.join(cached_folder, name)): loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs) else: loaded_sub_model = load_method(cached_folder, **loading_kwargs) if isinstance(loaded_sub_model, torch.nn.Module) and isinstance(device_map, dict): remove_hook_from_module(loaded_sub_model, recurse=True) needs_offloading_to_cpu = device_map[''] == 'cpu' if needs_offloading_to_cpu: dispatch_model(loaded_sub_model, state_dict=loaded_sub_model.state_dict(), device_map=device_map, force_hooks=True, main_device=0) else: dispatch_model(loaded_sub_model, device_map=device_map, 
force_hooks=True) return loaded_sub_model def _fetch_class_library_tuple(module): diffusers_module = importlib.import_module(__name__.split('.')[0]) pipelines = getattr(diffusers_module, 'pipelines') not_compiled_module = _unwrap_model(module) library = not_compiled_module.__module__.split('.')[0] module_path_items = not_compiled_module.__module__.split('.') pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None path = not_compiled_module.__module__.split('.') is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) if is_pipeline_module: library = pipeline_dir elif library not in LOADABLE_CLASSES: library = not_compiled_module.__module__ class_name = not_compiled_module.__class__.__name__ return (library, class_name) def _identify_model_variants(folder: str, variant: str, config: dict) -> dict: model_variants = {} if variant is not None: for sub_folder in os.listdir(folder): folder_path = os.path.join(folder, sub_folder) is_folder = os.path.isdir(folder_path) and sub_folder in config variant_exists = is_folder and any((p.split('.')[1].startswith(variant) for p in os.listdir(folder_path))) if variant_exists: model_variants[sub_folder] = variant return model_variants def _resolve_custom_pipeline_and_cls(folder, config, custom_pipeline): custom_class_name = None if os.path.isfile(os.path.join(folder, f'{custom_pipeline}.py')): custom_pipeline = os.path.join(folder, f'{custom_pipeline}.py') elif isinstance(config['_class_name'], (list, tuple)) and os.path.isfile(os.path.join(folder, f"{config['_class_name'][0]}.py")): custom_pipeline = os.path.join(folder, f"{config['_class_name'][0]}.py") custom_class_name = config['_class_name'][1] return (custom_pipeline, custom_class_name) def _maybe_raise_warning_for_inpainting(pipeline_class, pretrained_model_name_or_path: str, config: dict): if pipeline_class.__name__ == 'StableDiffusionInpaintPipeline' and version.parse(version.parse(config['_diffusers_version']).base_version) <= version.parse('0.5.1'): from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy pipeline_class = StableDiffusionInpaintPipelineLegacy deprecation_message = f"You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For better inpainting results, we strongly suggest using Stable Diffusion's official inpainting checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your checkpoint {pretrained_model_name_or_path} to the format of https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain the {{StableDiffusionInpaintPipelineLegacy}} class and will likely remove it in version 1.0.0." 
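# --- editorial sketch (not part of the original source) ---------------------------
# `_fetch_class_library_tuple`, defined earlier in this module, is the helper that
# `DiffusionPipeline.register_modules` (in pipeline_utils.py below) uses to record
# each component as a `(library, class_name)` pair in `model_index.json`. A minimal
# illustration, assuming `diffusers` is installed; expected output shown as a comment:
from diffusers import DDIMScheduler
from diffusers.pipelines.pipeline_loading_utils import _fetch_class_library_tuple

scheduler = DDIMScheduler()
print(_fetch_class_library_tuple(scheduler))  # expected: ('diffusers', 'DDIMScheduler')
# A component coming from `transformers` (e.g. a CLIP text encoder) would instead
# yield ('transformers', ...), since that library is listed in LOADABLE_CLASSES.
# -----------------------------------------------------------------------------------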
deprecate('StableDiffusionInpaintPipelineLegacy', '1.0.0', deprecation_message, standard_warn=False) def _update_init_kwargs_with_connected_pipeline(init_kwargs: dict, passed_pipe_kwargs: dict, passed_class_objs: dict, folder: str, **pipeline_loading_kwargs) -> dict: from .pipeline_utils import DiffusionPipeline modelcard = ModelCard.load(os.path.join(folder, 'README.md')) connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} pipeline_loading_kwargs_cp = pipeline_loading_kwargs.copy() if pipeline_loading_kwargs_cp is not None and len(pipeline_loading_kwargs_cp) >= 1: for k in pipeline_loading_kwargs: if 'scheduler' in k: _ = pipeline_loading_kwargs_cp.pop(k) def get_connected_passed_kwargs(prefix): connected_passed_class_obj = {k.replace(f'{prefix}_', ''): w for (k, w) in passed_class_objs.items() if k.split('_')[0] == prefix} connected_passed_pipe_kwargs = {k.replace(f'{prefix}_', ''): w for (k, w) in passed_pipe_kwargs.items() if k.split('_')[0] == prefix} connected_passed_kwargs = {**connected_passed_class_obj, **connected_passed_pipe_kwargs} return connected_passed_kwargs connected_pipes = {prefix: DiffusionPipeline.from_pretrained(repo_id, **pipeline_loading_kwargs_cp, **get_connected_passed_kwargs(prefix)) for (prefix, repo_id) in connected_pipes.items() if repo_id is not None} for (prefix, connected_pipe) in connected_pipes.items(): init_kwargs.update({'_'.join([prefix, name]): component for (name, component) in connected_pipe.components.items()}) return init_kwargs # File: diffusers-main/src/diffusers/pipelines/pipeline_utils.py import fnmatch import importlib import inspect import os import re import sys from dataclasses import dataclass from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union, get_args, get_origin import numpy as np import PIL.Image import requests import torch from huggingface_hub import ModelCard, create_repo, hf_hub_download, model_info, snapshot_download from huggingface_hub.utils import OfflineModeIsEnabled, validate_hf_hub_args from packaging import version from requests.exceptions import HTTPError from tqdm.auto import tqdm from .. 
import __version__ from ..configuration_utils import ConfigMixin from ..models import AutoencoderKL from ..models.attention_processor import FusedAttnProcessor2_0 from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, ModelMixin from ..schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from ..utils import CONFIG_NAME, DEPRECATED_REVISION_ARGS, BaseOutput, PushToHubMixin, deprecate, is_accelerate_available, is_accelerate_version, is_torch_npu_available, is_torch_version, logging, numpy_to_pil from ..utils.hub_utils import load_or_create_model_card, populate_model_card from ..utils.torch_utils import is_compiled_module if is_torch_npu_available(): import torch_npu from .pipeline_loading_utils import ALL_IMPORTABLE_CLASSES, CONNECTED_PIPES_KEYS, CUSTOM_PIPELINE_FILE_NAME, LOADABLE_CLASSES, _fetch_class_library_tuple, _get_custom_pipeline_class, _get_final_device_map, _get_pipeline_class, _identify_model_variants, _maybe_raise_warning_for_inpainting, _resolve_custom_pipeline_and_cls, _unwrap_model, _update_init_kwargs_with_connected_pipeline, is_safetensors_compatible, load_sub_model, maybe_raise_or_warn, variant_compatible_siblings, warn_deprecated_model_variant if is_accelerate_available(): import accelerate LIBRARIES = [] for library in LOADABLE_CLASSES: LIBRARIES.append(library) SUPPORTED_DEVICE_MAP = ['balanced'] logger = logging.get_logger(__name__) @dataclass class ImagePipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] @dataclass class AudioPipelineOutput(BaseOutput): audios: np.ndarray class DiffusionPipeline(ConfigMixin, PushToHubMixin): config_name = 'model_index.json' model_cpu_offload_seq = None hf_device_map = None _optional_components = [] _exclude_from_cpu_offload = [] _load_connected_pipes = False _is_onnx = False def register_modules(self, **kwargs): for (name, module) in kwargs.items(): if module is None or (isinstance(module, (tuple, list)) and module[0] is None): register_dict = {name: (None, None)} else: (library, class_name) = _fetch_class_library_tuple(module) register_dict = {name: (library, class_name)} self.register_to_config(**register_dict) setattr(self, name, module) def __setattr__(self, name: str, value: Any): if name in self.__dict__ and hasattr(self.config, name): if isinstance(getattr(self.config, name), (tuple, list)): if value is not None and self.config[name][0] is not None: class_library_tuple = _fetch_class_library_tuple(value) else: class_library_tuple = (None, None) self.register_to_config(**{name: class_library_tuple}) else: self.register_to_config(**{name: value}) super().__setattr__(name, value) def save_pretrained(self, save_directory: Union[str, os.PathLike], safe_serialization: bool=True, variant: Optional[str]=None, push_to_hub: bool=False, **kwargs): model_index_dict = dict(self.config) model_index_dict.pop('_class_name', None) model_index_dict.pop('_diffusers_version', None) model_index_dict.pop('_module', None) model_index_dict.pop('_name_or_path', None) if push_to_hub: commit_message = kwargs.pop('commit_message', None) private = kwargs.pop('private', False) create_pr = kwargs.pop('create_pr', False) token = kwargs.pop('token', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id (expected_modules, optional_kwargs) = self._get_signature_keys(self) def is_saveable_module(name, value): if name not in expected_modules: return False if name in self._optional_components and value[0] is None: return 
False return True model_index_dict = {k: v for (k, v) in model_index_dict.items() if is_saveable_module(k, v)} for pipeline_component_name in model_index_dict.keys(): sub_model = getattr(self, pipeline_component_name) model_cls = sub_model.__class__ if is_compiled_module(sub_model): sub_model = _unwrap_model(sub_model) model_cls = sub_model.__class__ save_method_name = None for (library_name, library_classes) in LOADABLE_CLASSES.items(): if library_name in sys.modules: library = importlib.import_module(library_name) else: logger.info(f'{library_name} is not installed. Cannot save {pipeline_component_name} as {library_classes} from {library_name}') for (base_class, save_load_methods) in library_classes.items(): class_candidate = getattr(library, base_class, None) if class_candidate is not None and issubclass(model_cls, class_candidate): save_method_name = save_load_methods[0] break if save_method_name is not None: break if save_method_name is None: logger.warning(f'self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved.') self.register_to_config(**{pipeline_component_name: (None, None)}) continue save_method = getattr(sub_model, save_method_name) save_method_signature = inspect.signature(save_method) save_method_accept_safe = 'safe_serialization' in save_method_signature.parameters save_method_accept_variant = 'variant' in save_method_signature.parameters save_kwargs = {} if save_method_accept_safe: save_kwargs['safe_serialization'] = safe_serialization if save_method_accept_variant: save_kwargs['variant'] = variant save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs) self.save_config(save_directory) if push_to_hub: model_card = load_or_create_model_card(repo_id, token=token, is_pipeline=True) model_card = populate_model_card(model_card) model_card.save(os.path.join(save_directory, 'README.md')) self._upload_folder(save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr) def to(self, *args, **kwargs): dtype = kwargs.pop('dtype', None) device = kwargs.pop('device', None) silence_dtype_warnings = kwargs.pop('silence_dtype_warnings', False) dtype_arg = None device_arg = None if len(args) == 1: if isinstance(args[0], torch.dtype): dtype_arg = args[0] else: device_arg = torch.device(args[0]) if args[0] is not None else None elif len(args) == 2: if isinstance(args[0], torch.dtype): raise ValueError('When passing two arguments, make sure the first corresponds to `device` and the second to `dtype`.') device_arg = torch.device(args[0]) if args[0] is not None else None dtype_arg = args[1] elif len(args) > 2: raise ValueError('Please make sure to pass at most two arguments (`device` and `dtype`) `.to(...)`') if dtype is not None and dtype_arg is not None: raise ValueError('You have passed `dtype` both as an argument and as a keyword argument. Please only pass one of the two.') dtype = dtype or dtype_arg if device is not None and device_arg is not None: raise ValueError('You have passed `device` both as an argument and as a keyword argument. 
Please only pass one of the two.') device = device or device_arg def module_is_sequentially_offloaded(module): if not is_accelerate_available() or is_accelerate_version('<', '0.14.0'): return False return hasattr(module, '_hf_hook') and (isinstance(module._hf_hook, accelerate.hooks.AlignDevicesHook) or (hasattr(module._hf_hook, 'hooks') and isinstance(module._hf_hook.hooks[0], accelerate.hooks.AlignDevicesHook))) def module_is_offloaded(module): if not is_accelerate_available() or is_accelerate_version('<', '0.17.0.dev0'): return False return hasattr(module, '_hf_hook') and isinstance(module._hf_hook, accelerate.hooks.CpuOffload) pipeline_is_sequentially_offloaded = any((module_is_sequentially_offloaded(module) for (_, module) in self.components.items())) if pipeline_is_sequentially_offloaded and device and (torch.device(device).type == 'cuda'): raise ValueError("It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading.") is_pipeline_device_mapped = self.hf_device_map is not None and len(self.hf_device_map) > 1 if is_pipeline_device_mapped: raise ValueError("It seems like you have activated a device mapping strategy on the pipeline which doesn't allow explicit device placement using `to()`. You can call `reset_device_map()` first and then call `to()`.") pipeline_is_offloaded = any((module_is_offloaded(module) for (_, module) in self.components.items())) if pipeline_is_offloaded and device and (torch.device(device).type == 'cuda'): logger.warning(f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading.") (module_names, _) = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded for module in modules: is_loaded_in_8bit = hasattr(module, 'is_loaded_in_8bit') and module.is_loaded_in_8bit if is_loaded_in_8bit and dtype is not None: logger.warning(f"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {dtype} is not yet supported. Module is still in 8bit precision.") if is_loaded_in_8bit and device is not None: logger.warning(f"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {dtype} via `.to()` is not yet supported. Module is still on {module.device}.") else: module.to(device, dtype) if module.dtype == torch.float16 and str(device) in ['cpu'] and (not silence_dtype_warnings) and (not is_offloaded): logger.warning('Pipelines loaded with `dtype=torch.float16` cannot run with `cpu` device. It is not recommended to move them to `cpu` as running them will fail. Please make sure to use an accelerator to run the pipeline in inference, due to the lack of support for`float16` operations on this device in PyTorch. 
Please, remove the `torch_dtype=torch.float16` argument, or use another device for inference.') return self @property def device(self) -> torch.device: (module_names, _) = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] for module in modules: return module.device return torch.device('cpu') @property def dtype(self) -> torch.dtype: (module_names, _) = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] for module in modules: return module.dtype return torch.float32 @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): kwargs_copied = kwargs.copy() cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) from_flax = kwargs.pop('from_flax', False) torch_dtype = kwargs.pop('torch_dtype', None) custom_pipeline = kwargs.pop('custom_pipeline', None) custom_revision = kwargs.pop('custom_revision', None) provider = kwargs.pop('provider', None) sess_options = kwargs.pop('sess_options', None) device_map = kwargs.pop('device_map', None) max_memory = kwargs.pop('max_memory', None) offload_folder = kwargs.pop('offload_folder', None) offload_state_dict = kwargs.pop('offload_state_dict', False) low_cpu_mem_usage = kwargs.pop('low_cpu_mem_usage', _LOW_CPU_MEM_USAGE_DEFAULT) variant = kwargs.pop('variant', None) use_safetensors = kwargs.pop('use_safetensors', None) use_onnx = kwargs.pop('use_onnx', None) load_connected_pipeline = kwargs.pop('load_connected_pipeline', False) if low_cpu_mem_usage and (not is_accelerate_available()): low_cpu_mem_usage = False logger.warning('Cannot initialize model with low cpu memory usage because `accelerate` was not found in the environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip install accelerate\n```\n.') if low_cpu_mem_usage is True and (not is_torch_version('>=', '1.9.0')): raise NotImplementedError('Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set `low_cpu_mem_usage=False`.') if device_map is not None and (not is_torch_version('>=', '1.9.0')): raise NotImplementedError('Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set `device_map=None`.') if device_map is not None and (not is_accelerate_available()): raise NotImplementedError('Using `device_map` requires the `accelerate` library. Please install it using: `pip install accelerate`.') if device_map is not None and (not isinstance(device_map, str)): raise ValueError('`device_map` must be a string.') if device_map is not None and device_map not in SUPPORTED_DEVICE_MAP: raise NotImplementedError(f"{device_map} not supported. 
Supported strategies are: {', '.join(SUPPORTED_DEVICE_MAP)}") if device_map is not None and device_map in SUPPORTED_DEVICE_MAP: if is_accelerate_version('<', '0.28.0'): raise NotImplementedError('Device placement requires `accelerate` version `0.28.0` or later.') if low_cpu_mem_usage is False and device_map is not None: raise ValueError(f'You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and dispatching. Please make sure to set `low_cpu_mem_usage=True`.') if not os.path.isdir(pretrained_model_name_or_path): if pretrained_model_name_or_path.count('/') > 1: raise ValueError(f'The provided pretrained_model_name_or_path "{pretrained_model_name_or_path}" is neither a valid local path nor a valid repo id. Please check the parameter.') cached_folder = cls.download(pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, from_flax=from_flax, use_safetensors=use_safetensors, use_onnx=use_onnx, custom_pipeline=custom_pipeline, custom_revision=custom_revision, variant=variant, load_connected_pipeline=load_connected_pipeline, **kwargs) else: cached_folder = pretrained_model_name_or_path config_dict = cls.load_config(cached_folder) config_dict.pop('_ignore_files', None) model_variants = _identify_model_variants(folder=cached_folder, variant=variant, config=config_dict) (custom_pipeline, custom_class_name) = _resolve_custom_pipeline_and_cls(folder=cached_folder, config=config_dict, custom_pipeline=custom_pipeline) pipeline_class = _get_pipeline_class(cls, config=config_dict, load_connected_pipeline=load_connected_pipeline, custom_pipeline=custom_pipeline, class_name=custom_class_name, cache_dir=cache_dir, revision=custom_revision) if device_map is not None and pipeline_class._load_connected_pipes: raise NotImplementedError('`device_map` is not yet supported for connected pipelines.') _maybe_raise_warning_for_inpainting(pipeline_class=pipeline_class, pretrained_model_name_or_path=pretrained_model_name_or_path, config=config_dict) (expected_modules, optional_kwargs) = cls._get_signature_keys(pipeline_class) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} (init_dict, unused_kwargs, _) = pipeline_class.extract_init_dict(config_dict, **kwargs) init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict and k not in pipeline_class._optional_components} init_kwargs = {**init_kwargs, **passed_pipe_kwargs} def load_module(name, value): if value[0] is None: return False if name in passed_class_obj and passed_class_obj[name] is None: return False return True init_dict = {k: v for (k, v) in init_dict.items() if load_module(k, v)} if from_flax and 'safety_checker' in init_dict and ('safety_checker' not in passed_class_obj): raise NotImplementedError('The safety checker cannot be automatically loaded when loading weights `from_flax`. 
Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker separately if you need it.') if len(unused_kwargs) > 0: logger.warning(f'Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored.') from diffusers import pipelines final_device_map = None if device_map is not None: final_device_map = _get_final_device_map(device_map=device_map, pipeline_class=pipeline_class, passed_class_obj=passed_class_obj, init_dict=init_dict, library=library, max_memory=max_memory, torch_dtype=torch_dtype, cached_folder=cached_folder, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision) current_device_map = None for (name, (library_name, class_name)) in logging.tqdm(init_dict.items(), desc='Loading pipeline components...'): if final_device_map is not None and len(final_device_map) > 0: component_device = final_device_map.get(name, None) if component_device is not None: current_device_map = {'': component_device} else: current_device_map = None class_name = class_name[4:] if class_name.startswith('Flax') else class_name is_pipeline_module = hasattr(pipelines, library_name) importable_classes = ALL_IMPORTABLE_CLASSES loaded_sub_model = None if name in passed_class_obj: maybe_raise_or_warn(library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module) loaded_sub_model = passed_class_obj[name] else: loaded_sub_model = load_sub_model(library_name=library_name, class_name=class_name, importable_classes=importable_classes, pipelines=pipelines, is_pipeline_module=is_pipeline_module, pipeline_class=pipeline_class, torch_dtype=torch_dtype, provider=provider, sess_options=sess_options, device_map=current_device_map, max_memory=max_memory, offload_folder=offload_folder, offload_state_dict=offload_state_dict, model_variants=model_variants, name=name, from_flax=from_flax, variant=variant, low_cpu_mem_usage=low_cpu_mem_usage, cached_folder=cached_folder) logger.info(f'Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}.') init_kwargs[name] = loaded_sub_model if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, 'README.md')): init_kwargs = _update_init_kwargs_with_connected_pipeline(init_kwargs=init_kwargs, passed_pipe_kwargs=passed_pipe_kwargs, passed_class_objs=passed_class_obj, folder=cached_folder, **kwargs_copied) missing_modules = set(expected_modules) - set(init_kwargs.keys()) passed_modules = list(passed_class_obj.keys()) optional_modules = pipeline_class._optional_components if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules): for module in missing_modules: init_kwargs[module] = passed_class_obj.get(module, None) elif len(missing_modules) > 0: passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs raise ValueError(f'Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed.') model = pipeline_class(**init_kwargs) model.register_to_config(_name_or_path=pretrained_model_name_or_path) if device_map is not None: setattr(model, 'hf_device_map', final_device_map) return model @property def name_or_path(self) -> str: return getattr(self.config, '_name_or_path', None) @property def _execution_device(self): for (name, model) in self.components.items(): if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload: continue if not hasattr(model, '_hf_hook'): 
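# --- editorial usage sketch (not part of the original source) ---------------------
# Tying the `DiffusionPipeline` API of this class together: load a checkpoint with
# `from_pretrained`, then either move it explicitly with `.to()` or let
# `enable_model_cpu_offload` (defined below) manage device placement. The checkpoint
# id and prompt mirror the PixArt example docstring later in this file and are
# illustrative only:
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # or: pipe.to("cuda")
image = pipe("A small cactus with a happy face in the Sahara desert.").images[0]
# -----------------------------------------------------------------------------------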
return self.device for module in model.modules(): if hasattr(module, '_hf_hook') and hasattr(module._hf_hook, 'execution_device') and (module._hf_hook.execution_device is not None): return torch.device(module._hf_hook.execution_device) return self.device def remove_all_hooks(self): for (_, model) in self.components.items(): if isinstance(model, torch.nn.Module) and hasattr(model, '_hf_hook'): accelerate.hooks.remove_hook_from_module(model, recurse=True) self._all_hooks = [] def enable_model_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): is_pipeline_device_mapped = self.hf_device_map is not None and len(self.hf_device_map) > 1 if is_pipeline_device_mapped: raise ValueError("It seems like you have activated a device mapping strategy on the pipeline so calling `enable_model_cpu_offload() isn't allowed. You can call `reset_device_map()` first and then call `enable_model_cpu_offload()`.") if self.model_cpu_offload_seq is None: raise ValueError('Model CPU offload cannot be enabled because no `model_cpu_offload_seq` class attribute is set.') if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.') self.remove_all_hooks() torch_device = torch.device(device) device_index = torch_device.index if gpu_id is not None and device_index is not None: raise ValueError(f'You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}') self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, '_offload_gpu_id', 0) device_type = torch_device.type device = torch.device(f'{device_type}:{self._offload_gpu_id}') self._offload_device = device self.to('cpu', silence_dtype_warnings=True) device_mod = getattr(torch, device.type, None) if hasattr(device_mod, 'empty_cache') and device_mod.is_available(): device_mod.empty_cache() all_model_components = {k: v for (k, v) in self.components.items() if isinstance(v, torch.nn.Module)} self._all_hooks = [] hook = None for model_str in self.model_cpu_offload_seq.split('->'): model = all_model_components.pop(model_str, None) if not isinstance(model, torch.nn.Module): continue (_, hook) = cpu_offload_with_hook(model, device, prev_module_hook=hook) self._all_hooks.append(hook) for (name, model) in all_model_components.items(): if not isinstance(model, torch.nn.Module): continue if name in self._exclude_from_cpu_offload: model.to(device) else: (_, hook) = cpu_offload_with_hook(model, device) self._all_hooks.append(hook) def maybe_free_model_hooks(self): if not hasattr(self, '_all_hooks') or len(self._all_hooks) == 0: return self.enable_model_cpu_offload(device=getattr(self, '_offload_device', 'cuda')) def enable_sequential_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): if is_accelerate_available() and is_accelerate_version('>=', '0.14.0'): from accelerate import cpu_offload else: raise ImportError('`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher') self.remove_all_hooks() is_pipeline_device_mapped = self.hf_device_map is not None and len(self.hf_device_map) > 1 if is_pipeline_device_mapped: raise ValueError("It seems like you have activated a device mapping strategy on the pipeline so calling `enable_sequential_cpu_offload() isn't allowed. 
You can call `reset_device_map()` first and then call `enable_sequential_cpu_offload()`.") torch_device = torch.device(device) device_index = torch_device.index if gpu_id is not None and device_index is not None: raise ValueError(f'You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}') self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, '_offload_gpu_id', 0) device_type = torch_device.type device = torch.device(f'{device_type}:{self._offload_gpu_id}') self._offload_device = device if self.device.type != 'cpu': self.to('cpu', silence_dtype_warnings=True) device_mod = getattr(torch, self.device.type, None) if hasattr(device_mod, 'empty_cache') and device_mod.is_available(): device_mod.empty_cache() for (name, model) in self.components.items(): if not isinstance(model, torch.nn.Module): continue if name in self._exclude_from_cpu_offload: model.to(device) else: offload_buffers = len(model._parameters) > 0 cpu_offload(model, device, offload_buffers=offload_buffers) def reset_device_map(self): if self.hf_device_map is None: return else: self.remove_all_hooks() for (name, component) in self.components.items(): if isinstance(component, torch.nn.Module): component.to('cpu') self.hf_device_map = None @classmethod @validate_hf_hub_args def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) local_files_only = kwargs.pop('local_files_only', None) token = kwargs.pop('token', None) revision = kwargs.pop('revision', None) from_flax = kwargs.pop('from_flax', False) custom_pipeline = kwargs.pop('custom_pipeline', None) custom_revision = kwargs.pop('custom_revision', None) variant = kwargs.pop('variant', None) use_safetensors = kwargs.pop('use_safetensors', None) use_onnx = kwargs.pop('use_onnx', None) load_connected_pipeline = kwargs.pop('load_connected_pipeline', False) trust_remote_code = kwargs.pop('trust_remote_code', False) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True allow_patterns = None ignore_patterns = None model_info_call_error: Optional[Exception] = None if not local_files_only: try: info = model_info(pretrained_model_name, token=token, revision=revision) except (HTTPError, OfflineModeIsEnabled, requests.ConnectionError) as e: logger.warning(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.") local_files_only = True model_info_call_error = e if not local_files_only: config_file = hf_hub_download(pretrained_model_name, cls.config_name, cache_dir=cache_dir, revision=revision, proxies=proxies, force_download=force_download, token=token) config_dict = cls._dict_from_json_file(config_file) ignore_filenames = config_dict.pop('_ignore_files', []) folder_names = [k for (k, v) in config_dict.items() if isinstance(v, list) and k != '_class_name'] filenames = {sibling.rfilename for sibling in info.siblings} (model_filenames, variant_filenames) = variant_compatible_siblings(filenames, variant=variant) diffusers_module = importlib.import_module(__name__.split('.')[0]) pipelines = getattr(diffusers_module, 'pipelines') custom_components = {} for component in folder_names: module_candidate = config_dict[component][0] if module_candidate is None or not isinstance(module_candidate, str): 
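# --- editorial usage sketch (not part of the original source) ---------------------
# `DiffusionPipeline.download`, the classmethod this loop belongs to, can also be
# called on its own to prefetch a pipeline snapshot into the local cache, applying
# the same `variant`/`use_safetensors` filtering as `from_pretrained`. A hedged
# example (checkpoint id illustrative; requires network access):
from diffusers import DiffusionPipeline

local_folder = DiffusionPipeline.download("PixArt-alpha/PixArt-XL-2-1024-MS")
print(local_folder)  # the cached snapshot folder that `from_pretrained` would load
# -----------------------------------------------------------------------------------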
continue candidate_file = f'{component}/{module_candidate}.py' if candidate_file in filenames: custom_components[component] = module_candidate elif module_candidate not in LOADABLE_CLASSES and (not hasattr(pipelines, module_candidate)): raise ValueError(f"{candidate_file} as defined in `model_index.json` does not exist in {pretrained_model_name} and is not a module in 'diffusers/pipelines'.") if len(variant_filenames) == 0 and variant is not None: deprecation_message = f'You are trying to load the model files of the `variant={variant}`, but no such modeling files are available.The default model files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`if such variant modeling files are not available. Doing so will lead to an error in v0.24.0 as defaulting to non-variantmodeling files is deprecated.' deprecate('no variant default', '0.24.0', deprecation_message, standard_warn=False) model_filenames = set(model_filenames) - set(ignore_filenames) variant_filenames = set(variant_filenames) - set(ignore_filenames) if revision in DEPRECATED_REVISION_ARGS and version.parse(version.parse(__version__).base_version) >= version.parse('0.22.0'): warn_deprecated_model_variant(pretrained_model_name, token, variant, revision, model_filenames) model_folder_names = {os.path.split(f)[0] for f in model_filenames if os.path.split(f)[0] in folder_names} custom_class_name = None if custom_pipeline is None and isinstance(config_dict['_class_name'], (list, tuple)): custom_pipeline = config_dict['_class_name'][0] custom_class_name = config_dict['_class_name'][1] allow_patterns = list(model_filenames) allow_patterns += [f'{k}/*' for k in folder_names if k not in model_folder_names] allow_patterns += [f'{k}/{f}.py' for (k, f) in custom_components.items()] allow_patterns += [f'{custom_pipeline}.py'] if f'{custom_pipeline}.py' in filenames else [] allow_patterns += [os.path.join(k, 'config.json') for k in model_folder_names] allow_patterns += [SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name, CUSTOM_PIPELINE_FILE_NAME] load_pipe_from_hub = custom_pipeline is not None and f'{custom_pipeline}.py' in filenames load_components_from_hub = len(custom_components) > 0 if load_pipe_from_hub and (not trust_remote_code): raise ValueError(f'The repository for {pretrained_model_name} contains custom code in {custom_pipeline}.py which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/{pretrained_model_name}/blob/main/{custom_pipeline}.py.\nPlease pass the argument `trust_remote_code=True` to allow custom code to be run.') if load_components_from_hub and (not trust_remote_code): raise ValueError(f"The repository for {pretrained_model_name} contains custom code in {'.py, '.join([os.path.join(k, v) for (k, v) in custom_components.items()])} which must be executed to correctly load the model. 
You can inspect the repository content at {', '.join([f'https://hf.co/{pretrained_model_name}/{k}/{v}.py' for (k, v) in custom_components.items()])}.\nPlease pass the argument `trust_remote_code=True` to allow custom code to be run.") pipeline_class = _get_pipeline_class(cls, config_dict, load_connected_pipeline=load_connected_pipeline, custom_pipeline=custom_pipeline, repo_id=pretrained_model_name if load_pipe_from_hub else None, hub_revision=revision, class_name=custom_class_name, cache_dir=cache_dir, revision=custom_revision) (expected_components, _) = cls._get_signature_keys(pipeline_class) passed_components = [k for k in expected_components if k in kwargs] if use_safetensors and (not allow_pickle) and (not is_safetensors_compatible(model_filenames, passed_components=passed_components, folder_names=model_folder_names)): raise EnvironmentError(f'Could not find the necessary `safetensors` weights in {model_filenames} (variant={variant})') if from_flax: ignore_patterns = ['*.bin', '*.safetensors', '*.onnx', '*.pb'] elif use_safetensors and is_safetensors_compatible(model_filenames, passed_components=passed_components, folder_names=model_folder_names): ignore_patterns = ['*.bin', '*.msgpack'] use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx if not use_onnx: ignore_patterns += ['*.onnx', '*.pb'] safetensors_variant_filenames = {f for f in variant_filenames if f.endswith('.safetensors')} safetensors_model_filenames = {f for f in model_filenames if f.endswith('.safetensors')} if len(safetensors_variant_filenames) > 0 and safetensors_model_filenames != safetensors_variant_filenames: logger.warning(f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\nIf this behavior is not expected, please check your folder structure.") else: ignore_patterns = ['*.safetensors', '*.msgpack'] use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx if not use_onnx: ignore_patterns += ['*.onnx', '*.pb'] bin_variant_filenames = {f for f in variant_filenames if f.endswith('.bin')} bin_model_filenames = {f for f in model_filenames if f.endswith('.bin')} if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames: logger.warning(f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\nIf this behavior is not expected, please check your folder structure.") allow_patterns = [p for p in allow_patterns if not (len(p.split('/')) == 2 and p.split('/')[0] in passed_components)] if pipeline_class._load_connected_pipes: allow_patterns.append('README.md') ignore_patterns = ignore_patterns + [f'{i}.index.*json' for i in ignore_patterns] re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns] re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns] expected_files = [f for f in filenames if not any((p.match(f) for p in re_ignore_pattern))] expected_files = [f for f in expected_files if any((p.match(f) for p in re_allow_pattern))] snapshot_folder = Path(config_file).parent pipeline_is_cached = all(((snapshot_folder / f).is_file() for f in expected_files)) if pipeline_is_cached and (not force_download): return snapshot_folder user_agent = {'pipeline_class': 
cls.__name__} if custom_pipeline is not None and (not custom_pipeline.endswith('.py')): user_agent['custom_pipeline'] = custom_pipeline try: cached_folder = snapshot_download(pretrained_model_name, cache_dir=cache_dir, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, user_agent=user_agent) cls_name = cls.load_config(os.path.join(cached_folder, 'model_index.json')).get('_class_name', None) cls_name = cls_name[4:] if isinstance(cls_name, str) and cls_name.startswith('Flax') else cls_name diffusers_module = importlib.import_module(__name__.split('.')[0]) pipeline_class = getattr(diffusers_module, cls_name, None) if isinstance(cls_name, str) else None if pipeline_class is not None and pipeline_class._load_connected_pipes: modelcard = ModelCard.load(os.path.join(cached_folder, 'README.md')) connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], []) for connected_pipe_repo_id in connected_pipes: download_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'local_files_only': local_files_only, 'token': token, 'variant': variant, 'use_safetensors': use_safetensors} DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs) return cached_folder except FileNotFoundError: if model_info_call_error is None: raise else: raise EnvironmentError(f'Cannot load model {pretrained_model_name}: model is not cached locally and an error occurred while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace above.') from model_info_call_error @classmethod def _get_signature_keys(cls, obj): parameters = inspect.signature(obj.__init__).parameters required_parameters = {k: v for (k, v) in parameters.items() if v.default == inspect._empty} optional_parameters = set({k for (k, v) in parameters.items() if v.default != inspect._empty}) expected_modules = set(required_parameters.keys()) - {'self'} optional_names = list(optional_parameters) for name in optional_names: if name in cls._optional_components: expected_modules.add(name) optional_parameters.remove(name) return (expected_modules, optional_parameters) @classmethod def _get_signature_types(cls): signature_types = {} for (k, v) in inspect.signature(cls.__init__).parameters.items(): if inspect.isclass(v.annotation): signature_types[k] = (v.annotation,) elif get_origin(v.annotation) == Union: signature_types[k] = get_args(v.annotation) else: logger.warning(f'cannot get type annotation for Parameter {k} of {cls}.') return signature_types @property def components(self) -> Dict[str, Any]: (expected_modules, optional_parameters) = self._get_signature_keys(self) components = {k: getattr(self, k) for k in self.config.keys() if not k.startswith('_') and k not in optional_parameters} if set(components.keys()) != expected_modules: raise ValueError(f'{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. 
Expected {expected_modules} to be defined, but {components.keys()} are defined.') return components @staticmethod def numpy_to_pil(images): return numpy_to_pil(images) def progress_bar(self, iterable=None, total=None): if not hasattr(self, '_progress_bar_config'): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError(f'`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}.') if iterable is not None: return tqdm(iterable, **self._progress_bar_config) elif total is not None: return tqdm(total=total, **self._progress_bar_config) else: raise ValueError('Either `total` or `iterable` has to be defined.') def set_progress_bar_config(self, **kwargs): self._progress_bar_config = kwargs def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.set_use_memory_efficient_attention_xformers(True, attention_op) def disable_xformers_memory_efficient_attention(self): self.set_use_memory_efficient_attention_xformers(False) def set_use_memory_efficient_attention_xformers(self, valid: bool, attention_op: Optional[Callable]=None) -> None: def fn_recursive_set_mem_eff(module: torch.nn.Module): if hasattr(module, 'set_use_memory_efficient_attention_xformers'): module.set_use_memory_efficient_attention_xformers(valid, attention_op) for child in module.children(): fn_recursive_set_mem_eff(child) (module_names, _) = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module)] for module in modules: fn_recursive_set_mem_eff(module) def enable_attention_slicing(self, slice_size: Optional[Union[str, int]]='auto'): self.set_attention_slice(slice_size) def disable_attention_slicing(self): self.enable_attention_slicing(None) def set_attention_slice(self, slice_size: Optional[int]): (module_names, _) = self._get_signature_keys(self) modules = [getattr(self, n, None) for n in module_names] modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, 'set_attention_slice')] for module in modules: module.set_attention_slice(slice_size) @classmethod def from_pipe(cls, pipeline, **kwargs): original_config = dict(pipeline.config) torch_dtype = kwargs.pop('torch_dtype', None) custom_pipeline = kwargs.pop('custom_pipeline', None) custom_revision = kwargs.pop('custom_revision', None) if custom_pipeline is not None: pipeline_class = _get_custom_pipeline_class(custom_pipeline, revision=custom_revision) else: pipeline_class = cls (expected_modules, optional_kwargs) = cls._get_signature_keys(pipeline_class) parameters = inspect.signature(cls.__init__).parameters true_optional_modules = set({k for (k, v) in parameters.items() if v.default != inspect._empty and k in expected_modules}) component_types = pipeline_class._get_signature_types() pretrained_model_name_or_path = original_config.pop('_name_or_path', None) passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} original_class_obj = {} for (name, component) in pipeline.components.items(): if name in expected_modules and name not in passed_class_obj: if not isinstance(component, ModelMixin) or type(component) in component_types[name] or (component is None and name in cls._optional_components): original_class_obj[name] = component else: logger.warning(f'component {name} is not switched over to new pipeline because type does not match the expected. 
{name} is {type(component)} while the new pipeline expect {component_types[name]}. please pass the component of the correct type to the new pipeline. `from_pipe(..., {name}={name})`') passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} original_pipe_kwargs = {k: original_config[k] for k in original_config.keys() if k in optional_kwargs and k not in passed_pipe_kwargs} additional_pipe_kwargs = [k[1:] for k in original_config.keys() if k.startswith('_') and k[1:] in optional_kwargs and (k[1:] not in passed_pipe_kwargs)] for k in additional_pipe_kwargs: original_pipe_kwargs[k] = original_config.pop(f'_{k}') pipeline_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs, **kwargs} unused_original_config = {f"{('' if k.startswith('_') else '_')}{k}": v for (k, v) in original_config.items() if k not in pipeline_kwargs} missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(pipeline_kwargs.keys()) - set(true_optional_modules) if len(missing_modules) > 0: raise ValueError(f'Pipeline {pipeline_class} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed') new_pipeline = pipeline_class(**pipeline_kwargs) if pretrained_model_name_or_path is not None: new_pipeline.register_to_config(_name_or_path=pretrained_model_name_or_path) new_pipeline.register_to_config(**unused_original_config) if torch_dtype is not None: new_pipeline.to(dtype=torch_dtype) return new_pipeline class StableDiffusionMixin: def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_vae_tiling(self): self.vae.enable_tiling() def disable_vae_tiling(self): self.vae.disable_tiling() def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): if not hasattr(self, 'unet'): raise ValueError('The pipeline must have `unet` for using FreeU.') self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2) def disable_freeu(self): self.unet.disable_freeu() def fuse_qkv_projections(self, unet: bool=True, vae: bool=True): self.fusing_unet = False self.fusing_vae = False if unet: self.fusing_unet = True self.unet.fuse_qkv_projections() self.unet.set_attn_processor(FusedAttnProcessor2_0()) if vae: if not isinstance(self.vae, AutoencoderKL): raise ValueError('`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.') self.fusing_vae = True self.vae.fuse_qkv_projections() self.vae.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self, unet: bool=True, vae: bool=True): if unet: if not self.fusing_unet: logger.warning('The UNet was not initially fused for QKV projections. Doing nothing.') else: self.unet.unfuse_qkv_projections() self.fusing_unet = False if vae: if not self.fusing_vae: logger.warning('The VAE was not initially fused for QKV projections. 
Doing nothing.') else: self.vae.unfuse_qkv_projections() self.fusing_vae = False # File: diffusers-main/src/diffusers/pipelines/pixart_alpha/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_pixart_alpha'] = ['PixArtAlphaPipeline'] _import_structure['pipeline_pixart_sigma'] = ['PixArtSigmaPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_pixart_alpha import ASPECT_RATIO_256_BIN, ASPECT_RATIO_512_BIN, ASPECT_RATIO_1024_BIN, PixArtAlphaPipeline from .pipeline_pixart_sigma import ASPECT_RATIO_2048_BIN, PixArtSigmaPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py import html import inspect import re import urllib.parse as ul from typing import Callable, List, Optional, Tuple, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...image_processor import PixArtImageProcessor from ...models import AutoencoderKL, PixArtTransformer2DModel from ...schedulers import DPMSolverMultistepScheduler from ...utils import BACKENDS_MAPPING, deprecate, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import PixArtAlphaPipeline\n\n >>> # You can replace the checkpoint id with "PixArt-alpha/PixArt-XL-2-512x512" too.\n >>> pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16)\n >>> # Enable memory optimizations.\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "A small cactus with a happy face in the Sahara desert."\n >>> image = pipe(prompt).images[0]\n ```\n' ASPECT_RATIO_1024_BIN = {'0.25': [512.0, 2048.0], '0.28': [512.0, 1856.0], '0.32': [576.0, 1792.0], '0.33': [576.0, 1728.0], '0.35': [576.0, 1664.0], '0.4': [640.0, 1600.0], '0.42': [640.0, 1536.0], '0.48': [704.0, 1472.0], '0.5': [704.0, 1408.0], '0.52': [704.0, 1344.0], '0.57': [768.0, 1344.0], '0.6': [768.0, 1280.0], '0.68': [832.0, 1216.0], '0.72': [832.0, 1152.0], '0.78': [896.0, 1152.0], '0.82': [896.0, 1088.0], '0.88': [960.0, 1088.0], '0.94': [960.0, 1024.0], '1.0': [1024.0, 1024.0], '1.07': [1024.0, 960.0], '1.13': [1088.0, 960.0], '1.21': [1088.0, 896.0], '1.29': [1152.0, 896.0], '1.38': [1152.0, 832.0], '1.46': [1216.0, 832.0], '1.67': [1280.0, 768.0], '1.75': [1344.0, 768.0], '2.0': [1408.0, 704.0], '2.09': [1472.0, 704.0], '2.4': 
[1536.0, 640.0], '2.5': [1600.0, 640.0], '3.0': [1728.0, 576.0], '4.0': [2048.0, 512.0]} ASPECT_RATIO_512_BIN = {'0.25': [256.0, 1024.0], '0.28': [256.0, 928.0], '0.32': [288.0, 896.0], '0.33': [288.0, 864.0], '0.35': [288.0, 832.0], '0.4': [320.0, 800.0], '0.42': [320.0, 768.0], '0.48': [352.0, 736.0], '0.5': [352.0, 704.0], '0.52': [352.0, 672.0], '0.57': [384.0, 672.0], '0.6': [384.0, 640.0], '0.68': [416.0, 608.0], '0.72': [416.0, 576.0], '0.78': [448.0, 576.0], '0.82': [448.0, 544.0], '0.88': [480.0, 544.0], '0.94': [480.0, 512.0], '1.0': [512.0, 512.0], '1.07': [512.0, 480.0], '1.13': [544.0, 480.0], '1.21': [544.0, 448.0], '1.29': [576.0, 448.0], '1.38': [576.0, 416.0], '1.46': [608.0, 416.0], '1.67': [640.0, 384.0], '1.75': [672.0, 384.0], '2.0': [704.0, 352.0], '2.09': [736.0, 352.0], '2.4': [768.0, 320.0], '2.5': [800.0, 320.0], '3.0': [864.0, 288.0], '4.0': [1024.0, 256.0]} ASPECT_RATIO_256_BIN = {'0.25': [128.0, 512.0], '0.28': [128.0, 464.0], '0.32': [144.0, 448.0], '0.33': [144.0, 432.0], '0.35': [144.0, 416.0], '0.4': [160.0, 400.0], '0.42': [160.0, 384.0], '0.48': [176.0, 368.0], '0.5': [176.0, 352.0], '0.52': [176.0, 336.0], '0.57': [192.0, 336.0], '0.6': [192.0, 320.0], '0.68': [208.0, 304.0], '0.72': [208.0, 288.0], '0.78': [224.0, 288.0], '0.82': [224.0, 272.0], '0.88': [240.0, 272.0], '0.94': [240.0, 256.0], '1.0': [256.0, 256.0], '1.07': [256.0, 240.0], '1.13': [272.0, 240.0], '1.21': [272.0, 224.0], '1.29': [288.0, 224.0], '1.38': [288.0, 208.0], '1.46': [304.0, 208.0], '1.67': [320.0, 192.0], '1.75': [336.0, 192.0], '2.0': [352.0, 176.0], '2.09': [368.0, 176.0], '2.4': [384.0, 160.0], '2.5': [400.0, 160.0], '3.0': [432.0, 144.0], '4.0': [512.0, 128.0]} def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class PixArtAlphaPipeline(DiffusionPipeline): bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = ['tokenizer', 'text_encoder'] model_cpu_offload_seq = 'text_encoder->transformer->vae' def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKL, transformer: PixArtTransformer2DModel, scheduler: DPMSolverMultistepScheduler): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor) def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, negative_prompt: str='', num_images_per_prompt: int=1, device: Optional[torch.device]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, clean_caption: bool=False, max_sequence_length: int=120, **kwargs): if 'mask_feature' in kwargs: deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." 
deprecate('mask_feature', '1.0.0', deprecation_message, standard_warn=False) if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = max_sequence_length if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because T5 can only handle sequences up to {max_length} tokens: {removed_text}') prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') negative_prompt_attention_mask = uncond_input.attention_mask negative_prompt_attention_mask = negative_prompt_attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None return (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator 
= 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt, callback_steps, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError('Must provide `prompt_attention_mask` when specifying `prompt_embeds`.') if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError('Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError(f'`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask` {negative_prompt_attention_mask.shape}.') def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): 
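# Overview: _clean_caption is only applied when clean_caption=True and requires the optional
# bs4 and ftfy packages. It URL-unquotes and lowercases the caption, strips URLs, HTML tags
# (via BeautifulSoup), @handles and CJK character ranges, normalizes exotic dashes and quotes,
# removes IP-like numbers, trailing timestamps, long digit runs, file names, "free shipping"/
# "download" boilerplate and WxH resolution strings, collapses runs of bad punctuation via
# bad_punct_regex, repairs mojibake with ftfy, and trims stray leading/trailing punctuation
# before returning the stripped caption.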
caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('<person>', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('&quot;?', '', caption) caption = re.sub('&amp', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption = caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, negative_prompt: str='', num_inference_steps: int=20, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=4.5, num_images_per_prompt: Optional[int]=1, height: Optional[int]=None, width: Optional[int]=None, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, clean_caption: bool=True, use_resolution_binning: bool=True, max_sequence_length: int=120, **kwargs) -> Union[ImagePipelineOutput, Tuple]: if 'mask_feature' in kwargs: deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." deprecate('mask_feature', '1.0.0', deprecation_message, standard_warn=False) height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor if use_resolution_binning: if self.transformer.config.sample_size == 128: aspect_ratio_bin = ASPECT_RATIO_1024_BIN elif self.transformer.config.sample_size == 64: aspect_ratio_bin = ASPECT_RATIO_512_BIN elif self.transformer.config.sample_size == 32: aspect_ratio_bin = ASPECT_RATIO_256_BIN else: raise ValueError('Invalid sample size') (orig_height, orig_width) = (height, width) (height, width) = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) self.check_inputs(prompt, height, width, negative_prompt, callback_steps, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) = self.encode_prompt(prompt, do_classifier_free_guidance, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, clean_caption=clean_caption, max_sequence_length=max_sequence_length) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) latent_channels = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, 
latent_channels, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'resolution': None, 'aspect_ratio': None} if self.transformer.config.sample_size == 128: resolution = torch.tensor([height, width]).repeat(batch_size * num_images_per_prompt, 1) aspect_ratio = torch.tensor([float(height / width)]).repeat(batch_size * num_images_per_prompt, 1) resolution = resolution.to(dtype=prompt_embeds.dtype, device=device) aspect_ratio = aspect_ratio.to(dtype=prompt_embeds.dtype, device=device) if do_classifier_free_guidance: resolution = torch.cat([resolution, resolution], dim=0) aspect_ratio = torch.cat([aspect_ratio, aspect_ratio], dim=0) added_cond_kwargs = {'resolution': resolution, 'aspect_ratio': aspect_ratio} num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) current_timestep = t if not torch.is_tensor(current_timestep): is_mps = latent_model_input.device.type == 'mps' if isinstance(current_timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) elif len(current_timestep.shape) == 0: current_timestep = current_timestep[None].to(latent_model_input.device) current_timestep = current_timestep.expand(latent_model_input.shape[0]) noise_pred = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, timestep=current_timestep, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.transformer.config.out_channels // 2 == latent_channels: noise_pred = noise_pred.chunk(2, dim=1)[0] else: noise_pred = noise_pred if num_inference_steps == 1: latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).pred_original_sample else: latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if use_resolution_binning: image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height) else: image = latents if not output_type == 'latent': image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_sigma.py import html import inspect import re import urllib.parse as ul from typing import Callable, List, Optional, Tuple, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...image_processor import PixArtImageProcessor from ...models import 
AutoencoderKL, PixArtTransformer2DModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import BACKENDS_MAPPING, deprecate, is_bs4_available, is_ftfy_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .pipeline_pixart_alpha import ASPECT_RATIO_256_BIN, ASPECT_RATIO_512_BIN, ASPECT_RATIO_1024_BIN logger = logging.get_logger(__name__) if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy ASPECT_RATIO_2048_BIN = {'0.25': [1024.0, 4096.0], '0.26': [1024.0, 3968.0], '0.27': [1024.0, 3840.0], '0.28': [1024.0, 3712.0], '0.32': [1152.0, 3584.0], '0.33': [1152.0, 3456.0], '0.35': [1152.0, 3328.0], '0.4': [1280.0, 3200.0], '0.42': [1280.0, 3072.0], '0.48': [1408.0, 2944.0], '0.5': [1408.0, 2816.0], '0.52': [1408.0, 2688.0], '0.57': [1536.0, 2688.0], '0.6': [1536.0, 2560.0], '0.68': [1664.0, 2432.0], '0.72': [1664.0, 2304.0], '0.78': [1792.0, 2304.0], '0.82': [1792.0, 2176.0], '0.88': [1920.0, 2176.0], '0.94': [1920.0, 2048.0], '1.0': [2048.0, 2048.0], '1.07': [2048.0, 1920.0], '1.13': [2176.0, 1920.0], '1.21': [2176.0, 1792.0], '1.29': [2304.0, 1792.0], '1.38': [2304.0, 1664.0], '1.46': [2432.0, 1664.0], '1.67': [2560.0, 1536.0], '1.75': [2688.0, 1536.0], '2.0': [2816.0, 1408.0], '2.09': [2944.0, 1408.0], '2.4': [3072.0, 1280.0], '2.5': [3200.0, 1280.0], '2.89': [3328.0, 1152.0], '3.0': [3456.0, 1152.0], '3.11': [3584.0, 1152.0], '3.62': [3712.0, 1024.0], '3.75': [3840.0, 1024.0], '3.88': [3968.0, 1024.0], '4.0': [4096.0, 1024.0]} EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import PixArtSigmaPipeline\n\n >>> # You can replace the checkpoint id with "PixArt-alpha/PixArt-Sigma-XL-2-512-MS" too.\n >>> pipe = PixArtSigmaPipeline.from_pretrained(\n ... "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", torch_dtype=torch.float16\n ... )\n >>> # Enable memory optimizations.\n >>> # pipe.enable_model_cpu_offload()\n\n >>> prompt = "A small cactus with a happy face in the Sahara desert."\n >>> image = pipe(prompt).images[0]\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class PixArtSigmaPipeline(DiffusionPipeline): bad_punct_regex = re.compile('[' + '#®•©™&@·º½¾¿¡§~' + '\\)' + '\\(' + '\\]' + '\\[' + '\\}' + '\\{' + '\\|' + '\\' + '\\/' + '\\*' + ']{1,}') _optional_components = ['tokenizer', 'text_encoder'] model_cpu_offload_seq = 'text_encoder->transformer->vae' def __init__(self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKL, transformer: PixArtTransformer2DModel, scheduler: KarrasDiffusionSchedulers): super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor) def encode_prompt(self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool=True, negative_prompt: str='', num_images_per_prompt: int=1, device: Optional[torch.device]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, clean_caption: bool=False, max_sequence_length: int=300, **kwargs): if 'mask_feature' in kwargs: deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." 
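# Overview: mirrors PixArtAlphaPipeline.encode_prompt; the functional difference is the longer
# default max_sequence_length of 300 T5 tokens (vs. 120 for PixArt-Alpha) used by PixArt-Sigma.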
deprecate('mask_feature', '1.0.0', deprecation_message, standard_warn=False) if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = max_sequence_length if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=max_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because T5 can only handle sequences up to {max_length} tokens: {removed_text}') prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors='pt') negative_prompt_attention_mask = uncond_input.attention_mask negative_prompt_attention_mask = negative_prompt_attention_mask.to(device) negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None return (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator 
= 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, negative_prompt, callback_steps, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError('Must provide `prompt_attention_mask` when specifying `prompt_embeds`.') if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError('Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError(f'`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask` {negative_prompt_attention_mask.shape}.') def _text_preprocessing(self, text, clean_caption=False): if clean_caption and (not is_bs4_available()): logger.warning(BACKENDS_MAPPING['bs4'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if clean_caption and (not is_ftfy_available()): logger.warning(BACKENDS_MAPPING['ftfy'][-1].format('Setting `clean_caption=True`')) logger.warning('Setting `clean_caption` to False...') clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] def _clean_caption(self, caption): 
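# Overview: identical to PixArtAlphaPipeline._clean_caption above; see the comments there for a
# step-by-step description of the caption-cleaning heuristics.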
caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub('<person>', 'person', caption) caption = re.sub('\\b((?:https?:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = re.sub('\\b((?:www:(?:\\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\\w/-]*\\b\\/?(?!@)))', '', caption) caption = BeautifulSoup(caption, features='html.parser').text caption = re.sub('@[\\w\\d]+\\b', '', caption) caption = re.sub('[\\u31c0-\\u31ef]+', '', caption) caption = re.sub('[\\u31f0-\\u31ff]+', '', caption) caption = re.sub('[\\u3200-\\u32ff]+', '', caption) caption = re.sub('[\\u3300-\\u33ff]+', '', caption) caption = re.sub('[\\u3400-\\u4dbf]+', '', caption) caption = re.sub('[\\u4dc0-\\u4dff]+', '', caption) caption = re.sub('[\\u4e00-\\u9fff]+', '', caption) caption = re.sub('[\\u002D\\u058A\\u05BE\\u1400\\u1806\\u2010-\\u2015\\u2E17\\u2E1A\\u2E3A\\u2E3B\\u2E40\\u301C\\u3030\\u30A0\\uFE31\\uFE32\\uFE58\\uFE63\\uFF0D]+', '-', caption) caption = re.sub('[`´«»“”¨]', '"', caption) caption = re.sub('[‘’]', "'", caption) caption = re.sub('&quot;?', '', caption) caption = re.sub('&amp', '', caption) caption = re.sub('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', ' ', caption) caption = re.sub('\\d:\\d\\d\\s+$', '', caption) caption = re.sub('\\\\n', ' ', caption) caption = re.sub('#\\d{1,3}\\b', '', caption) caption = re.sub('#\\d{5,}\\b', '', caption) caption = re.sub('\\b\\d{6,}\\b', '', caption) caption = re.sub('[\\S]+\\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption) caption = re.sub('[\\"\\\']{2,}', '"', caption) caption = re.sub('[\\.]{2,}', ' ', caption) caption = re.sub(self.bad_punct_regex, ' ', caption) caption = re.sub('\\s+\\.\\s+', ' ', caption) regex2 = re.compile('(?:\\-|\\_)') if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, ' ', caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub('\\b[a-zA-Z]{1,3}\\d{3,15}\\b', '', caption) caption = re.sub('\\b[a-zA-Z]+\\d+[a-zA-Z]+\\b', '', caption) caption = re.sub('\\b\\d+[a-zA-Z]+\\d+\\b', '', caption) caption = re.sub('(worldwide\\s+)?(free\\s+)?shipping', '', caption) caption = re.sub('(free\\s)?download(\\sfree)?', '', caption) caption = re.sub('\\bclick\\b\\s(?:for|on)\\s\\w+', '', caption) caption = re.sub('\\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\\simage[s]?)?', '', caption) caption = re.sub('\\bpage\\s+\\d+\\b', '', caption) caption = re.sub('\\b\\d*[a-zA-Z]+\\d+[a-zA-Z]+\\d+[a-zA-Z\\d]*\\b', ' ', caption) caption = re.sub('\\b\\d+\\.?\\d*[xх×]\\d+\\.?\\d*\\b', '', caption) caption = re.sub('\\b\\s+\\:\\s+', ': ', caption) caption = re.sub('(\\D[,\\./])\\b', '\\1 ', caption) caption = re.sub('\\s+', ' ', caption) caption = caption.strip() caption = re.sub('^[\\"\\\']([\\w\\W]+)[\\"\\\']$', '\\1', caption) caption = re.sub("^[\\'\\_,\\-\\:;]", '', caption) caption = re.sub("[\\'\\_,\\-\\:\\-\\+]$", '', caption) caption = re.sub('^\\.\\S+$', '', caption) return caption.strip() def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, negative_prompt: str='', num_inference_steps: int=20, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=4.5, num_images_per_prompt: Optional[int]=1, height: Optional[int]=None, width: Optional[int]=None, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_attention_mask: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_attention_mask: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, clean_caption: bool=True, use_resolution_binning: bool=True, max_sequence_length: int=300, **kwargs) -> Union[ImagePipelineOutput, Tuple]: height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor if use_resolution_binning: if self.transformer.config.sample_size == 256: aspect_ratio_bin = ASPECT_RATIO_2048_BIN elif self.transformer.config.sample_size == 128: aspect_ratio_bin = ASPECT_RATIO_1024_BIN elif self.transformer.config.sample_size == 64: aspect_ratio_bin = ASPECT_RATIO_512_BIN elif self.transformer.config.sample_size == 32: aspect_ratio_bin = ASPECT_RATIO_256_BIN else: raise ValueError('Invalid sample size') (orig_height, orig_width) = (height, width) (height, width) = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) self.check_inputs(prompt, height, width, negative_prompt, callback_steps, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask) = self.encode_prompt(prompt, do_classifier_free_guidance, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, clean_caption=clean_caption, max_sequence_length=max_sequence_length) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) latent_channels = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, latent_channels, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'resolution': None, 
'aspect_ratio': None} num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) current_timestep = t if not torch.is_tensor(current_timestep): is_mps = latent_model_input.device.type == 'mps' if isinstance(current_timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) elif len(current_timestep.shape) == 0: current_timestep = current_timestep[None].to(latent_model_input.device) current_timestep = current_timestep.expand(latent_model_input.shape[0]) noise_pred = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, timestep=current_timestep, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.transformer.config.out_channels // 2 == latent_channels: noise_pred = noise_pred.chunk(2, dim=1)[0] else: noise_pred = noise_pred latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if use_resolution_binning: image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height) else: image = latents if not output_type == 'latent': image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_output'] = ['SemanticStableDiffusionPipelineOutput'] _import_structure['pipeline_semantic_stable_diffusion'] = ['SemanticStableDiffusionPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in 
_dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class SemanticStableDiffusionPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] # File: diffusers-main/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py import inspect from itertools import repeat from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, UNet2DConditionModel from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import KarrasDiffusionSchedulers from ...utils import deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import SemanticStableDiffusionPipelineOutput logger = logging.get_logger(__name__) class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, editing_prompt: Optional[Union[str, List[str]]]=None, editing_prompt_embeddings: Optional[torch.Tensor]=None, reverse_editing_direction: Optional[Union[bool, List[bool]]]=False, edit_guidance_scale: Optional[Union[float, List[float]]]=5, edit_warmup_steps: Optional[Union[int, List[int]]]=10, edit_cooldown_steps: Optional[Union[int, List[int]]]=None, edit_threshold: Optional[Union[float, List[float]]]=0.9, edit_momentum_scale: Optional[float]=0.1, edit_mom_beta: Optional[float]=0.4, edit_weights: Optional[List[float]]=None, sem_guidance: Optional[List[torch.Tensor]]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps) batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device if editing_prompt: enable_edit_guidance = True if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] enabled_editing_prompts = len(editing_prompt) elif editing_prompt_embeddings is not None: enable_edit_guidance = True enabled_editing_prompts = editing_prompt_embeddings.shape[0] else: enabled_editing_prompts = 0 enable_edit_guidance = False text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt') text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = 
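# Editor's sketch (illustrative, not part of the diffusers source): for a Stable Diffusion v1.x VAE the
# config has four block_out_channels entries, so self.vae_scale_factor = 2 ** (4 - 1) = 8 and prepare_latents
# above draws noise of shape (batch * num_images_per_prompt, unet.config.in_channels, height // 8, width // 8).
# The values below (4-entry block_out_channels, in_channels=4, 512x512) are assumptions for the arithmetic only.
import torch
block_out_channels = (128, 256, 512, 512)      # hypothetical 4-entry VAE config
vae_scale_factor = 2 ** (len(block_out_channels) - 1)
batch_size, num_images_per_prompt, in_channels = 1, 1, 4
height = width = 512
shape = (batch_size * num_images_per_prompt, in_channels, height // vae_scale_factor, width // vae_scale_factor)
latents = torch.randn(shape)                   # stands in for randn_tensor(shape, generator=..., ...)
# The pipeline then multiplies by scheduler.init_noise_sigma (1.0 for many schedulers, larger for
# Karras-sigma schedulers) so the initial sample matches the scheduler's expected noise scale.
print(latents.shape)                           # torch.Size([1, 4, 64, 64])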
self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length:]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] text_embeddings = self.text_encoder(text_input_ids.to(device))[0] (bs_embed, seq_len, _) = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if enable_edit_guidance: if editing_prompt_embeddings is None: edit_concepts_input = self.tokenizer([x for item in editing_prompt for x in repeat(item, batch_size)], padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt') edit_concepts_input_ids = edit_concepts_input.input_ids if edit_concepts_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(edit_concepts_input_ids[:, self.tokenizer.model_max_length:]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') edit_concepts_input_ids = edit_concepts_input_ids[:, :self.tokenizer.model_max_length] edit_concepts = self.text_encoder(edit_concepts_input_ids.to(device))[0] else: edit_concepts = editing_prompt_embeddings.to(device).repeat(batch_size, 1, 1) (bs_embed_edit, seq_len_edit, _) = edit_concepts.shape edit_concepts = edit_concepts.repeat(1, num_images_per_prompt, 1) edit_concepts = edit_concepts.view(bs_embed_edit * num_images_per_prompt, seq_len_edit, -1) do_classifier_free_guidance = guidance_scale > 1.0 if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(device))[0] seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) if enable_edit_guidance: text_embeddings = torch.cat([uncond_embeddings, text_embeddings, edit_concepts]) else: text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, text_embeddings.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) edit_momentum = None self.uncond_estimates = None self.text_estimates = None self.edit_estimates = None self.sem_guidance = None for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * (2 + enabled_editing_prompts)) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample if do_classifier_free_guidance: noise_pred_out = noise_pred.chunk(2 + enabled_editing_prompts) (noise_pred_uncond, noise_pred_text) = (noise_pred_out[0], noise_pred_out[1]) noise_pred_edit_concepts = noise_pred_out[2:] noise_guidance = guidance_scale * (noise_pred_text - noise_pred_uncond) if self.uncond_estimates is None: self.uncond_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_uncond.shape)) self.uncond_estimates[i] = noise_pred_uncond.detach().cpu() if self.text_estimates is None: self.text_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape)) self.text_estimates[i] = noise_pred_text.detach().cpu() if self.edit_estimates is None and enable_edit_guidance: self.edit_estimates = torch.zeros((num_inference_steps + 1, len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape)) if self.sem_guidance is None: self.sem_guidance = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape)) if edit_momentum is None: edit_momentum = torch.zeros_like(noise_guidance) if enable_edit_guidance: concept_weights = torch.zeros((len(noise_pred_edit_concepts), noise_guidance.shape[0]), device=device, dtype=noise_guidance.dtype) noise_guidance_edit = torch.zeros((len(noise_pred_edit_concepts), *noise_guidance.shape), device=device, dtype=noise_guidance.dtype) warmup_inds = [] for (c, noise_pred_edit_concept) in enumerate(noise_pred_edit_concepts): self.edit_estimates[i, c] = noise_pred_edit_concept if isinstance(edit_guidance_scale, list): edit_guidance_scale_c = edit_guidance_scale[c] else: edit_guidance_scale_c = edit_guidance_scale if isinstance(edit_threshold, list): edit_threshold_c = edit_threshold[c] else: edit_threshold_c = edit_threshold if isinstance(reverse_editing_direction, list): reverse_editing_direction_c = reverse_editing_direction[c] else: reverse_editing_direction_c = reverse_editing_direction if edit_weights: edit_weight_c = edit_weights[c] else: edit_weight_c = 1.0 if 
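# Editor's note (illustrative): once classifier-free guidance is active, the embeddings above are concatenated
# as [uncond, text, edit_concept_1, ..., edit_concept_n], so every denoising step below runs the UNet on
# 2 + enabled_editing_prompts copies of the latents and chunks the prediction back apart. For example:
enabled_editing_prompts = 2                    # hypothetical: two editing prompts
unet_batch_multiplier = 2 + enabled_editing_prompts
print(unet_batch_multiplier)                   # 4 latent copies per step: uncond, text, and one per concept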
isinstance(edit_warmup_steps, list): edit_warmup_steps_c = edit_warmup_steps[c] else: edit_warmup_steps_c = edit_warmup_steps if isinstance(edit_cooldown_steps, list): edit_cooldown_steps_c = edit_cooldown_steps[c] elif edit_cooldown_steps is None: edit_cooldown_steps_c = i + 1 else: edit_cooldown_steps_c = edit_cooldown_steps if i >= edit_warmup_steps_c: warmup_inds.append(c) if i >= edit_cooldown_steps_c: noise_guidance_edit[c, :, :, :, :] = torch.zeros_like(noise_pred_edit_concept) continue noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond tmp_weights = (noise_guidance - noise_pred_edit_concept).sum(dim=(1, 2, 3)) tmp_weights = torch.full_like(tmp_weights, edit_weight_c) if reverse_editing_direction_c: noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 concept_weights[c, :] = tmp_weights noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c if noise_guidance_edit_tmp.dtype == torch.float32: tmp = torch.quantile(torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False) else: tmp = torch.quantile(torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False).to(noise_guidance_edit_tmp.dtype) noise_guidance_edit_tmp = torch.where(torch.abs(noise_guidance_edit_tmp) >= tmp[:, :, None, None], noise_guidance_edit_tmp, torch.zeros_like(noise_guidance_edit_tmp)) noise_guidance_edit[c, :, :, :, :] = noise_guidance_edit_tmp warmup_inds = torch.tensor(warmup_inds).to(device) if len(noise_pred_edit_concepts) > warmup_inds.shape[0] > 0: concept_weights = concept_weights.to('cpu') noise_guidance_edit = noise_guidance_edit.to('cpu') concept_weights_tmp = torch.index_select(concept_weights.to(device), 0, warmup_inds) concept_weights_tmp = torch.where(concept_weights_tmp < 0, torch.zeros_like(concept_weights_tmp), concept_weights_tmp) concept_weights_tmp = concept_weights_tmp / concept_weights_tmp.sum(dim=0) noise_guidance_edit_tmp = torch.index_select(noise_guidance_edit.to(device), 0, warmup_inds) noise_guidance_edit_tmp = torch.einsum('cb,cbijk->bijk', concept_weights_tmp, noise_guidance_edit_tmp) noise_guidance = noise_guidance + noise_guidance_edit_tmp self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu() del noise_guidance_edit_tmp del concept_weights_tmp concept_weights = concept_weights.to(device) noise_guidance_edit = noise_guidance_edit.to(device) concept_weights = torch.where(concept_weights < 0, torch.zeros_like(concept_weights), concept_weights) concept_weights = torch.nan_to_num(concept_weights) noise_guidance_edit = torch.einsum('cb,cbijk->bijk', concept_weights, noise_guidance_edit) noise_guidance_edit = noise_guidance_edit.to(edit_momentum.device) noise_guidance_edit = noise_guidance_edit + edit_momentum_scale * edit_momentum edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * noise_guidance_edit if warmup_inds.shape[0] == len(noise_pred_edit_concepts): noise_guidance = noise_guidance + noise_guidance_edit self.sem_guidance[i] = noise_guidance_edit.detach().cpu() if sem_guidance is not None: edit_guidance = sem_guidance[i].to(device) noise_guidance = noise_guidance + edit_guidance noise_pred = noise_pred_uncond + noise_guidance latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / 
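# Editor's sketch (illustrative, not part of the pipeline): the per-concept edit guidance above keeps only the
# strongest activations. Each concept direction (noise_pred_edit_concept - noise_pred_uncond) is scaled by
# edit_guidance_scale, everything below the per-sample edit_threshold quantile of its absolute values is zeroed,
# the surviving directions are combined with warm-up gating, and the result is smoothed by an exponential-moving-
# average momentum controlled by edit_mom_beta. A minimal standalone demonstration of the thresholding step:
import torch
guidance = torch.randn(2, 4, 64, 64)                        # dummy per-concept guidance (batch, C, H, W)
edit_threshold = 0.9
tmp = torch.quantile(guidance.abs().flatten(start_dim=2), edit_threshold, dim=2, keepdim=False)
masked = torch.where(guidance.abs() >= tmp[:, :, None, None], guidance, torch.zeros_like(guidance))
print((masked != 0).float().mean())                         # roughly 0.1 of the entries survive the mask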
self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, text_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return SemanticStableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/shap_e/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['camera'] = ['create_pan_cameras'] _import_structure['pipeline_shap_e'] = ['ShapEPipeline'] _import_structure['pipeline_shap_e_img2img'] = ['ShapEImg2ImgPipeline'] _import_structure['renderer'] = ['BoundingBoxVolume', 'ImportanceRaySampler', 'MLPNeRFModelOutput', 'MLPNeRSTFModel', 'ShapEParamsProjModel', 'ShapERenderer', 'StratifiedRaySampler', 'VoidNeRFModel'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline from .renderer import BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/shap_e/camera.py from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class DifferentiableProjectiveCamera: origin: torch.Tensor x: torch.Tensor y: torch.Tensor z: torch.Tensor width: int height: int x_fov: float y_fov: float shape: Tuple[int] def __post_init__(self): assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2 def resolution(self): return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32)) def fov(self): return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32)) def get_image_coords(self) -> torch.Tensor: pixel_indices = torch.arange(self.height * self.width) coords = torch.stack([pixel_indices % self.width, torch.div(pixel_indices, self.width, rounding_mode='trunc')], axis=1) return coords @property def camera_rays(self): (batch_size, *inner_shape) = self.shape inner_batch_size = int(np.prod(inner_shape)) coords = 
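# Illustrative usage sketch for SemanticStableDiffusionPipeline (editor-added; the checkpoint id and the edit
# settings are assumptions, not values taken from this file). Any Stable Diffusion v1.x checkpoint should load,
# and the editing_prompt / edit_* arguments map directly onto the __call__ signature defined above.
import torch
from diffusers import SemanticStableDiffusionPipeline

pipe = SemanticStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # hypothetical checkpoint choice
).to("cuda")
out = pipe(
    prompt="a photo of the face of a woman",
    num_inference_steps=50,
    guidance_scale=7.5,
    editing_prompt=["smiling, smile", "glasses, wearing glasses"],  # concepts to push toward
    reverse_editing_direction=[False, False],                       # True would remove a concept instead
    edit_guidance_scale=[6.0, 5.0],
    edit_threshold=[0.95, 0.95],
    edit_warmup_steps=[10, 10],
    edit_momentum_scale=0.3,
    edit_mom_beta=0.6,
    generator=torch.Generator("cuda").manual_seed(0),
)
image = out.images[0]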
self.get_image_coords() coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape]) rays = self.get_camera_rays(coords) rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3) return rays def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor: (batch_size, *shape, n_coords) = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] flat = coords.view(batch_size, -1, 2) res = self.resolution() fov = self.fov() fracs = flat.float() / (res - 1) * 2 - 1 fracs = fracs * torch.tan(fov / 2) fracs = fracs.view(batch_size, -1, 2) directions = self.z.view(batch_size, 1, 3) + self.x.view(batch_size, 1, 3) * fracs[:, :, :1] + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:] directions = directions / directions.norm(dim=-1, keepdim=True) rays = torch.stack([torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]), directions], dim=2) return rays.view(batch_size, *shape, 2, 3) def resize_image(self, width: int, height: int) -> 'DifferentiableProjectiveCamera': assert width * self.height == height * self.width, 'The aspect ratio should not change.' return DifferentiableProjectiveCamera(origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov) def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera: origins = [] xs = [] ys = [] zs = [] for theta in np.linspace(0, 2 * np.pi, num=20): z = np.array([np.sin(theta), np.cos(theta), -0.5]) z /= np.sqrt(np.sum(z ** 2)) origin = -z * 4 x = np.array([np.cos(theta), -np.sin(theta), 0.0]) y = np.cross(z, x) origins.append(origin) xs.append(x) ys.append(y) zs.append(z) return DifferentiableProjectiveCamera(origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs))) # File: diffusers-main/src/diffusers/pipelines/shap_e/pipeline_shap_e.py import math from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from ...models import PriorTransformer from ...schedulers import HeunDiscreteScheduler from ...utils import BaseOutput, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .renderer import ShapERenderer logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 15.0\n >>> prompt = "a shark"\n\n >>> images = pipe(\n ... prompt,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... 
).images\n\n >>> gif_path = export_to_gif(images[0], "shark_3d.gif")\n ```\n' @dataclass class ShapEPipelineOutput(BaseOutput): images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]] class ShapEPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->prior' _exclude_from_cpu_offload = ['shap_e_renderer'] def __init__(self, prior: PriorTransformer, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, scheduler: HeunDiscreteScheduler, shap_e_renderer: ShapERenderer): super().__init__() self.register_modules(prior=prior, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, shap_e_renderer=shap_e_renderer) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): len(prompt) if isinstance(prompt, list) else 1 self.tokenizer.pad_token_id = 0 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(prompt_embeds) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_embeds = math.sqrt(prompt_embeds.shape[1]) * prompt_embeds return prompt_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: str, num_images_per_prompt: int=1, num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, guidance_scale: float=4.0, frame_size: int=64, output_type: Optional[str]='pil', return_dict: bool=True): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_embeddings = self.prior.config.num_embeddings embedding_dim = self.prior.config.embedding_dim latents = self.prepare_latents((batch_size, num_embeddings * embedding_dim), prompt_embeds.dtype, device, generator, latents, self.scheduler) latents = 
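# Editor's note (illustrative): after denoising, each latent row is rendered by ShapERenderer.decode_to_image
# over the fixed pan camera from camera.py above. create_pan_cameras(frame_size) places 20 poses on a circle
# looking at the origin (camera.shape == (1, 20)), and camera_rays packs ray origins and unit directions into a
# single (1, 20 * frame_size**2, 2, 3) tensor that decode_to_image later reshapes back into 20 frames.
# A minimal shape check, assuming the module path used in this file:
import torch
from diffusers.pipelines.shap_e.camera import create_pan_cameras
camera = create_pan_cameras(64)          # 20 camera poses around the object, 64x64 pixels each
rays = camera.camera_rays                # origins and directions stacked along dim -2
print(camera.shape, rays.shape)          # (1, 20) torch.Size([1, 81920, 2, 3]); 20 * 64 * 64 = 81920 rays
origins, directions = rays[..., 0, :], rays[..., 1, :]
print(directions.norm(dim=-1).mean())    # directions are normalized to unit length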
latents.reshape(latents.shape[0], num_embeddings, embedding_dim) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.prior(scaled_model_input, timestep=t, proj_embedding=prompt_embeds).predicted_image_embedding (noise_pred, _) = noise_pred.split(scaled_model_input.shape[2], dim=2) if do_classifier_free_guidance: (noise_pred_uncond, noise_pred) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) latents = self.scheduler.step(noise_pred, timestep=t, sample=latents).prev_sample self.maybe_free_model_hooks() if output_type not in ['np', 'pil', 'latent', 'mesh']: raise ValueError(f'Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}') if output_type == 'latent': return ShapEPipelineOutput(images=latents) images = [] if output_type == 'mesh': for (i, latent) in enumerate(latents): mesh = self.shap_e_renderer.decode_to_mesh(latent[None, :], device) images.append(mesh) else: for (i, latent) in enumerate(latents): image = self.shap_e_renderer.decode_to_image(latent[None, :], device, size=frame_size) images.append(image) images = torch.stack(images) images = images.cpu().numpy() if output_type == 'pil': images = [self.numpy_to_pil(image) for image in images] if not return_dict: return (images,) return ShapEPipelineOutput(images=images) # File: diffusers-main/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...schedulers import HeunDiscreteScheduler from ...utils import BaseOutput, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .renderer import ShapERenderer logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... 
).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class ShapEPipelineOutput(BaseOutput): images: Union[PIL.Image.Image, np.ndarray] class ShapEImg2ImgPipeline(DiffusionPipeline): model_cpu_offload_seq = 'image_encoder->prior' _exclude_from_cpu_offload = ['shap_e_renderer'] def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, shap_e_renderer: ShapERenderer): super().__init__() self.register_modules(prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, shap_e_renderer=shap_e_renderer) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): if isinstance(image, List) and isinstance(image[0], torch.Tensor): image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) if not isinstance(image, torch.Tensor): image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0) image = image.to(dtype=self.image_encoder.dtype, device=device) image_embeds = self.image_encoder(image)['last_hidden_state'] image_embeds = image_embeds[:, 1:, :].contiguous() image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: negative_image_embeds = torch.zeros_like(image_embeds) image_embeds = torch.cat([negative_image_embeds, image_embeds]) return image_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int=1, num_inference_steps: int=25, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, guidance_scale: float=4.0, frame_size: int=64, output_type: Optional[str]='pil', return_dict: bool=True): if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, torch.Tensor): batch_size = image.shape[0] elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)): batch_size = len(image) else: raise ValueError(f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}') device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_embeddings = self.prior.config.num_embeddings embedding_dim = self.prior.config.embedding_dim if latents is None: latents = self.prepare_latents((batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler) latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = 
self.prior(scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding (noise_pred, _) = noise_pred.split(scaled_model_input.shape[2], dim=2) if do_classifier_free_guidance: (noise_pred_uncond, noise_pred) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) latents = self.scheduler.step(noise_pred, timestep=t, sample=latents).prev_sample if output_type not in ['np', 'pil', 'latent', 'mesh']: raise ValueError(f'Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}') self.maybe_free_model_hooks() if output_type == 'latent': return ShapEPipelineOutput(images=latents) images = [] if output_type == 'mesh': for (i, latent) in enumerate(latents): mesh = self.shap_e_renderer.decode_to_mesh(latent[None, :], device) images.append(mesh) else: for (i, latent) in enumerate(latents): image = self.shap_e_renderer.decode_to_image(latent[None, :], device, size=frame_size) images.append(image) images = torch.stack(images) images = images.cpu().numpy() if output_type == 'pil': images = [self.numpy_to_pil(image) for image in images] if not return_dict: return (images,) return ShapEPipelineOutput(images=images) # File: diffusers-main/src/diffusers/pipelines/shap_e/renderer.py import math from dataclasses import dataclass from typing import Dict, Optional, Tuple import numpy as np import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin from ...utils import BaseOutput from .camera import create_pan_cameras def sample_pmf(pmf: torch.Tensor, n_samples: int) -> torch.Tensor: (*shape, support_size, last_dim) = pmf.shape assert last_dim == 1 cdf = torch.cumsum(pmf.view(-1, support_size), dim=1) inds = torch.searchsorted(cdf, torch.rand(cdf.shape[0], n_samples, device=cdf.device)) return inds.view(*shape, n_samples, 1).clamp(0, support_size - 1) def posenc_nerf(x: torch.Tensor, min_deg: int=0, max_deg: int=15) -> torch.Tensor: if min_deg == max_deg: return x scales = 2.0 ** torch.arange(min_deg, max_deg, dtype=x.dtype, device=x.device) (*shape, dim) = x.shape xb = (x.reshape(-1, 1, dim) * scales.view(1, -1, 1)).reshape(*shape, -1) assert xb.shape[-1] == dim * (max_deg - min_deg) emb = torch.cat([xb, xb + math.pi / 2.0], axis=-1).sin() return torch.cat([x, emb], dim=-1) def encode_position(position): return posenc_nerf(position, min_deg=0, max_deg=15) def encode_direction(position, direction=None): if direction is None: return torch.zeros_like(posenc_nerf(position, min_deg=0, max_deg=8)) else: return posenc_nerf(direction, min_deg=0, max_deg=8) def _sanitize_name(x: str) -> str: return x.replace('.', '__') def integrate_samples(volume_range, ts, density, channels): (_, _, dt) = volume_range.partition(ts) ddensity = density * dt mass = torch.cumsum(ddensity, dim=-2) transmittance = torch.exp(-mass[..., -1, :]) alphas = 1.0 - torch.exp(-ddensity) Ts = torch.exp(torch.cat([torch.zeros_like(mass[..., :1, :]), -mass[..., :-1, :]], dim=-2)) weights = alphas * Ts channels = torch.sum(channels * weights, dim=-2) return (channels, weights, transmittance) def volume_query_points(volume, grid_size): indices = torch.arange(grid_size ** 3, device=volume.bbox_min.device) zs = indices % grid_size ys = torch.div(indices, grid_size, rounding_mode='trunc') % grid_size xs = torch.div(indices, grid_size ** 2, rounding_mode='trunc') % grid_size combined = torch.stack([xs, ys, zs], dim=1) return 
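# Editor's sketch (illustrative): posenc_nerf above concatenates the input with sin/cos features over
# max_deg - min_deg frequency octaves, so encode_position maps a 3-D point to 3 + 2 * 3 * 15 = 93 features and
# encode_direction maps a 3-D direction to 3 + 2 * 3 * 8 = 51 features. The 93 matches the (256, 93) first-layer
# shape in the default param_shapes further down in this file. Standalone dimensionality check:
import math
import torch
x = torch.zeros(1, 3)
scales = 2.0 ** torch.arange(0, 15, dtype=torch.float32)   # 15 octaves for positions (min_deg=0, max_deg=15)
xb = (x.reshape(-1, 1, 3) * scales.view(1, -1, 1)).reshape(1, -1)
emb = torch.cat([xb, xb + math.pi / 2.0], dim=-1).sin()    # sin(x) together with sin(x + pi/2) == cos(x)
print(torch.cat([x, emb], dim=-1).shape)                   # torch.Size([1, 93]); with max_deg=8 it would be (1, 51)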
combined.float() / (grid_size - 1) * (volume.bbox_max - volume.bbox_min) + volume.bbox_min def _convert_srgb_to_linear(u: torch.Tensor): return torch.where(u <= 0.04045, u / 12.92, ((u + 0.055) / 1.055) ** 2.4) def _create_flat_edge_indices(flat_cube_indices: torch.Tensor, grid_size: Tuple[int, int, int]): num_xs = (grid_size[0] - 1) * grid_size[1] * grid_size[2] y_offset = num_xs num_ys = grid_size[0] * (grid_size[1] - 1) * grid_size[2] z_offset = num_xs + num_ys return torch.stack([flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + flat_cube_indices[:, 1] * grid_size[2] + flat_cube_indices[:, 2], flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + (flat_cube_indices[:, 1] + 1) * grid_size[2] + flat_cube_indices[:, 2], flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + flat_cube_indices[:, 1] * grid_size[2] + flat_cube_indices[:, 2] + 1, flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + (flat_cube_indices[:, 1] + 1) * grid_size[2] + flat_cube_indices[:, 2] + 1, y_offset + flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2] + flat_cube_indices[:, 1] * grid_size[2] + flat_cube_indices[:, 2], y_offset + (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2] + flat_cube_indices[:, 1] * grid_size[2] + flat_cube_indices[:, 2], y_offset + flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2] + flat_cube_indices[:, 1] * grid_size[2] + flat_cube_indices[:, 2] + 1, y_offset + (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2] + flat_cube_indices[:, 1] * grid_size[2] + flat_cube_indices[:, 2] + 1, z_offset + flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1) + flat_cube_indices[:, 1] * (grid_size[2] - 1) + flat_cube_indices[:, 2], z_offset + (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1) + flat_cube_indices[:, 1] * (grid_size[2] - 1) + flat_cube_indices[:, 2], z_offset + flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1) + (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1) + flat_cube_indices[:, 2], z_offset + (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1) + (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1) + flat_cube_indices[:, 2]], dim=-1) class VoidNeRFModel(nn.Module): def __init__(self, background, channel_scale=255.0): super().__init__() background = nn.Parameter(torch.from_numpy(np.array(background)).to(dtype=torch.float32) / channel_scale) self.register_buffer('background', background) def forward(self, position): background = self.background[None].to(position.device) shape = position.shape[:-1] ones = [1] * (len(shape) - 1) n_channels = background.shape[-1] background = torch.broadcast_to(background.view(background.shape[0], *ones, n_channels), [*shape, n_channels]) return background @dataclass class VolumeRange: t0: torch.Tensor t1: torch.Tensor intersected: torch.Tensor def __post_init__(self): assert self.t0.shape == self.t1.shape == self.intersected.shape def partition(self, ts): mids = (ts[..., 1:, :] + ts[..., :-1, :]) * 0.5 lower = torch.cat([self.t0[..., None, :], mids], dim=-2) upper = torch.cat([mids, self.t1[..., None, :]], dim=-2) delta = upper - lower assert lower.shape == upper.shape == delta.shape == ts.shape return (lower, upper, delta) class BoundingBoxVolume(nn.Module): def __init__(self, *, bbox_min, bbox_max, min_dist: float=0.0, min_t_range: float=0.001): super().__init__() self.min_dist = min_dist self.min_t_range = min_t_range self.bbox_min = torch.tensor(bbox_min) self.bbox_max = torch.tensor(bbox_max) self.bbox = torch.stack([self.bbox_min, 
self.bbox_max]) assert self.bbox.shape == (2, 3) assert min_dist >= 0.0 assert min_t_range > 0.0 def intersect(self, origin: torch.Tensor, direction: torch.Tensor, t0_lower: Optional[torch.Tensor]=None, epsilon=1e-06): (batch_size, *shape, _) = origin.shape ones = [1] * len(shape) bbox = self.bbox.view(1, *ones, 2, 3).to(origin.device) def _safe_divide(a, b, epsilon=1e-06): return a / torch.where(b < 0, b - epsilon, b + epsilon) ts = _safe_divide(bbox - origin[..., None, :], direction[..., None, :], epsilon=epsilon) t0 = ts.min(dim=-2).values.max(dim=-1, keepdim=True).values.clamp(self.min_dist) t1 = ts.max(dim=-2).values.min(dim=-1, keepdim=True).values assert t0.shape == t1.shape == (batch_size, *shape, 1) if t0_lower is not None: assert t0.shape == t0_lower.shape t0 = torch.maximum(t0, t0_lower) intersected = t0 + self.min_t_range < t1 t0 = torch.where(intersected, t0, torch.zeros_like(t0)) t1 = torch.where(intersected, t1, torch.ones_like(t1)) return VolumeRange(t0=t0, t1=t1, intersected=intersected) class StratifiedRaySampler(nn.Module): def __init__(self, depth_mode: str='linear'): self.depth_mode = depth_mode assert self.depth_mode in ('linear', 'geometric', 'harmonic') def sample(self, t0: torch.Tensor, t1: torch.Tensor, n_samples: int, epsilon: float=0.001) -> torch.Tensor: ones = [1] * (len(t0.shape) - 1) ts = torch.linspace(0, 1, n_samples).view(*ones, n_samples).to(t0.dtype).to(t0.device) if self.depth_mode == 'linear': ts = t0 * (1.0 - ts) + t1 * ts elif self.depth_mode == 'geometric': ts = (t0.clamp(epsilon).log() * (1.0 - ts) + t1.clamp(epsilon).log() * ts).exp() elif self.depth_mode == 'harmonic': ts = 1.0 / (1.0 / t0.clamp(epsilon) * (1.0 - ts) + 1.0 / t1.clamp(epsilon) * ts) mids = 0.5 * (ts[..., 1:] + ts[..., :-1]) upper = torch.cat([mids, t1], dim=-1) lower = torch.cat([t0, mids], dim=-1) torch.manual_seed(0) t_rand = torch.rand_like(ts) ts = lower + (upper - lower) * t_rand return ts.unsqueeze(-1) class ImportanceRaySampler(nn.Module): def __init__(self, volume_range: VolumeRange, ts: torch.Tensor, weights: torch.Tensor, blur_pool: bool=False, alpha: float=1e-05): self.volume_range = volume_range self.ts = ts.clone().detach() self.weights = weights.clone().detach() self.blur_pool = blur_pool self.alpha = alpha @torch.no_grad() def sample(self, t0: torch.Tensor, t1: torch.Tensor, n_samples: int) -> torch.Tensor: (lower, upper, _) = self.volume_range.partition(self.ts) (batch_size, *shape, n_coarse_samples, _) = self.ts.shape weights = self.weights if self.blur_pool: padded = torch.cat([weights[..., :1, :], weights, weights[..., -1:, :]], dim=-2) maxes = torch.maximum(padded[..., :-1, :], padded[..., 1:, :]) weights = 0.5 * (maxes[..., :-1, :] + maxes[..., 1:, :]) weights = weights + self.alpha pmf = weights / weights.sum(dim=-2, keepdim=True) inds = sample_pmf(pmf, n_samples) assert inds.shape == (batch_size, *shape, n_samples, 1) assert (inds >= 0).all() and (inds < n_coarse_samples).all() t_rand = torch.rand(inds.shape, device=inds.device) lower_ = torch.gather(lower, -2, inds) upper_ = torch.gather(upper, -2, inds) ts = lower_ + (upper_ - lower_) * t_rand ts = torch.sort(ts, dim=-2).values return ts @dataclass class MeshDecoderOutput(BaseOutput): verts: torch.Tensor faces: torch.Tensor vertex_channels: Dict[str, torch.Tensor] class MeshDecoder(nn.Module): def __init__(self): super().__init__() cases = torch.zeros(256, 5, 3, dtype=torch.long) masks = torch.zeros(256, 5, dtype=torch.bool) self.register_buffer('cases', cases) self.register_buffer('masks', masks) def 
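# Editor's sketch (illustrative): ImportanceRaySampler above resamples depths in proportion to the rendering
# weights from the coarse pass. The core mechanism is sample_pmf earlier in this file: build a CDF with cumsum
# and invert it with searchsorted. A minimal standalone demonstration on a toy PMF for a single ray:
import torch
weights = torch.tensor([[0.05, 0.05, 0.8, 0.1]])                     # toy per-bin weights
pmf = (weights / weights.sum(dim=-1, keepdim=True)).unsqueeze(-1)    # shape (1, support_size, 1), as sample_pmf expects
cdf = torch.cumsum(pmf.view(-1, pmf.shape[-2]), dim=1)
inds = torch.searchsorted(cdf, torch.rand(cdf.shape[0], 1000)).clamp(0, pmf.shape[-2] - 1)
print(torch.bincount(inds.view(-1), minlength=4) / 1000.0)           # roughly [0.05, 0.05, 0.80, 0.10]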
forward(self, field: torch.Tensor, min_point: torch.Tensor, size: torch.Tensor): assert len(field.shape) == 3, 'input must be a 3D scalar field' dev = field.device cases = self.cases.to(dev) masks = self.masks.to(dev) min_point = min_point.to(dev) size = size.to(dev) grid_size = field.shape grid_size_tensor = torch.tensor(grid_size).to(size) bitmasks = (field > 0).to(torch.uint8) bitmasks = bitmasks[:-1, :, :] | bitmasks[1:, :, :] << 1 bitmasks = bitmasks[:, :-1, :] | bitmasks[:, 1:, :] << 2 bitmasks = bitmasks[:, :, :-1] | bitmasks[:, :, 1:] << 4 corner_coords = torch.empty(*grid_size, 3, device=dev, dtype=field.dtype) corner_coords[range(grid_size[0]), :, :, 0] = torch.arange(grid_size[0], device=dev, dtype=field.dtype)[:, None, None] corner_coords[:, range(grid_size[1]), :, 1] = torch.arange(grid_size[1], device=dev, dtype=field.dtype)[:, None] corner_coords[:, :, range(grid_size[2]), 2] = torch.arange(grid_size[2], device=dev, dtype=field.dtype) edge_midpoints = torch.cat([((corner_coords[:-1] + corner_coords[1:]) / 2).reshape(-1, 3), ((corner_coords[:, :-1] + corner_coords[:, 1:]) / 2).reshape(-1, 3), ((corner_coords[:, :, :-1] + corner_coords[:, :, 1:]) / 2).reshape(-1, 3)], dim=0) cube_indices = torch.zeros(grid_size[0] - 1, grid_size[1] - 1, grid_size[2] - 1, 3, device=dev, dtype=torch.long) cube_indices[range(grid_size[0] - 1), :, :, 0] = torch.arange(grid_size[0] - 1, device=dev)[:, None, None] cube_indices[:, range(grid_size[1] - 1), :, 1] = torch.arange(grid_size[1] - 1, device=dev)[:, None] cube_indices[:, :, range(grid_size[2] - 1), 2] = torch.arange(grid_size[2] - 1, device=dev) flat_cube_indices = cube_indices.reshape(-1, 3) edge_indices = _create_flat_edge_indices(flat_cube_indices, grid_size) flat_bitmasks = bitmasks.reshape(-1).long() local_tris = cases[flat_bitmasks] local_masks = masks[flat_bitmasks] global_tris = torch.gather(edge_indices, 1, local_tris.reshape(local_tris.shape[0], -1)).reshape(local_tris.shape) selected_tris = global_tris.reshape(-1, 3)[local_masks.reshape(-1)] used_vertex_indices = torch.unique(selected_tris.view(-1)) used_edge_midpoints = edge_midpoints[used_vertex_indices] old_index_to_new_index = torch.zeros(len(edge_midpoints), device=dev, dtype=torch.long) old_index_to_new_index[used_vertex_indices] = torch.arange(len(used_vertex_indices), device=dev, dtype=torch.long) faces = torch.gather(old_index_to_new_index, 0, selected_tris.view(-1)).reshape(selected_tris.shape) v1 = torch.floor(used_edge_midpoints).to(torch.long) v2 = torch.ceil(used_edge_midpoints).to(torch.long) s1 = field[v1[:, 0], v1[:, 1], v1[:, 2]] s2 = field[v2[:, 0], v2[:, 1], v2[:, 2]] p1 = v1.float() / (grid_size_tensor - 1) * size + min_point p2 = v2.float() / (grid_size_tensor - 1) * size + min_point t = (s1 / (s1 - s2))[:, None] verts = t * p2 + (1 - t) * p1 return MeshDecoderOutput(verts=verts, faces=faces, vertex_channels=None) @dataclass class MLPNeRFModelOutput(BaseOutput): density: torch.Tensor signed_distance: torch.Tensor channels: torch.Tensor ts: torch.Tensor class MLPNeRSTFModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, d_hidden: int=256, n_output: int=12, n_hidden_layers: int=6, act_fn: str='swish', insert_direction_at: int=4): super().__init__() dummy = torch.eye(1, 3) d_posenc_pos = encode_position(position=dummy).shape[-1] d_posenc_dir = encode_direction(position=dummy).shape[-1] mlp_widths = [d_hidden] * n_hidden_layers input_widths = [d_posenc_pos] + mlp_widths output_widths = mlp_widths + [n_output] if insert_direction_at is not None: 
input_widths[insert_direction_at] += d_posenc_dir self.mlp = nn.ModuleList([nn.Linear(d_in, d_out) for (d_in, d_out) in zip(input_widths, output_widths)]) if act_fn == 'swish': self.activation = lambda x: F.silu(x) else: raise ValueError(f'Unsupported activation function {act_fn}') self.sdf_activation = torch.tanh self.density_activation = torch.nn.functional.relu self.channel_activation = torch.sigmoid def map_indices_to_keys(self, output): h_map = {'sdf': (0, 1), 'density_coarse': (1, 2), 'density_fine': (2, 3), 'stf': (3, 6), 'nerf_coarse': (6, 9), 'nerf_fine': (9, 12)} mapped_output = {k: output[..., start:end] for (k, (start, end)) in h_map.items()} return mapped_output def forward(self, *, position, direction, ts, nerf_level='coarse', rendering_mode='nerf'): h = encode_position(position) h_preact = h h_directionless = None for (i, layer) in enumerate(self.mlp): if i == self.config.insert_direction_at: h_directionless = h_preact h_direction = encode_direction(position, direction=direction) h = torch.cat([h, h_direction], dim=-1) h = layer(h) h_preact = h if i < len(self.mlp) - 1: h = self.activation(h) h_final = h if h_directionless is None: h_directionless = h_preact activation = self.map_indices_to_keys(h_final) if nerf_level == 'coarse': h_density = activation['density_coarse'] else: h_density = activation['density_fine'] if rendering_mode == 'nerf': if nerf_level == 'coarse': h_channels = activation['nerf_coarse'] else: h_channels = activation['nerf_fine'] elif rendering_mode == 'stf': h_channels = activation['stf'] density = self.density_activation(h_density) signed_distance = self.sdf_activation(activation['sdf']) channels = self.channel_activation(h_channels) return MLPNeRFModelOutput(density=density, signed_distance=signed_distance, channels=channels, ts=ts) class ChannelsProj(nn.Module): def __init__(self, *, vectors: int, channels: int, d_latent: int): super().__init__() self.proj = nn.Linear(d_latent, vectors * channels) self.norm = nn.LayerNorm(channels) self.d_latent = d_latent self.vectors = vectors self.channels = channels def forward(self, x: torch.Tensor) -> torch.Tensor: x_bvd = x w_vcd = self.proj.weight.view(self.vectors, self.channels, self.d_latent) b_vc = self.proj.bias.view(1, self.vectors, self.channels) h = torch.einsum('bvd,vcd->bvc', x_bvd, w_vcd) h = self.norm(h) h = h + b_vc return h class ShapEParamsProjModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, *, param_names: Tuple[str]=('nerstf.mlp.0.weight', 'nerstf.mlp.1.weight', 'nerstf.mlp.2.weight', 'nerstf.mlp.3.weight'), param_shapes: Tuple[Tuple[int]]=((256, 93), (256, 256), (256, 256), (256, 256)), d_latent: int=1024): super().__init__() if len(param_names) != len(param_shapes): raise ValueError('Must provide same number of `param_names` as `param_shapes`') self.projections = nn.ModuleDict({}) for (k, (vectors, channels)) in zip(param_names, param_shapes): self.projections[_sanitize_name(k)] = ChannelsProj(vectors=vectors, channels=channels, d_latent=d_latent) def forward(self, x: torch.Tensor): out = {} start = 0 for (k, shape) in zip(self.config.param_names, self.config.param_shapes): (vectors, _) = shape end = start + vectors x_bvd = x[:, start:end] out[k] = self.projections[_sanitize_name(k)](x_bvd).reshape(len(x), *shape) start = end return out class ShapERenderer(ModelMixin, ConfigMixin): @register_to_config def __init__(self, *, param_names: Tuple[str]=('nerstf.mlp.0.weight', 'nerstf.mlp.1.weight', 'nerstf.mlp.2.weight', 'nerstf.mlp.3.weight'), param_shapes: 
Tuple[Tuple[int]]=((256, 93), (256, 256), (256, 256), (256, 256)), d_latent: int=1024, d_hidden: int=256, n_output: int=12, n_hidden_layers: int=6, act_fn: str='swish', insert_direction_at: int=4, background: Tuple[float]=(255.0, 255.0, 255.0)): super().__init__() self.params_proj = ShapEParamsProjModel(param_names=param_names, param_shapes=param_shapes, d_latent=d_latent) self.mlp = MLPNeRSTFModel(d_hidden, n_output, n_hidden_layers, act_fn, insert_direction_at) self.void = VoidNeRFModel(background=background, channel_scale=255.0) self.volume = BoundingBoxVolume(bbox_max=[1.0, 1.0, 1.0], bbox_min=[-1.0, -1.0, -1.0]) self.mesh_decoder = MeshDecoder() @torch.no_grad() def render_rays(self, rays, sampler, n_samples, prev_model_out=None, render_with_direction=False): (origin, direction) = (rays[..., 0, :], rays[..., 1, :]) vrange = self.volume.intersect(origin, direction, t0_lower=None) ts = sampler.sample(vrange.t0, vrange.t1, n_samples) ts = ts.to(rays.dtype) if prev_model_out is not None: ts = torch.sort(torch.cat([ts, prev_model_out.ts], dim=-2), dim=-2).values (batch_size, *_shape, _t0_dim) = vrange.t0.shape (_, *ts_shape, _ts_dim) = ts.shape directions = torch.broadcast_to(direction.unsqueeze(-2), [batch_size, *ts_shape, 3]) positions = origin.unsqueeze(-2) + ts * directions directions = directions.to(self.mlp.dtype) positions = positions.to(self.mlp.dtype) optional_directions = directions if render_with_direction else None model_out = self.mlp(position=positions, direction=optional_directions, ts=ts, nerf_level='coarse' if prev_model_out is None else 'fine') (channels, weights, transmittance) = integrate_samples(vrange, model_out.ts, model_out.density, model_out.channels) transmittance = torch.where(vrange.intersected, transmittance, torch.ones_like(transmittance)) channels = torch.where(vrange.intersected, channels, torch.zeros_like(channels)) channels = channels + transmittance * self.void(origin) weighted_sampler = ImportanceRaySampler(vrange, ts=model_out.ts, weights=weights) return (channels, weighted_sampler, model_out) @torch.no_grad() def decode_to_image(self, latents, device, size: int=64, ray_batch_size: int=4096, n_coarse_samples=64, n_fine_samples=128): projected_params = self.params_proj(latents) for (name, param) in self.mlp.state_dict().items(): if f'nerstf.{name}' in projected_params.keys(): param.copy_(projected_params[f'nerstf.{name}'].squeeze(0)) camera = create_pan_cameras(size) rays = camera.camera_rays rays = rays.to(device) n_batches = rays.shape[1] // ray_batch_size coarse_sampler = StratifiedRaySampler() images = [] for idx in range(n_batches): rays_batch = rays[:, idx * ray_batch_size:(idx + 1) * ray_batch_size] (_, fine_sampler, coarse_model_out) = self.render_rays(rays_batch, coarse_sampler, n_coarse_samples) (channels, _, _) = self.render_rays(rays_batch, fine_sampler, n_fine_samples, prev_model_out=coarse_model_out) images.append(channels) images = torch.cat(images, dim=1) images = images.view(*camera.shape, camera.height, camera.width, -1).squeeze(0) return images @torch.no_grad() def decode_to_mesh(self, latents, device, grid_size: int=128, query_batch_size: int=4096, texture_channels: Tuple=('R', 'G', 'B')): projected_params = self.params_proj(latents) for (name, param) in self.mlp.state_dict().items(): if f'nerstf.{name}' in projected_params.keys(): param.copy_(projected_params[f'nerstf.{name}'].squeeze(0)) query_points = volume_query_points(self.volume, grid_size) query_positions = query_points[None].repeat(1, 1, 1).to(device=device, 
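# Editor's note (illustrative): decode_to_image above first copies the projected latent parameters into the
# NeRSTF MLP's state dict, then renders each ray batch twice: a coarse pass with StratifiedRaySampler and a fine
# pass with ImportanceRaySampler seeded by the coarse weights, before reassembling the frames. With the defaults
# (size=64, ray_batch_size=4096) the 20-view pan camera yields exactly 20 batches:
size, ray_batch_size, n_views = 64, 4096, 20     # defaults from decode_to_image / create_pan_cameras
n_rays = n_views * size * size
print(n_rays, n_rays // ray_batch_size)          # 81920 rays -> 20 batches of 4096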
dtype=self.mlp.dtype) fields = [] for idx in range(0, query_positions.shape[1], query_batch_size): query_batch = query_positions[:, idx:idx + query_batch_size] model_out = self.mlp(position=query_batch, direction=None, ts=None, nerf_level='fine', rendering_mode='stf') fields.append(model_out.signed_distance) fields = torch.cat(fields, dim=1) fields = fields.float() assert len(fields.shape) == 3 and fields.shape[-1] == 1, f'expected [meta_batch x inner_batch] SDF results, but got {fields.shape}' fields = fields.reshape(1, *[grid_size] * 3) full_grid = torch.zeros(1, grid_size + 2, grid_size + 2, grid_size + 2, device=fields.device, dtype=fields.dtype) full_grid.fill_(-1.0) full_grid[:, 1:-1, 1:-1, 1:-1] = fields fields = full_grid raw_meshes = [] mesh_mask = [] for field in fields: raw_mesh = self.mesh_decoder(field, self.volume.bbox_min, self.volume.bbox_max - self.volume.bbox_min) mesh_mask.append(True) raw_meshes.append(raw_mesh) mesh_mask = torch.tensor(mesh_mask, device=fields.device) max_vertices = max((len(m.verts) for m in raw_meshes)) texture_query_positions = torch.stack([m.verts[torch.arange(0, max_vertices) % len(m.verts)] for m in raw_meshes], dim=0) texture_query_positions = texture_query_positions.to(device=device, dtype=self.mlp.dtype) textures = [] for idx in range(0, texture_query_positions.shape[1], query_batch_size): query_batch = texture_query_positions[:, idx:idx + query_batch_size] texture_model_out = self.mlp(position=query_batch, direction=None, ts=None, nerf_level='fine', rendering_mode='stf') textures.append(texture_model_out.channels) textures = torch.cat(textures, dim=1) textures = _convert_srgb_to_linear(textures) textures = textures.float() assert len(textures.shape) == 3 and textures.shape[-1] == len(texture_channels), f'expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}' for (m, texture) in zip(raw_meshes, textures): texture = texture[:len(m.verts)] m.vertex_channels = dict(zip(texture_channels, texture.unbind(-1))) return raw_meshes[0] # File: diffusers-main/src/diffusers/pipelines/stable_audio/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, is_transformers_version _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.27.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['modeling_stable_audio'] = ['StableAudioProjectionModel'] _import_structure['pipeline_stable_audio'] = ['StableAudioPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.27.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .modeling_stable_audio import StableAudioProjectionModel from .pipeline_stable_audio import StableAudioPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: 
diffusers-main/src/diffusers/pipelines/stable_audio/modeling_stable_audio.py from dataclasses import dataclass from math import pi from typing import Optional import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin from ...utils import BaseOutput, logging logger = logging.get_logger(__name__) class StableAudioPositionalEmbedding(nn.Module): def __init__(self, dim: int): super().__init__() assert dim % 2 == 0 half_dim = dim // 2 self.weights = nn.Parameter(torch.randn(half_dim)) def forward(self, times: torch.Tensor) -> torch.Tensor: times = times[..., None] freqs = times * self.weights[None] * 2 * pi fouriered = torch.cat((freqs.sin(), freqs.cos()), dim=-1) fouriered = torch.cat((times, fouriered), dim=-1) return fouriered @dataclass class StableAudioProjectionModelOutput(BaseOutput): text_hidden_states: Optional[torch.Tensor] = None seconds_start_hidden_states: Optional[torch.Tensor] = None seconds_end_hidden_states: Optional[torch.Tensor] = None class StableAudioNumberConditioner(nn.Module): def __init__(self, number_embedding_dim, min_value, max_value, internal_dim: Optional[int]=256): super().__init__() self.time_positional_embedding = nn.Sequential(StableAudioPositionalEmbedding(internal_dim), nn.Linear(in_features=internal_dim + 1, out_features=number_embedding_dim)) self.number_embedding_dim = number_embedding_dim self.min_value = min_value self.max_value = max_value def forward(self, floats: torch.Tensor): floats = floats.clamp(self.min_value, self.max_value) normalized_floats = (floats - self.min_value) / (self.max_value - self.min_value) embedder_dtype = next(self.time_positional_embedding.parameters()).dtype normalized_floats = normalized_floats.to(embedder_dtype) embedding = self.time_positional_embedding(normalized_floats) float_embeds = embedding.view(-1, 1, self.number_embedding_dim) return float_embeds class StableAudioProjectionModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, text_encoder_dim, conditioning_dim, min_value, max_value): super().__init__() self.text_projection = nn.Identity() if conditioning_dim == text_encoder_dim else nn.Linear(text_encoder_dim, conditioning_dim) self.start_number_conditioner = StableAudioNumberConditioner(conditioning_dim, min_value, max_value) self.end_number_conditioner = StableAudioNumberConditioner(conditioning_dim, min_value, max_value) def forward(self, text_hidden_states: Optional[torch.Tensor]=None, start_seconds: Optional[torch.Tensor]=None, end_seconds: Optional[torch.Tensor]=None): text_hidden_states = text_hidden_states if text_hidden_states is None else self.text_projection(text_hidden_states) seconds_start_hidden_states = start_seconds if start_seconds is None else self.start_number_conditioner(start_seconds) seconds_end_hidden_states = end_seconds if end_seconds is None else self.end_number_conditioner(end_seconds) return StableAudioProjectionModelOutput(text_hidden_states=text_hidden_states, seconds_start_hidden_states=seconds_start_hidden_states, seconds_end_hidden_states=seconds_end_hidden_states) # File: diffusers-main/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py import inspect from typing import Callable, List, Optional, Union import torch from transformers import T5EncoderModel, T5Tokenizer, T5TokenizerFast from ...models import AutoencoderOobleck, StableAudioDiTModel from ...models.embeddings import get_1d_rotary_pos_embed from ...schedulers import 
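# Editor's sketch (illustrative; the min/max bounds below are assumptions, not values read from a checkpoint):
# StableAudioNumberConditioner above clamps a duration in seconds to [min_value, max_value], rescales it to
# [0, 1], and feeds it through the random-Fourier positional embedding plus a Linear layer, producing one
# conditioning token of width number_embedding_dim per value. The normalization step in isolation:
import torch
min_value, max_value = 0.0, 512.0                 # hypothetical bounds; the real ones come from the model config
seconds = torch.tensor([10.0, 47.5])
normalized = (seconds.clamp(min_value, max_value) - min_value) / (max_value - min_value)
print(normalized)                                 # tensor([0.0195, 0.0928]) -> input to the Fourier embedding + Linear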
EDMDPMSolverMultistepScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .modeling_stable_audio import StableAudioProjectionModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import scipy\n >>> import torch\n >>> import soundfile as sf\n >>> from diffusers import StableAudioPipeline\n\n >>> repo_id = "stabilityai/stable-audio-open-1.0"\n >>> pipe = StableAudioPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n\n >>> # define the prompts\n >>> prompt = "The sound of a hammer hitting a wooden surface."\n >>> negative_prompt = "Low quality."\n\n >>> # set the seed for generator\n >>> generator = torch.Generator("cuda").manual_seed(0)\n\n >>> # run the generation\n >>> audio = pipe(\n ... prompt,\n ... negative_prompt=negative_prompt,\n ... num_inference_steps=200,\n ... audio_end_in_s=10.0,\n ... num_waveforms_per_prompt=3,\n ... generator=generator,\n ... ).audios\n\n >>> output = audio[0].T.float().cpu().numpy()\n >>> sf.write("hammer.wav", output, pipe.vae.sampling_rate)\n ```\n' class StableAudioPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->projection_model->transformer->vae' def __init__(self, vae: AutoencoderOobleck, text_encoder: T5EncoderModel, projection_model: StableAudioProjectionModel, tokenizer: Union[T5Tokenizer, T5TokenizerFast], transformer: StableAudioDiTModel, scheduler: EDMDPMSolverMultistepScheduler): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, projection_model=projection_model, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler) self.rotary_embed_dim = self.transformer.config.attention_head_dim // 2 def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def encode_prompt(self, prompt, device, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, attention_mask: Optional[torch.LongTensor]=None, negative_attention_mask: Optional[torch.LongTensor]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because {self.text_encoder.config.model_type} can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids.to(device) attention_mask = attention_mask.to(device) self.text_encoder.eval() prompt_embeds = self.text_encoder(text_input_ids, attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if do_classifier_free_guidance and negative_prompt is not None: uncond_tokens: List[str] if type(prompt) is not type(negative_prompt): raise 
TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') uncond_input_ids = uncond_input.input_ids.to(device) negative_attention_mask = uncond_input.attention_mask.to(device) self.text_encoder.eval() negative_prompt_embeds = self.text_encoder(uncond_input_ids, attention_mask=negative_attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if negative_attention_mask is not None: negative_prompt_embeds = torch.where(negative_attention_mask.to(torch.bool).unsqueeze(2), negative_prompt_embeds, 0.0) if do_classifier_free_guidance and negative_prompt_embeds is not None: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if attention_mask is not None and negative_attention_mask is None: negative_attention_mask = torch.ones_like(attention_mask) elif attention_mask is None and negative_attention_mask is not None: attention_mask = torch.ones_like(negative_attention_mask) if attention_mask is not None: attention_mask = torch.cat([negative_attention_mask, attention_mask]) prompt_embeds = self.projection_model(text_hidden_states=prompt_embeds).text_hidden_states if attention_mask is not None: prompt_embeds = prompt_embeds * attention_mask.unsqueeze(-1).to(prompt_embeds.dtype) prompt_embeds = prompt_embeds * attention_mask.unsqueeze(-1).to(prompt_embeds.dtype) return prompt_embeds def encode_duration(self, audio_start_in_s, audio_end_in_s, device, do_classifier_free_guidance, batch_size): audio_start_in_s = audio_start_in_s if isinstance(audio_start_in_s, list) else [audio_start_in_s] audio_end_in_s = audio_end_in_s if isinstance(audio_end_in_s, list) else [audio_end_in_s] if len(audio_start_in_s) == 1: audio_start_in_s = audio_start_in_s * batch_size if len(audio_end_in_s) == 1: audio_end_in_s = audio_end_in_s * batch_size audio_start_in_s = [float(x) for x in audio_start_in_s] audio_start_in_s = torch.tensor(audio_start_in_s).to(device) audio_end_in_s = [float(x) for x in audio_end_in_s] audio_end_in_s = torch.tensor(audio_end_in_s).to(device) projection_output = self.projection_model(start_seconds=audio_start_in_s, end_seconds=audio_end_in_s) seconds_start_hidden_states = projection_output.seconds_start_hidden_states seconds_end_hidden_states = projection_output.seconds_end_hidden_states if do_classifier_free_guidance: seconds_start_hidden_states = torch.cat([seconds_start_hidden_states, seconds_start_hidden_states], dim=0) seconds_end_hidden_states = torch.cat([seconds_end_hidden_states, seconds_end_hidden_states], dim=0) return (seconds_start_hidden_states, seconds_end_hidden_states) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def 
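# ---- editor's illustrative sketch (not part of the dumped sources) ----
# prepare_extra_step_kwargs above probes the scheduler's step() signature before forwarding
# `eta`/`generator`; the same feature-detection pattern on a plain function:
import inspect

def fake_step(model_output, timestep, sample, generator=None):
    return sample

accepts_generator = "generator" in set(inspect.signature(fake_step).parameters.keys())
extra_step_kwargs = {"generator": None} if accepts_generator else {}
fake_step(None, 0, sample=1.0, **extra_step_kwargs)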
check_inputs(self, prompt, audio_start_in_s, audio_end_in_s, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, attention_mask=None, negative_attention_mask=None, initial_audio_waveforms=None, initial_audio_sampling_rate=None): if audio_end_in_s < audio_start_in_s: raise ValueError(f"`audio_end_in_s={audio_end_in_s}' must be higher than 'audio_start_in_s={audio_start_in_s}` but ") if audio_start_in_s < self.projection_model.config.min_value or audio_start_in_s > self.projection_model.config.max_value: raise ValueError(f'`audio_start_in_s` must be greater than or equal to {self.projection_model.config.min_value}, and lower than or equal to {self.projection_model.config.max_value} but is {audio_start_in_s}.') if audio_end_in_s < self.projection_model.config.min_value or audio_end_in_s > self.projection_model.config.max_value: raise ValueError(f'`audio_end_in_s` must be greater than or equal to {self.projection_model.config.min_value}, and lower than or equal to {self.projection_model.config.max_value} but is {audio_end_in_s}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt`, or `prompt_embeds`. Cannot leave`prompt` undefined without specifying `prompt_embeds`.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if attention_mask is not None and attention_mask.shape != prompt_embeds.shape[:2]: raise ValueError(f'`attention_mask should have the same batch size and sequence length as `prompt_embeds`, but got:`attention_mask: {attention_mask.shape} != `prompt_embeds` {prompt_embeds.shape}') if initial_audio_sampling_rate is None and initial_audio_waveforms is not None: raise ValueError("`initial_audio_waveforms' is provided but the sampling rate is not. Make sure to pass `initial_audio_sampling_rate`.") if initial_audio_sampling_rate is not None and initial_audio_sampling_rate != self.vae.sampling_rate: raise ValueError(f"`initial_audio_sampling_rate` must be {self.vae.hop_length}' but is `{initial_audio_sampling_rate}`.Make sure to resample the `initial_audio_waveforms` and to correct the sampling rate. 
") def prepare_latents(self, batch_size, num_channels_vae, sample_size, dtype, device, generator, latents=None, initial_audio_waveforms=None, num_waveforms_per_prompt=None, audio_channels=None): shape = (batch_size, num_channels_vae, sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma if initial_audio_waveforms is not None: if initial_audio_waveforms.ndim == 2: initial_audio_waveforms = initial_audio_waveforms.unsqueeze(1) elif initial_audio_waveforms.ndim != 3: raise ValueError(f'`initial_audio_waveforms` must be of shape `(batch_size, num_channels, audio_length)` or `(batch_size, audio_length)` but has `{initial_audio_waveforms.ndim}` dimensions') audio_vae_length = self.transformer.config.sample_size * self.vae.hop_length audio_shape = (batch_size // num_waveforms_per_prompt, audio_channels, audio_vae_length) if initial_audio_waveforms.shape[1] == 1 and audio_channels == 2: initial_audio_waveforms = initial_audio_waveforms.repeat(1, 2, 1) elif initial_audio_waveforms.shape[1] == 2 and audio_channels == 1: initial_audio_waveforms = initial_audio_waveforms.mean(1, keepdim=True) if initial_audio_waveforms.shape[:2] != audio_shape[:2]: raise ValueError(f'`initial_audio_waveforms` must be of shape `(batch_size, num_channels, audio_length)` or `(batch_size, audio_length)` but is of shape `{initial_audio_waveforms.shape}`') audio_length = initial_audio_waveforms.shape[-1] if audio_length < audio_vae_length: logger.warning(f'The provided input waveform is shorter ({audio_length}) than the required audio length ({audio_vae_length}) of the model and will thus be padded.') elif audio_length > audio_vae_length: logger.warning(f'The provided input waveform is longer ({audio_length}) than the required audio length ({audio_vae_length}) of the model and will thus be cropped.') audio = initial_audio_waveforms.new_zeros(audio_shape) audio[:, :, :min(audio_length, audio_vae_length)] = initial_audio_waveforms[:, :, :audio_vae_length] encoded_audio = self.vae.encode(audio).latent_dist.sample(generator) encoded_audio = encoded_audio.repeat((num_waveforms_per_prompt, 1, 1)) latents = encoded_audio + latents return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, audio_end_in_s: Optional[float]=None, audio_start_in_s: Optional[float]=0.0, num_inference_steps: int=100, guidance_scale: float=7.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_waveforms_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, initial_audio_waveforms: Optional[torch.Tensor]=None, initial_audio_sampling_rate: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, attention_mask: Optional[torch.LongTensor]=None, negative_attention_mask: Optional[torch.LongTensor]=None, return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, output_type: Optional[str]='pt'): downsample_ratio = self.vae.hop_length 
max_audio_length_in_s = self.transformer.config.sample_size * downsample_ratio / self.vae.config.sampling_rate if audio_end_in_s is None: audio_end_in_s = max_audio_length_in_s if audio_end_in_s - audio_start_in_s > max_audio_length_in_s: raise ValueError(f"The total audio length requested ({audio_end_in_s - audio_start_in_s}s) is longer than the model maximum possible length ({max_audio_length_in_s}). Make sure that 'audio_end_in_s-audio_start_in_s<={max_audio_length_in_s}'.") waveform_start = int(audio_start_in_s * self.vae.config.sampling_rate) waveform_end = int(audio_end_in_s * self.vae.config.sampling_rate) waveform_length = int(self.transformer.config.sample_size) self.check_inputs(prompt, audio_start_in_s, audio_end_in_s, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask, initial_audio_waveforms, initial_audio_sampling_rate) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self.encode_prompt(prompt, device, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask) (seconds_start_hidden_states, seconds_end_hidden_states) = self.encode_duration(audio_start_in_s, audio_end_in_s, device, do_classifier_free_guidance and (negative_prompt is not None or negative_prompt_embeds is not None), batch_size) text_audio_duration_embeds = torch.cat([prompt_embeds, seconds_start_hidden_states, seconds_end_hidden_states], dim=1) audio_duration_embeds = torch.cat([seconds_start_hidden_states, seconds_end_hidden_states], dim=2) if do_classifier_free_guidance and negative_prompt_embeds is None and (negative_prompt is None): negative_text_audio_duration_embeds = torch.zeros_like(text_audio_duration_embeds, device=text_audio_duration_embeds.device) text_audio_duration_embeds = torch.cat([negative_text_audio_duration_embeds, text_audio_duration_embeds], dim=0) audio_duration_embeds = torch.cat([audio_duration_embeds, audio_duration_embeds], dim=0) (bs_embed, seq_len, hidden_size) = text_audio_duration_embeds.shape text_audio_duration_embeds = text_audio_duration_embeds.repeat(1, num_waveforms_per_prompt, 1) text_audio_duration_embeds = text_audio_duration_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len, hidden_size) audio_duration_embeds = audio_duration_embeds.repeat(1, num_waveforms_per_prompt, 1) audio_duration_embeds = audio_duration_embeds.view(bs_embed * num_waveforms_per_prompt, -1, audio_duration_embeds.shape[-1]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_vae = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_waveforms_per_prompt, num_channels_vae, waveform_length, text_audio_duration_embeds.dtype, device, generator, latents, initial_audio_waveforms, num_waveforms_per_prompt, audio_channels=self.vae.config.audio_channels) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) rotary_embedding = get_1d_rotary_pos_embed(self.rotary_embed_dim, latents.shape[2] + audio_duration_embeds.shape[1], use_real=True, repeat_interleave_real=False) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in 
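# ---- editor's illustrative sketch (not part of the dumped sources) ----
# The duration bookkeeping used above, with made-up config values: the maximum generatable length
# is sample_size * hop_length / sampling_rate, and the requested window is later cut out of the
# decoded waveform by sample index.
sample_size, hop_length, sampling_rate = 1024, 2048, 44100   # hypothetical values
max_audio_length_in_s = sample_size * hop_length / sampling_rate
audio_start_in_s, audio_end_in_s = 0.0, 10.0
waveform_start = int(audio_start_in_s * sampling_rate)
waveform_end = int(audio_end_in_s * sampling_rate)
print(round(max_audio_length_in_s, 2), waveform_start, waveform_end)  # 47.55 0 441000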
enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.transformer(latent_model_input, t.unsqueeze(0), encoder_hidden_states=text_audio_duration_embeds, global_hidden_states=audio_duration_embeds, rotary_embedding=rotary_embedding, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': audio = self.vae.decode(latents).sample else: return AudioPipelineOutput(audios=latents) audio = audio[:, :, waveform_start:waveform_end] if output_type == 'np': audio = audio.cpu().float().numpy() self.maybe_free_model_hooks() if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio) # File: diffusers-main/src/diffusers/pipelines/stable_cascade/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_cascade'] = ['StableCascadeDecoderPipeline'] _import_structure['pipeline_stable_cascade_combined'] = ['StableCascadeCombinedPipeline'] _import_structure['pipeline_stable_cascade_prior'] = ['StableCascadePriorPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_cascade import StableCascadeDecoderPipeline from .pipeline_stable_cascade_combined import StableCascadeCombinedPipeline from .pipeline_stable_cascade_prior import StableCascadePriorPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py from typing import Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...models import StableCascadeUNet from ...schedulers import DDPMWuerstchenScheduler from ...utils import is_torch_version, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline\n\n >>> 
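# ---- editor's illustrative sketch (not part of the dumped sources) ----
# The classifier-free guidance step in the denoising loop above: the doubled batch is split into
# its unconditional and text-conditioned halves and recombined as uncond + scale * (text - uncond).
import torch

guidance_scale = 7.0
noise_pred = torch.randn(2, 64, 128)                      # dummy [uncond, text] batch
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 64, 128])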
prior_pipe = StableCascadePriorPipeline.from_pretrained(\n ... "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16\n ... ).to("cuda")\n >>> gen_pipe = StableCascadeDecoderPipeline.from_pretrain(\n ... "stabilityai/stable-cascade", torch_dtype=torch.float16\n ... ).to("cuda")\n\n >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"\n >>> prior_output = pipe(prompt)\n >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt)\n ```\n' class StableCascadeDecoderPipeline(DiffusionPipeline): unet_name = 'decoder' text_encoder_name = 'text_encoder' model_cpu_offload_seq = 'text_encoder->decoder->vqgan' _callback_tensor_inputs = ['latents', 'prompt_embeds_pooled', 'negative_prompt_embeds', 'image_embeddings'] def __init__(self, decoder: StableCascadeUNet, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, latent_dim_scale: float=10.67) -> None: super().__init__() self.register_modules(decoder=decoder, tokenizer=tokenizer, text_encoder=text_encoder, scheduler=scheduler, vqgan=vqgan) self.register_to_config(latent_dim_scale=latent_dim_scale) def prepare_latents(self, batch_size, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, scheduler): (_, channels, height, width) = image_embeddings.shape latents_shape = (batch_size * num_images_per_prompt, 4, int(height * self.config.latent_dim_scale), int(width * self.config.latent_dim_scale)) if latents is None: latents = randn_tensor(latents_shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def encode_prompt(self, device, batch_size, num_images_per_prompt, do_classifier_free_guidance, prompt=None, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_embeds_pooled: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds_pooled: Optional[torch.Tensor]=None): if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] attention_mask = attention_mask[:, :self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True) prompt_embeds = text_encoder_output.hidden_states[-1] if prompt_embeds_pooled is None: prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) 
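# ---- editor's illustrative sketch (not part of the dumped sources) ----
# An explicit prior -> decoder hand-off for Stable Cascade: the prior pipeline produces
# image_embeddings that the decoder pipeline above turns into images; the embeddings are cast to
# the decoder's dtype before being passed on.
import torch
from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline

prior_pipe = StableCascadePriorPipeline.from_pretrained(
    "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16
).to("cuda")
decoder_pipe = StableCascadeDecoderPipeline.from_pretrained(
    "stabilityai/stable-cascade", torch_dtype=torch.float16
).to("cuda")

prompt = "an image of a shiba inu, donning a spacesuit and helmet"
prior_output = prior_pipe(prompt=prompt)
images = decoder_pipe(prior_output.image_embeddings.to(torch.float16), prompt=prompt).images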
prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0) if negative_prompt_embeds is None and do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device), output_hidden_states=True) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1] negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) seq_len = negative_prompt_embeds_pooled.shape[1] negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view(batch_size * num_images_per_prompt, seq_len, -1) return (prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled) def check_inputs(self, prompt, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps def get_timestep_ratio_conditioning(self, t, alphas_cumprod): s = torch.tensor([0.008]) clamp_range = [0, 1] min_var = torch.cos(s / (1 + s) * torch.pi * 0.5) ** 2 var = alphas_cumprod[t] var = var.clamp(*clamp_range) (s, min_var) = (s.to(var.device), min_var.to(var.device)) ratio = ((var * min_var) ** 0.5).acos() / (torch.pi * 0.5) * (1 + s) - s return ratio @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image_embeddings: Union[torch.Tensor, List[torch.Tensor]], prompt: Union[str, List[str]]=None, num_inference_steps: int=10, guidance_scale: float=0.0, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_embeds_pooled: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds_pooled: Optional[torch.Tensor]=None, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): device = self._execution_device dtype = self.decoder.dtype self._guidance_scale = guidance_scale if is_torch_version('<', '2.2.0') and dtype == torch.bfloat16: raise ValueError('`StableCascadeDecoderPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype.') self.check_inputs(prompt, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs) if isinstance(image_embeddings, list): image_embeddings = torch.cat(image_embeddings, dim=0) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] num_images_per_prompt = num_images_per_prompt * (image_embeddings.shape[0] // batch_size) if prompt_embeds is None and negative_prompt_embeds is None: (_, prompt_embeds_pooled, _, negative_prompt_embeds_pooled) = self.encode_prompt(prompt=prompt, device=device, batch_size=batch_size, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled) prompt_embeds_pooled = torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled]) if self.do_classifier_free_guidance else prompt_embeds_pooled effnet = torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) if self.do_classifier_free_guidance else image_embeddings self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps latents = 
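# ---- editor's illustrative sketch (not part of the dumped sources) ----
# get_timestep_ratio_conditioning above inverts a cosine noise schedule: an alphas_cumprod value
# of 1.0 maps to a timestep ratio of 0.0, while values near 0.0 map to a ratio of 1.0.
import torch

s = torch.tensor([0.008])
min_var = torch.cos(s / (1 + s) * torch.pi * 0.5) ** 2
for var in (1.0, 0.5, 1e-8):
    var = torch.tensor(var)
    ratio = ((var * min_var) ** 0.5).acos() / (torch.pi * 0.5) * (1 + s) - s
    print(float(var), float(ratio))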
self.prepare_latents(batch_size, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, self.scheduler) if isinstance(self.scheduler, DDPMWuerstchenScheduler): timesteps = timesteps[:-1] elif hasattr(self.scheduler.config, 'clip_sample') and self.scheduler.config.clip_sample: self.scheduler.config.clip_sample = False logger.warning(' set `clip_sample` to be False') if hasattr(self.scheduler, 'betas'): alphas = 1.0 - self.scheduler.betas alphas_cumprod = torch.cumprod(alphas, dim=0) else: alphas_cumprod = [] self._num_timesteps = len(timesteps) for (i, t) in enumerate(self.progress_bar(timesteps)): if not isinstance(self.scheduler, DDPMWuerstchenScheduler): if len(alphas_cumprod) > 0: timestep_ratio = self.get_timestep_ratio_conditioning(t.long().cpu(), alphas_cumprod) timestep_ratio = timestep_ratio.expand(latents.size(0)).to(dtype).to(device) else: timestep_ratio = t.float().div(self.scheduler.timesteps[-1]).expand(latents.size(0)).to(dtype) else: timestep_ratio = t.expand(latents.size(0)).to(dtype) predicted_latents = self.decoder(sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio, clip_text_pooled=prompt_embeds_pooled, effnet=effnet, return_dict=False)[0] if self.do_classifier_free_guidance: (predicted_latents_text, predicted_latents_uncond) = predicted_latents.chunk(2) predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale) if not isinstance(self.scheduler, DDPMWuerstchenScheduler): timestep_ratio = t latents = self.scheduler.step(model_output=predicted_latents, timestep=timestep_ratio, sample=latents, generator=generator).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if output_type not in ['pt', 'np', 'pil', 'latent']: raise ValueError(f'Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}') if not output_type == 'latent': latents = self.vqgan.config.scale_factor * latents images = self.vqgan.decode(latents).sample.clamp(0, 1) if output_type == 'np': images = images.permute(0, 2, 3, 1).cpu().float().numpy() elif output_type == 'pil': images = images.permute(0, 2, 3, 1).cpu().float().numpy() images = self.numpy_to_pil(images) else: images = latents self.maybe_free_model_hooks() if not return_dict: return images return ImagePipelineOutput(images) # File: diffusers-main/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py from typing import Callable, Dict, List, Optional, Union import PIL import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...models import StableCascadeUNet from ...schedulers import DDPMWuerstchenScheduler from ...utils import is_torch_version, replace_example_docstring from ..pipeline_utils import DiffusionPipeline from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel from .pipeline_stable_cascade import StableCascadeDecoderPipeline from .pipeline_stable_cascade_prior import StableCascadePriorPipeline TEXT2IMAGE_EXAMPLE_DOC_STRING = '\n Examples:\n 
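# ---- editor's illustrative sketch (not part of the dumped sources) ----
# The decoder's prepare_latents earlier scales the prior embedding resolution by latent_dim_scale
# (default 10.67) to get the VQGAN latent grid; a hypothetical 24x24 prior embedding therefore
# yields a 256x256 latent.
height, width = 24, 24
latent_dim_scale = 10.67
print(int(height * latent_dim_scale), int(width * latent_dim_scale))  # 256 256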
```py\n >>> import torch\n >>> from diffusers import StableCascadeCombinedPipeline\n\n >>> pipe = StableCascadeCombinedPipeline.from_pretrained(\n ... "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16\n ... )\n >>> pipe.enable_model_cpu_offload()\n >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"\n >>> images = pipe(prompt=prompt)\n ```\n' class StableCascadeCombinedPipeline(DiffusionPipeline): _load_connected_pipes = True _optional_components = ['prior_feature_extractor', 'prior_image_encoder'] def __init__(self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, decoder: StableCascadeUNet, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, prior_prior: StableCascadeUNet, prior_text_encoder: CLIPTextModel, prior_tokenizer: CLIPTokenizer, prior_scheduler: DDPMWuerstchenScheduler, prior_feature_extractor: Optional[CLIPImageProcessor]=None, prior_image_encoder: Optional[CLIPVisionModelWithProjection]=None): super().__init__() self.register_modules(text_encoder=text_encoder, tokenizer=tokenizer, decoder=decoder, scheduler=scheduler, vqgan=vqgan, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_prior=prior_prior, prior_scheduler=prior_scheduler, prior_feature_extractor=prior_feature_extractor, prior_image_encoder=prior_image_encoder) self.prior_pipe = StableCascadePriorPipeline(prior=prior_prior, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_encoder=prior_image_encoder, feature_extractor=prior_feature_extractor) self.decoder_pipe = StableCascadeDecoderPipeline(text_encoder=text_encoder, tokenizer=tokenizer, decoder=decoder, scheduler=scheduler, vqgan=vqgan) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_model_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) def enable_sequential_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, images: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]]=None, height: int=512, width: int=512, prior_num_inference_steps: int=60, prior_guidance_scale: float=4.0, num_inference_steps: int=12, decoder_guidance_scale: float=0.0, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_embeds_pooled: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds_pooled: Optional[torch.Tensor]=None, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', 
return_dict: bool=True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, prior_callback_on_step_end_tensor_inputs: List[str]=['latents'], callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): dtype = self.decoder_pipe.decoder.dtype if is_torch_version('<', '2.2.0') and dtype == torch.bfloat16: raise ValueError('`StableCascadeCombinedPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype.') prior_outputs = self.prior_pipe(prompt=prompt if prompt_embeds is None else None, images=images, height=height, width=width, num_inference_steps=prior_num_inference_steps, guidance_scale=prior_guidance_scale, negative_prompt=negative_prompt if negative_prompt_embeds is None else None, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, num_images_per_prompt=num_images_per_prompt, generator=generator, latents=latents, output_type='pt', return_dict=True, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs) image_embeddings = prior_outputs.image_embeddings prompt_embeds = prior_outputs.get('prompt_embeds', None) prompt_embeds_pooled = prior_outputs.get('prompt_embeds_pooled', None) negative_prompt_embeds = prior_outputs.get('negative_prompt_embeds', None) negative_prompt_embeds_pooled = prior_outputs.get('negative_prompt_embeds_pooled', None) outputs = self.decoder_pipe(image_embeddings=image_embeddings, prompt=prompt if prompt_embeds is None else None, num_inference_steps=num_inference_steps, guidance_scale=decoder_guidance_scale, negative_prompt=negative_prompt if negative_prompt_embeds is None else None, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, generator=generator, output_type=output_type, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs) return outputs # File: diffusers-main/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_prior.py from dataclasses import dataclass from math import ceil from typing import Callable, Dict, List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...models import StableCascadeUNet from ...schedulers import DDPMWuerstchenScheduler from ...utils import BaseOutput, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline logger = logging.get_logger(__name__) DEFAULT_STAGE_C_TIMESTEPS = list(np.linspace(1.0, 2 / 3, 20)) + list(np.linspace(2 / 3, 0.0, 11))[1:] EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableCascadePriorPipeline\n\n >>> prior_pipe = StableCascadePriorPipeline.from_pretrained(\n ... "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16\n ... 
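# ---- editor's illustrative sketch (not part of the dumped sources) ----
# DEFAULT_STAGE_C_TIMESTEPS above joins a denser segment (1.0 down to 2/3, 20 points) with a
# coarser tail (2/3 down to 0.0), dropping the duplicated 2/3 boundary, for 30 values in total.
import numpy as np

schedule = list(np.linspace(1.0, 2 / 3, 20)) + list(np.linspace(2 / 3, 0.0, 11))[1:]
print(len(schedule), round(schedule[0], 3), round(schedule[-1], 3))  # 30 1.0 0.0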
).to("cuda")\n\n >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"\n >>> prior_output = pipe(prompt)\n ```\n' @dataclass class StableCascadePriorPipelineOutput(BaseOutput): image_embeddings: Union[torch.Tensor, np.ndarray] prompt_embeds: Union[torch.Tensor, np.ndarray] prompt_embeds_pooled: Union[torch.Tensor, np.ndarray] negative_prompt_embeds: Union[torch.Tensor, np.ndarray] negative_prompt_embeds_pooled: Union[torch.Tensor, np.ndarray] class StableCascadePriorPipeline(DiffusionPipeline): unet_name = 'prior' text_encoder_name = 'text_encoder' model_cpu_offload_seq = 'image_encoder->text_encoder->prior' _optional_components = ['image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'text_encoder_hidden_states', 'negative_prompt_embeds'] def __init__(self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, prior: StableCascadeUNet, scheduler: DDPMWuerstchenScheduler, resolution_multiple: float=42.67, feature_extractor: Optional[CLIPImageProcessor]=None, image_encoder: Optional[CLIPVisionModelWithProjection]=None) -> None: super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, image_encoder=image_encoder, feature_extractor=feature_extractor, prior=prior, scheduler=scheduler) self.register_to_config(resolution_multiple=resolution_multiple) def prepare_latents(self, batch_size, height, width, num_images_per_prompt, dtype, device, generator, latents, scheduler): latent_shape = (num_images_per_prompt * batch_size, self.prior.config.in_channels, ceil(height / self.config.resolution_multiple), ceil(width / self.config.resolution_multiple)) if latents is None: latents = randn_tensor(latent_shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != latent_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latent_shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def encode_prompt(self, device, batch_size, num_images_per_prompt, do_classifier_free_guidance, prompt=None, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_embeds_pooled: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds_pooled: Optional[torch.Tensor]=None): if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] attention_mask = attention_mask[:, :self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True) prompt_embeds = text_encoder_output.hidden_states[-1] if prompt_embeds_pooled is None: prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) 
prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0) if negative_prompt_embeds is None and do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device), output_hidden_states=True) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1] negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) seq_len = negative_prompt_embeds_pooled.shape[1] negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view(batch_size * num_images_per_prompt, seq_len, -1) return (prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled) def encode_image(self, images, device, dtype, batch_size, num_images_per_prompt): image_embeds = [] for image in images: image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) image_embed = self.image_encoder(image).image_embeds.unsqueeze(1) image_embeds.append(image_embed) image_embeds = torch.cat(image_embeds, dim=1) image_embeds = image_embeds.repeat(batch_size * num_images_per_prompt, 1, 1) negative_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, negative_image_embeds) def check_inputs(self, prompt, images=None, image_embeds=None, negative_prompt=None, prompt_embeds=None, prompt_embeds_pooled=None, negative_prompt_embeds=None, negative_prompt_embeds_pooled=None, callback_on_step_end_tensor_inputs=None): if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot 
forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and prompt_embeds_pooled is None: raise ValueError('If `prompt_embeds` are provided, `prompt_embeds_pooled` must also be provided. Make sure to generate `prompt_embeds_pooled` from the same text encoder that was used to generate `prompt_embeds`') if negative_prompt_embeds is not None and negative_prompt_embeds_pooled is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_prompt_embeds_pooled` must also be provided. Make sure to generate `prompt_embeds_pooled` from the same text encoder that was used to generate `prompt_embeds`') if prompt_embeds_pooled is not None and negative_prompt_embeds_pooled is not None: if prompt_embeds_pooled.shape != negative_prompt_embeds_pooled.shape: raise ValueError(f'`prompt_embeds_pooled` and `negative_prompt_embeds_pooled` must have the same shape when passeddirectly, but got: `prompt_embeds_pooled` {prompt_embeds_pooled.shape} !=`negative_prompt_embeds_pooled` {negative_prompt_embeds_pooled.shape}.') if image_embeds is not None and images is not None: raise ValueError(f'Cannot forward both `images`: {images} and `image_embeds`: {image_embeds}. 
Please make sure to only forward one of the two.') if images: for (i, image) in enumerate(images): if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)): raise TypeError(f"'images' must contain images of type 'torch.Tensor' or 'PIL.Image.Image, but got{type(image)} for image number {i}.") @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps def get_timestep_ratio_conditioning(self, t, alphas_cumprod): s = torch.tensor([0.008]) clamp_range = [0, 1] min_var = torch.cos(s / (1 + s) * torch.pi * 0.5) ** 2 var = alphas_cumprod[t] var = var.clamp(*clamp_range) (s, min_var) = (s.to(var.device), min_var.to(var.device)) ratio = ((var * min_var) ** 0.5).acos() / (torch.pi * 0.5) * (1 + s) - s return ratio @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, images: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]]=None, height: int=1024, width: int=1024, num_inference_steps: int=20, timesteps: List[float]=None, guidance_scale: float=4.0, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, prompt_embeds_pooled: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds_pooled: Optional[torch.Tensor]=None, image_embeds: Optional[torch.Tensor]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pt', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']): device = self._execution_device dtype = next(self.prior.parameters()).dtype self._guidance_scale = guidance_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] self.check_inputs(prompt, images=images, image_embeds=image_embeds, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs) (prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled) = self.encode_prompt(prompt=prompt, device=device, batch_size=batch_size, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled) if images is not None: (image_embeds_pooled, uncond_image_embeds_pooled) = self.encode_image(images=images, device=device, dtype=dtype, batch_size=batch_size, num_images_per_prompt=num_images_per_prompt) elif image_embeds is not None: image_embeds_pooled = image_embeds.repeat(batch_size * num_images_per_prompt, 1, 1) uncond_image_embeds_pooled = torch.zeros_like(image_embeds_pooled) else: image_embeds_pooled = torch.zeros(batch_size * num_images_per_prompt, 1, self.prior.config.clip_image_in_channels, device=device, dtype=dtype) 
uncond_image_embeds_pooled = torch.zeros(batch_size * num_images_per_prompt, 1, self.prior.config.clip_image_in_channels, device=device, dtype=dtype) if self.do_classifier_free_guidance: image_embeds = torch.cat([image_embeds_pooled, uncond_image_embeds_pooled], dim=0) else: image_embeds = image_embeds_pooled text_encoder_hidden_states = torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds text_encoder_pooled = torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled]) if negative_prompt_embeds is not None else prompt_embeds_pooled self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps latents = self.prepare_latents(batch_size, height, width, num_images_per_prompt, dtype, device, generator, latents, self.scheduler) if isinstance(self.scheduler, DDPMWuerstchenScheduler): timesteps = timesteps[:-1] elif hasattr(self.scheduler.config, 'clip_sample') and self.scheduler.config.clip_sample: self.scheduler.config.clip_sample = False logger.warning(' set `clip_sample` to be False') if hasattr(self.scheduler, 'betas'): alphas = 1.0 - self.scheduler.betas alphas_cumprod = torch.cumprod(alphas, dim=0) else: alphas_cumprod = [] self._num_timesteps = len(timesteps) for (i, t) in enumerate(self.progress_bar(timesteps)): if not isinstance(self.scheduler, DDPMWuerstchenScheduler): if len(alphas_cumprod) > 0: timestep_ratio = self.get_timestep_ratio_conditioning(t.long().cpu(), alphas_cumprod) timestep_ratio = timestep_ratio.expand(latents.size(0)).to(dtype).to(device) else: timestep_ratio = t.float().div(self.scheduler.timesteps[-1]).expand(latents.size(0)).to(dtype) else: timestep_ratio = t.expand(latents.size(0)).to(dtype) predicted_image_embedding = self.prior(sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio, clip_text_pooled=text_encoder_pooled, clip_text=text_encoder_hidden_states, clip_img=image_embeds, return_dict=False)[0] if self.do_classifier_free_guidance: (predicted_image_embedding_text, predicted_image_embedding_uncond) = predicted_image_embedding.chunk(2) predicted_image_embedding = torch.lerp(predicted_image_embedding_uncond, predicted_image_embedding_text, self.guidance_scale) if not isinstance(self.scheduler, DDPMWuerstchenScheduler): timestep_ratio = t latents = self.scheduler.step(model_output=predicted_image_embedding, timestep=timestep_ratio, sample=latents, generator=generator).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) self.maybe_free_model_hooks() if output_type == 'np': latents = latents.cpu().float().numpy() prompt_embeds = prompt_embeds.cpu().float().numpy() negative_prompt_embeds = negative_prompt_embeds.cpu().float().numpy() if negative_prompt_embeds is not None else None if not return_dict: return (latents, prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled) return StableCascadePriorPipelineOutput(image_embeddings=latents, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, 
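# ---- editor's illustrative sketch (not part of the dumped sources) ----
# torch.lerp(uncond, text, w), used for guidance in the prior and decoder loops above, is the
# usual classifier-free guidance update uncond + w * (text - uncond) written as an interpolation.
import torch

uncond, text, w = torch.randn(2, 16), torch.randn(2, 16), 4.0
assert torch.allclose(torch.lerp(uncond, text, w), uncond + w * (text - uncond))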
negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version _dummy_objects = {} _additional_imports = {} _import_structure = {'pipeline_output': ['StableDiffusionPipelineOutput']} if is_transformers_available() and is_flax_available(): _import_structure['pipeline_output'].extend(['FlaxStableDiffusionPipelineOutput']) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['clip_image_project_model'] = ['CLIPImageProjection'] _import_structure['pipeline_cycle_diffusion'] = ['CycleDiffusionPipeline'] _import_structure['pipeline_stable_diffusion'] = ['StableDiffusionPipeline'] _import_structure['pipeline_stable_diffusion_attend_and_excite'] = ['StableDiffusionAttendAndExcitePipeline'] _import_structure['pipeline_stable_diffusion_gligen'] = ['StableDiffusionGLIGENPipeline'] _import_structure['pipeline_stable_diffusion_gligen_text_image'] = ['StableDiffusionGLIGENTextImagePipeline'] _import_structure['pipeline_stable_diffusion_img2img'] = ['StableDiffusionImg2ImgPipeline'] _import_structure['pipeline_stable_diffusion_inpaint'] = ['StableDiffusionInpaintPipeline'] _import_structure['pipeline_stable_diffusion_inpaint_legacy'] = ['StableDiffusionInpaintPipelineLegacy'] _import_structure['pipeline_stable_diffusion_instruct_pix2pix'] = ['StableDiffusionInstructPix2PixPipeline'] _import_structure['pipeline_stable_diffusion_latent_upscale'] = ['StableDiffusionLatentUpscalePipeline'] _import_structure['pipeline_stable_diffusion_model_editing'] = ['StableDiffusionModelEditingPipeline'] _import_structure['pipeline_stable_diffusion_paradigms'] = ['StableDiffusionParadigmsPipeline'] _import_structure['pipeline_stable_diffusion_upscale'] = ['StableDiffusionUpscalePipeline'] _import_structure['pipeline_stable_unclip'] = ['StableUnCLIPPipeline'] _import_structure['pipeline_stable_unclip_img2img'] = ['StableUnCLIPImg2ImgPipeline'] _import_structure['safety_checker'] = ['StableDiffusionSafetyChecker'] _import_structure['stable_unclip_image_normalizer'] = ['StableUnCLIPImageNormalizer'] try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline _dummy_objects.update({'StableDiffusionImageVariationPipeline': StableDiffusionImageVariationPipeline}) else: _import_structure['pipeline_stable_diffusion_image_variation'] = ['StableDiffusionImageVariationPipeline'] try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionDepth2ImgPipeline _dummy_objects.update({'StableDiffusionDepth2ImgPipeline': 
StableDiffusionDepth2ImgPipeline}) else: _import_structure['pipeline_stable_diffusion_depth2img'] = ['StableDiffusionDepth2ImgPipeline'] try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_onnx_objects _dummy_objects.update(get_objects_from_module(dummy_onnx_objects)) else: _import_structure['pipeline_onnx_stable_diffusion'] = ['OnnxStableDiffusionPipeline', 'StableDiffusionOnnxPipeline'] _import_structure['pipeline_onnx_stable_diffusion_img2img'] = ['OnnxStableDiffusionImg2ImgPipeline'] _import_structure['pipeline_onnx_stable_diffusion_inpaint'] = ['OnnxStableDiffusionInpaintPipeline'] _import_structure['pipeline_onnx_stable_diffusion_inpaint_legacy'] = ['OnnxStableDiffusionInpaintPipelineLegacy'] _import_structure['pipeline_onnx_stable_diffusion_upscale'] = ['OnnxStableDiffusionUpscalePipeline'] if is_transformers_available() and is_flax_available(): from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState _additional_imports.update({'PNDMSchedulerState': PNDMSchedulerState}) _import_structure['pipeline_flax_stable_diffusion'] = ['FlaxStableDiffusionPipeline'] _import_structure['pipeline_flax_stable_diffusion_img2img'] = ['FlaxStableDiffusionImg2ImgPipeline'] _import_structure['pipeline_flax_stable_diffusion_inpaint'] = ['FlaxStableDiffusionInpaintPipeline'] _import_structure['safety_checker_flax'] = ['FlaxStableDiffusionSafetyChecker'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .clip_image_project_model import CLIPImageProjection from .pipeline_stable_diffusion import StableDiffusionPipeline, StableDiffusionPipelineOutput from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionDepth2ImgPipeline else: from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * else: from 
.pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_objects import * else: from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .pipeline_output import FlaxStableDiffusionPipelineOutput from .safety_checker_flax import FlaxStableDiffusionSafetyChecker else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for (name, value) in _additional_imports.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/clip_image_project_model.py from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class CLIPImageProjection(ModelMixin, ConfigMixin): @register_to_config def __init__(self, hidden_size: int=768): super().__init__() self.hidden_size = hidden_size self.project = nn.Linear(self.hidden_size, self.hidden_size, bias=False) def forward(self, x): return self.project(x) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py """""" import re from contextlib import nullcontext from io import BytesIO from typing import Dict, Optional, Union import requests import torch import yaml from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection from ...models import AutoencoderKL, ControlNetModel, PriorTransformer, UNet2DConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, UnCLIPScheduler from ...utils import is_accelerate_available, logging from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel from ..paint_by_example import PaintByExampleImageEncoder from ..pipeline_utils import DiffusionPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import set_module_tensor_to_device logger = logging.get_logger(__name__) def shave_segments(path, n_shave_prefix_segments=1): if n_shave_prefix_segments >= 0: return '.'.join(path.split('.')[n_shave_prefix_segments:]) else: return '.'.join(path.split('.')[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item.replace('in_layers.0', 'norm1') new_item = new_item.replace('in_layers.2', 'conv1') new_item = new_item.replace('out_layers.0', 'norm2') new_item = 
new_item.replace('out_layers.3', 'conv2') new_item = new_item.replace('emb_layers.1', 'time_emb_proj') new_item = new_item.replace('skip_connection', 'conv_shortcut') new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace('nin_shortcut', 'conv_shortcut') new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace('norm.weight', 'group_norm.weight') new_item = new_item.replace('norm.bias', 'group_norm.bias') new_item = new_item.replace('q.weight', 'to_q.weight') new_item = new_item.replace('q.bias', 'to_q.bias') new_item = new_item.replace('k.weight', 'to_k.weight') new_item = new_item.replace('k.bias', 'to_k.bias') new_item = new_item.replace('v.weight', 'to_v.weight') new_item = new_item.replace('v.bias', 'to_v.bias') new_item = new_item.replace('proj_out.weight', 'to_out.0.weight') new_item = new_item.replace('proj_out.bias', 'to_out.0.bias') new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({'old': old_item, 'new': new_item}) return mapping def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None): assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." 
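# `paths` comes from the renew_*_paths helpers above: a list of {'old': ..., 'new': ...} dicts mapping
# original LDM/VAE parameter names to their diffusers counterparts. Illustrative entry (hypothetical
# key) for a v1-style UNet ResNet block, before `additional_replacements` is applied:
#     {'old': 'input_blocks.1.0.in_layers.0.weight', 'new': 'input_blocks.1.0.norm1.weight'}
# A replacement such as {'old': 'input_blocks.1.0', 'new': 'down_blocks.0.resnets.0'} then rewrites the
# new key to 'down_blocks.0.resnets.0.norm1.weight' before the tensor is copied out of `old_checkpoint`.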
if attention_paths_to_split is not None: for (path, path_map) in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else -1 num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) (query, key, value) = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map['query']] = query.reshape(target_shape) checkpoint[path_map['key']] = key.reshape(target_shape) checkpoint[path_map['value']] = value.reshape(target_shape) for path in paths: new_path = path['new'] if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0') new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0') new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1') if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement['old'], replacement['new']) is_attn_weight = 'proj_attn.weight' in new_path or ('attentions' in new_path and 'to_' in new_path) shape = old_checkpoint[path['old']].shape if is_attn_weight and len(shape) == 3: checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0] elif is_attn_weight and len(shape) == 4: checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0, 0] else: checkpoint[new_path] = old_checkpoint[path['old']] def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ['query.weight', 'key.weight', 'value.weight'] for key in keys: if '.'.join(key.split('.')[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif 'proj_attn.weight' in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def create_unet_diffusers_config(original_config, image_size: int, controlnet=False): if controlnet: unet_params = original_config['model']['params']['control_stage_config']['params'] elif 'unet_config' in original_config['model']['params'] and original_config['model']['params']['unet_config'] is not None: unet_params = original_config['model']['params']['unet_config']['params'] else: unet_params = original_config['model']['params']['network_config']['params'] vae_params = original_config['model']['params']['first_stage_config']['params']['ddconfig'] block_out_channels = [unet_params['model_channels'] * mult for mult in unet_params['channel_mult']] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = 'CrossAttnDownBlock2D' if resolution in unet_params['attention_resolutions'] else 'DownBlock2D' down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = 'CrossAttnUpBlock2D' if resolution in unet_params['attention_resolutions'] else 'UpBlock2D' up_block_types.append(block_type) resolution //= 2 if unet_params['transformer_depth'] is not None: transformer_layers_per_block = unet_params['transformer_depth'] if isinstance(unet_params['transformer_depth'], int) else list(unet_params['transformer_depth']) else: transformer_layers_per_block = 1 vae_scale_factor = 2 ** (len(vae_params['ch_mult']) - 1) head_dim = unet_params['num_heads'] if 'num_heads' in unet_params else None use_linear_projection = unet_params['use_linear_in_transformer'] if 'use_linear_in_transformer' in unet_params 
else False if use_linear_projection: if head_dim is None: head_dim_mult = unet_params['model_channels'] // unet_params['num_head_channels'] head_dim = [head_dim_mult * c for c in list(unet_params['channel_mult'])] class_embed_type = None addition_embed_type = None addition_time_embed_dim = None projection_class_embeddings_input_dim = None context_dim = None if unet_params['context_dim'] is not None: context_dim = unet_params['context_dim'] if isinstance(unet_params['context_dim'], int) else unet_params['context_dim'][0] if 'num_classes' in unet_params: if unet_params['num_classes'] == 'sequential': if context_dim in [2048, 1280]: addition_embed_type = 'text_time' addition_time_embed_dim = 256 else: class_embed_type = 'projection' assert 'adm_in_channels' in unet_params projection_class_embeddings_input_dim = unet_params['adm_in_channels'] config = {'sample_size': image_size // vae_scale_factor, 'in_channels': unet_params['in_channels'], 'down_block_types': tuple(down_block_types), 'block_out_channels': tuple(block_out_channels), 'layers_per_block': unet_params['num_res_blocks'], 'cross_attention_dim': context_dim, 'attention_head_dim': head_dim, 'use_linear_projection': use_linear_projection, 'class_embed_type': class_embed_type, 'addition_embed_type': addition_embed_type, 'addition_time_embed_dim': addition_time_embed_dim, 'projection_class_embeddings_input_dim': projection_class_embeddings_input_dim, 'transformer_layers_per_block': transformer_layers_per_block} if 'disable_self_attentions' in unet_params: config['only_cross_attention'] = unet_params['disable_self_attentions'] if 'num_classes' in unet_params and isinstance(unet_params['num_classes'], int): config['num_class_embeds'] = unet_params['num_classes'] if controlnet: config['conditioning_channels'] = unet_params['hint_channels'] else: config['out_channels'] = unet_params['out_channels'] config['up_block_types'] = tuple(up_block_types) return config def create_vae_diffusers_config(original_config, image_size: int): vae_params = original_config['model']['params']['first_stage_config']['params']['ddconfig'] _ = original_config['model']['params']['first_stage_config']['params']['embed_dim'] block_out_channels = [vae_params['ch'] * mult for mult in vae_params['ch_mult']] down_block_types = ['DownEncoderBlock2D'] * len(block_out_channels) up_block_types = ['UpDecoderBlock2D'] * len(block_out_channels) config = {'sample_size': image_size, 'in_channels': vae_params['in_channels'], 'out_channels': vae_params['out_ch'], 'down_block_types': tuple(down_block_types), 'up_block_types': tuple(up_block_types), 'block_out_channels': tuple(block_out_channels), 'latent_channels': vae_params['z_channels'], 'layers_per_block': vae_params['num_res_blocks']} return config def create_diffusers_schedular(original_config): schedular = DDIMScheduler(num_train_timesteps=original_config['model']['params']['timesteps'], beta_start=original_config['model']['params']['linear_start'], beta_end=original_config['model']['params']['linear_end'], beta_schedule='scaled_linear') return schedular def create_ldm_bert_config(original_config): bert_params = original_config['model']['params']['cond_stage_config']['params'] config = LDMBertConfig(d_model=bert_params.n_embed, encoder_layers=bert_params.n_layer, encoder_ffn_dim=bert_params.n_embed * 4) return config def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False): if skip_extract_state_dict: unet_state_dict = checkpoint else: unet_state_dict = 
{} keys = list(checkpoint.keys()) if controlnet: unet_key = 'control_model.' else: unet_key = 'model.diffusion_model.' if sum((k.startswith('model_ema') for k in keys)) > 100 and extract_ema: logger.warning(f'Checkpoint {path} has both EMA and non-EMA weights.') logger.warning('In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.') for key in keys: if key.startswith('model.diffusion_model'): flat_ema_key = 'model_ema.' + ''.join(key.split('.')[1:]) unet_state_dict[key.replace(unet_key, '')] = checkpoint.pop(flat_ema_key) else: if sum((k.startswith('model_ema') for k in keys)) > 100: logger.warning('In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA weights (usually better for inference), please make sure to add the `--extract_ema` flag.') for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, '')] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint['time_embedding.linear_1.weight'] = unet_state_dict['time_embed.0.weight'] new_checkpoint['time_embedding.linear_1.bias'] = unet_state_dict['time_embed.0.bias'] new_checkpoint['time_embedding.linear_2.weight'] = unet_state_dict['time_embed.2.weight'] new_checkpoint['time_embedding.linear_2.bias'] = unet_state_dict['time_embed.2.bias'] if config['class_embed_type'] is None: ... elif config['class_embed_type'] == 'timestep' or config['class_embed_type'] == 'projection': new_checkpoint['class_embedding.linear_1.weight'] = unet_state_dict['label_emb.0.0.weight'] new_checkpoint['class_embedding.linear_1.bias'] = unet_state_dict['label_emb.0.0.bias'] new_checkpoint['class_embedding.linear_2.weight'] = unet_state_dict['label_emb.0.2.weight'] new_checkpoint['class_embedding.linear_2.bias'] = unet_state_dict['label_emb.0.2.bias'] else: raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") if config['addition_embed_type'] == 'text_time': new_checkpoint['add_embedding.linear_1.weight'] = unet_state_dict['label_emb.0.0.weight'] new_checkpoint['add_embedding.linear_1.bias'] = unet_state_dict['label_emb.0.0.bias'] new_checkpoint['add_embedding.linear_2.weight'] = unet_state_dict['label_emb.0.2.weight'] new_checkpoint['add_embedding.linear_2.bias'] = unet_state_dict['label_emb.0.2.bias'] if 'num_class_embeds' in config: if config['num_class_embeds'] is not None and 'label_emb.weight' in unet_state_dict: new_checkpoint['class_embedding.weight'] = unet_state_dict['label_emb.weight'] new_checkpoint['conv_in.weight'] = unet_state_dict['input_blocks.0.0.weight'] new_checkpoint['conv_in.bias'] = unet_state_dict['input_blocks.0.0.bias'] if not controlnet: new_checkpoint['conv_norm_out.weight'] = unet_state_dict['out.0.weight'] new_checkpoint['conv_norm_out.bias'] = unet_state_dict['out.0.bias'] new_checkpoint['conv_out.weight'] = unet_state_dict['out.2.weight'] new_checkpoint['conv_out.bias'] = unet_state_dict['out.2.bias'] num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'input_blocks' in layer}) input_blocks = {layer_id: [key for key in unet_state_dict if f'input_blocks.{layer_id}' in key] for layer_id in range(num_input_blocks)} num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'middle_block' in layer}) middle_blocks = {layer_id: [key for key in unet_state_dict if f'middle_block.{layer_id}' in key] for layer_id in 
range(num_middle_blocks)} num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'output_blocks' in layer}) output_blocks = {layer_id: [key for key in unet_state_dict if f'output_blocks.{layer_id}' in key] for layer_id in range(num_output_blocks)} for i in range(1, num_input_blocks): block_id = (i - 1) // (config['layers_per_block'] + 1) layer_in_block_id = (i - 1) % (config['layers_per_block'] + 1) resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key and f'input_blocks.{i}.0.op' not in key] attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in unet_state_dict: new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = unet_state_dict.pop(f'input_blocks.{i}.0.op.weight') new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = unet_state_dict.pop(f'input_blocks.{i}.0.op.bias') paths = renew_resnet_paths(resnets) meta_path = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config) if len(attentions): paths = renew_attention_paths(attentions) meta_path = {'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}'} assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) attentions_paths = renew_attention_paths(attentions) meta_path = {'old': 'middle_block.1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config) for i in range(num_output_blocks): block_id = i // (config['layers_per_block'] + 1) layer_in_block_id = i % (config['layers_per_block'] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: (layer_id, layer_name) = (layer.split('.')[0], shave_segments(layer, 1)) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config) output_block_list = {k: sorted(v) for (k, v) in sorted(output_block_list.items())} if ['conv.bias', 'conv.weight'] in output_block_list.values(): index = list(output_block_list.values()).index(['conv.bias', 'conv.weight']) new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = unet_state_dict[f'output_blocks.{i}.{index}.conv.weight'] new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = unet_state_dict[f'output_blocks.{i}.{index}.conv.bias'] if len(attentions) == 2: attentions = [] if len(attentions): 
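# Any attention keys still present here belong to a cross-attention up block; the meta_path below renames
# them from 'output_blocks.{i}.1.*' to 'up_blocks.{block_id}.attentions.{layer_in_block_id}.*'.
# (When the second sub-module was actually the upsampler conv handled just above, `attentions` was cleared.)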
paths = renew_attention_paths(attentions) meta_path = {'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}'} assign_to_checkpoint(paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = '.'.join(['output_blocks', str(i), path['old']]) new_path = '.'.join(['up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new']]) new_checkpoint[new_path] = unet_state_dict[old_path] if controlnet: orig_index = 0 new_checkpoint['controlnet_cond_embedding.conv_in.weight'] = unet_state_dict.pop(f'input_hint_block.{orig_index}.weight') new_checkpoint['controlnet_cond_embedding.conv_in.bias'] = unet_state_dict.pop(f'input_hint_block.{orig_index}.bias') orig_index += 2 diffusers_index = 0 while diffusers_index < 6: new_checkpoint[f'controlnet_cond_embedding.blocks.{diffusers_index}.weight'] = unet_state_dict.pop(f'input_hint_block.{orig_index}.weight') new_checkpoint[f'controlnet_cond_embedding.blocks.{diffusers_index}.bias'] = unet_state_dict.pop(f'input_hint_block.{orig_index}.bias') diffusers_index += 1 orig_index += 2 new_checkpoint['controlnet_cond_embedding.conv_out.weight'] = unet_state_dict.pop(f'input_hint_block.{orig_index}.weight') new_checkpoint['controlnet_cond_embedding.conv_out.bias'] = unet_state_dict.pop(f'input_hint_block.{orig_index}.bias') for i in range(num_input_blocks): new_checkpoint[f'controlnet_down_blocks.{i}.weight'] = unet_state_dict.pop(f'zero_convs.{i}.0.weight') new_checkpoint[f'controlnet_down_blocks.{i}.bias'] = unet_state_dict.pop(f'zero_convs.{i}.0.bias') new_checkpoint['controlnet_mid_block.weight'] = unet_state_dict.pop('middle_block_out.0.weight') new_checkpoint['controlnet_mid_block.bias'] = unet_state_dict.pop('middle_block_out.0.bias') return new_checkpoint def convert_ldm_vae_checkpoint(checkpoint, config): vae_state_dict = {} keys = list(checkpoint.keys()) vae_key = 'first_stage_model.' 
if any((k.startswith('first_stage_model.') for k in keys)) else '' for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, '')] = checkpoint.get(key) new_checkpoint = {} new_checkpoint['encoder.conv_in.weight'] = vae_state_dict['encoder.conv_in.weight'] new_checkpoint['encoder.conv_in.bias'] = vae_state_dict['encoder.conv_in.bias'] new_checkpoint['encoder.conv_out.weight'] = vae_state_dict['encoder.conv_out.weight'] new_checkpoint['encoder.conv_out.bias'] = vae_state_dict['encoder.conv_out.bias'] new_checkpoint['encoder.conv_norm_out.weight'] = vae_state_dict['encoder.norm_out.weight'] new_checkpoint['encoder.conv_norm_out.bias'] = vae_state_dict['encoder.norm_out.bias'] new_checkpoint['decoder.conv_in.weight'] = vae_state_dict['decoder.conv_in.weight'] new_checkpoint['decoder.conv_in.bias'] = vae_state_dict['decoder.conv_in.bias'] new_checkpoint['decoder.conv_out.weight'] = vae_state_dict['decoder.conv_out.weight'] new_checkpoint['decoder.conv_out.bias'] = vae_state_dict['decoder.conv_out.bias'] new_checkpoint['decoder.conv_norm_out.weight'] = vae_state_dict['decoder.norm_out.weight'] new_checkpoint['decoder.conv_norm_out.bias'] = vae_state_dict['decoder.norm_out.bias'] new_checkpoint['quant_conv.weight'] = vae_state_dict['quant_conv.weight'] new_checkpoint['quant_conv.bias'] = vae_state_dict['quant_conv.bias'] new_checkpoint['post_quant_conv.weight'] = vae_state_dict['post_quant_conv.weight'] new_checkpoint['post_quant_conv.bias'] = vae_state_dict['post_quant_conv.bias'] num_down_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'encoder.down' in layer}) down_blocks = {layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks)} num_up_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'decoder.up' in layer}) up_blocks = {layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(num_up_blocks)} for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key] if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(f'encoder.down.{i}.downsample.conv.weight') new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(f'encoder.down.{i}.downsample.conv.bias') paths = renew_vae_resnet_paths(resnets) meta_path = {'old': f'down.{i}.block', 'new': f'down_blocks.{i}.resnets'} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key] paths = renew_vae_resnet_paths(resnets) meta_path = {'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if 'encoder.mid.attn' in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [key for key in 
up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key] if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[f'decoder.up.{block_id}.upsample.conv.weight'] new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[f'decoder.up.{block_id}.upsample.conv.bias'] paths = renew_vae_resnet_paths(resnets) meta_path = {'old': f'up.{block_id}.block', 'new': f'up_blocks.{i}.resnets'} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key] paths = renew_vae_resnet_paths(resnets) meta_path = {'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if 'decoder.mid.attn' in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) return new_checkpoint def convert_ldm_bert_checkpoint(checkpoint, config): def _copy_attn_layer(hf_attn_layer, pt_attn_layer): hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias def _copy_linear(hf_linear, pt_linear): hf_linear.weight = pt_linear.weight hf_linear.bias = pt_linear.bias def _copy_layer(hf_layer, pt_layer): _copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0]) _copy_linear(hf_layer.final_layer_norm, pt_layer[1][0]) _copy_attn_layer(hf_layer.self_attn, pt_layer[0][1]) pt_mlp = pt_layer[1][1] _copy_linear(hf_layer.fc1, pt_mlp.net[0][0]) _copy_linear(hf_layer.fc2, pt_mlp.net[2]) def _copy_layers(hf_layers, pt_layers): for (i, hf_layer) in enumerate(hf_layers): if i != 0: i += i pt_layer = pt_layers[i:i + 2] _copy_layer(hf_layer, pt_layer) hf_model = LDMBertModel(config).eval() hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm) _copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers) _copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits) return hf_model def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None): if text_encoder is None: config_name = 'openai/clip-vit-large-patch14' try: config = CLIPTextConfig.from_pretrained(config_name, local_files_only=local_files_only) except Exception: raise ValueError(f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: 'openai/clip-vit-large-patch14'.") ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): text_model = CLIPTextModel(config) else: text_model = text_encoder keys = list(checkpoint.keys()) text_model_dict = {} remove_prefixes = 
['cond_stage_model.transformer', 'conditioner.embedders.0.transformer'] for key in keys: for prefix in remove_prefixes: if key.startswith(prefix): text_model_dict[key[len(prefix + '.'):]] = checkpoint[key] if is_accelerate_available(): for (param_name, param) in text_model_dict.items(): set_module_tensor_to_device(text_model, param_name, 'cpu', value=param) else: if not (hasattr(text_model, 'embeddings') and hasattr(text_model.embeddings, 'position_ids')): text_model_dict.pop('text_model.embeddings.position_ids', None) text_model.load_state_dict(text_model_dict) return text_model textenc_conversion_lst = [('positional_embedding', 'text_model.embeddings.position_embedding.weight'), ('token_embedding.weight', 'text_model.embeddings.token_embedding.weight'), ('ln_final.weight', 'text_model.final_layer_norm.weight'), ('ln_final.bias', 'text_model.final_layer_norm.bias'), ('text_projection', 'text_projection.weight')] textenc_conversion_map = {x[0]: x[1] for x in textenc_conversion_lst} textenc_transformer_conversion_lst = [('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight')] protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst} textenc_pattern = re.compile('|'.join(protected.keys())) def convert_paint_by_example_checkpoint(checkpoint, local_files_only=False): config = CLIPVisionConfig.from_pretrained('openai/clip-vit-large-patch14', local_files_only=local_files_only) model = PaintByExampleImageEncoder(config) keys = list(checkpoint.keys()) text_model_dict = {} for key in keys: if key.startswith('cond_stage_model.transformer'): text_model_dict[key[len('cond_stage_model.transformer.'):]] = checkpoint[key] model.model.load_state_dict(text_model_dict) keys_mapper = {k[len('cond_stage_model.mapper.res'):]: v for (k, v) in checkpoint.items() if k.startswith('cond_stage_model.mapper')} MAPPING = {'attn.c_qkv': ['attn1.to_q', 'attn1.to_k', 'attn1.to_v'], 'attn.c_proj': ['attn1.to_out.0'], 'ln_1': ['norm1'], 'ln_2': ['norm3'], 'mlp.c_fc': ['ff.net.0.proj'], 'mlp.c_proj': ['ff.net.2']} mapped_weights = {} for (key, value) in keys_mapper.items(): prefix = key[:len('blocks.i')] suffix = key.split(prefix)[-1].split('.')[-1] name = key.split(prefix)[-1].split(suffix)[0][1:-1] mapped_names = MAPPING[name] num_splits = len(mapped_names) for (i, mapped_name) in enumerate(mapped_names): new_name = '.'.join([prefix, mapped_name, suffix]) shape = value.shape[0] // num_splits mapped_weights[new_name] = value[i * shape:(i + 1) * shape] model.mapper.load_state_dict(mapped_weights) model.final_layer_norm.load_state_dict({'bias': checkpoint['cond_stage_model.final_ln.bias'], 'weight': checkpoint['cond_stage_model.final_ln.weight']}) model.proj_out.load_state_dict({'bias': checkpoint['proj_out.bias'], 'weight': checkpoint['proj_out.weight']}) model.uncond_vector.data = torch.nn.Parameter(checkpoint['learnable_vector']) return model def convert_open_clip_checkpoint(checkpoint, config_name, prefix='cond_stage_model.model.', has_projection=False, local_files_only=False, **config_kwargs): try: config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs, local_files_only=local_files_only) except Exception: raise ValueError(f"With local_files_only set
to {local_files_only}, you must first locally save the configuration in the following path: '{config_name}'.") ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config) keys = list(checkpoint.keys()) keys_to_ignore = [] if config_name == 'stabilityai/stable-diffusion-2' and config.num_hidden_layers == 23: keys_to_ignore += [k for k in keys if k.startswith('cond_stage_model.model.transformer.resblocks.23')] keys_to_ignore += ['cond_stage_model.model.text_projection'] text_model_dict = {} if prefix + 'text_projection' in checkpoint: d_model = int(checkpoint[prefix + 'text_projection'].shape[0]) else: d_model = 1024 text_model_dict['text_model.embeddings.position_ids'] = text_model.text_model.embeddings.get_buffer('position_ids') for key in keys: if key in keys_to_ignore: continue if key[len(prefix):] in textenc_conversion_map: if key.endswith('text_projection'): value = checkpoint[key].T.contiguous() else: value = checkpoint[key] text_model_dict[textenc_conversion_map[key[len(prefix):]]] = value if key.startswith(prefix + 'transformer.'): new_key = key[len(prefix + 'transformer.'):] if new_key.endswith('.in_proj_weight'): new_key = new_key[:-len('.in_proj_weight')] new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) text_model_dict[new_key + '.q_proj.weight'] = checkpoint[key][:d_model, :] text_model_dict[new_key + '.k_proj.weight'] = checkpoint[key][d_model:d_model * 2, :] text_model_dict[new_key + '.v_proj.weight'] = checkpoint[key][d_model * 2:, :] elif new_key.endswith('.in_proj_bias'): new_key = new_key[:-len('.in_proj_bias')] new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) text_model_dict[new_key + '.q_proj.bias'] = checkpoint[key][:d_model] text_model_dict[new_key + '.k_proj.bias'] = checkpoint[key][d_model:d_model * 2] text_model_dict[new_key + '.v_proj.bias'] = checkpoint[key][d_model * 2:] else: new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) text_model_dict[new_key] = checkpoint[key] if is_accelerate_available(): for (param_name, param) in text_model_dict.items(): set_module_tensor_to_device(text_model, param_name, 'cpu', value=param) else: if not (hasattr(text_model, 'embeddings') and hasattr(text_model.embeddings, 'position_ids')): text_model_dict.pop('text_model.embeddings.position_ids', None) text_model.load_state_dict(text_model_dict) return text_model def stable_unclip_image_encoder(original_config, local_files_only=False): image_embedder_config = original_config['model']['params']['embedder_config'] sd_clip_image_embedder_class = image_embedder_config['target'] sd_clip_image_embedder_class = sd_clip_image_embedder_class.split('.')[-1] if sd_clip_image_embedder_class == 'ClipImageEmbedder': clip_model_name = image_embedder_config.params.model if clip_model_name == 'ViT-L/14': feature_extractor = CLIPImageProcessor() image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14', local_files_only=local_files_only) else: raise NotImplementedError(f'Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}') elif sd_clip_image_embedder_class == 'FrozenOpenCLIPImageEmbedder': feature_extractor = CLIPImageProcessor() image_encoder = CLIPVisionModelWithProjection.from_pretrained('laion/CLIP-ViT-H-14-laion2B-s32B-b79K', local_files_only=local_files_only) else: raise NotImplementedError(f'Unknown CLIP image embedder
class in stable diffusion checkpoint {sd_clip_image_embedder_class}') return (feature_extractor, image_encoder) def stable_unclip_image_noising_components(original_config, clip_stats_path: Optional[str]=None, device: Optional[str]=None): noise_aug_config = original_config['model']['params']['noise_aug_config'] noise_aug_class = noise_aug_config['target'] noise_aug_class = noise_aug_class.split('.')[-1] if noise_aug_class == 'CLIPEmbeddingNoiseAugmentation': noise_aug_config = noise_aug_config.params embedding_dim = noise_aug_config.timestep_dim max_noise_level = noise_aug_config.noise_schedule_config.timesteps beta_schedule = noise_aug_config.noise_schedule_config.beta_schedule image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim) image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule) if 'clip_stats_path' in noise_aug_config: if clip_stats_path is None: raise ValueError('This stable unclip config requires a `clip_stats_path`') (clip_mean, clip_std) = torch.load(clip_stats_path, map_location=device) clip_mean = clip_mean[None, :] clip_std = clip_std[None, :] clip_stats_state_dict = {'mean': clip_mean, 'std': clip_std} image_normalizer.load_state_dict(clip_stats_state_dict) else: raise NotImplementedError(f'Unknown noise augmentor class: {noise_aug_class}') return (image_normalizer, image_noising_scheduler) def convert_controlnet_checkpoint(checkpoint, original_config, checkpoint_path, image_size, upcast_attention, extract_ema, use_linear_projection=None, cross_attention_dim=None): ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True) ctrlnet_config['upcast_attention'] = upcast_attention ctrlnet_config.pop('sample_size') if use_linear_projection is not None: ctrlnet_config['use_linear_projection'] = use_linear_projection if cross_attention_dim is not None: ctrlnet_config['cross_attention_dim'] = cross_attention_dim ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): controlnet = ControlNetModel(**ctrlnet_config) if 'time_embed.0.weight' in checkpoint: skip_extract_state_dict = True else: skip_extract_state_dict = False converted_ctrl_checkpoint = convert_ldm_unet_checkpoint(checkpoint, ctrlnet_config, path=checkpoint_path, extract_ema=extract_ema, controlnet=True, skip_extract_state_dict=skip_extract_state_dict) if is_accelerate_available(): for (param_name, param) in converted_ctrl_checkpoint.items(): set_module_tensor_to_device(controlnet, param_name, 'cpu', value=param) else: controlnet.load_state_dict(converted_ctrl_checkpoint) return controlnet def download_from_original_stable_diffusion_ckpt(checkpoint_path_or_dict: Union[str, Dict[str, torch.Tensor]], original_config_file: str=None, image_size: Optional[int]=None, prediction_type: str=None, model_type: str=None, extract_ema: bool=False, scheduler_type: str='pndm', num_in_channels: Optional[int]=None, upcast_attention: Optional[bool]=None, device: str=None, from_safetensors: bool=False, stable_unclip: Optional[str]=None, stable_unclip_prior: Optional[str]=None, clip_stats_path: Optional[str]=None, controlnet: Optional[bool]=None, adapter: Optional[bool]=None, load_safety_checker: bool=True, safety_checker: Optional[StableDiffusionSafetyChecker]=None, feature_extractor: Optional[AutoFeatureExtractor]=None, pipeline_class: DiffusionPipeline=None, local_files_only=False, vae_path=None, vae=None, text_encoder=None, text_encoder_2=None, tokenizer=None, tokenizer_2=None, config_files=None) 
-> DiffusionPipeline: from diffusers import LDMTextToImagePipeline, PaintByExamplePipeline, StableDiffusionControlNetPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionUpscalePipeline, StableDiffusionXLControlNetInpaintPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, StableUnCLIPImg2ImgPipeline, StableUnCLIPPipeline if prediction_type == 'v-prediction': prediction_type = 'v_prediction' if isinstance(checkpoint_path_or_dict, str): if from_safetensors: from safetensors.torch import load_file as safe_load checkpoint = safe_load(checkpoint_path_or_dict, device='cpu') elif device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' checkpoint = torch.load(checkpoint_path_or_dict, map_location=device) else: checkpoint = torch.load(checkpoint_path_or_dict, map_location=device) elif isinstance(checkpoint_path_or_dict, dict): checkpoint = checkpoint_path_or_dict if 'global_step' in checkpoint: global_step = checkpoint['global_step'] else: logger.debug('global_step key not found in model') global_step = None while 'state_dict' in checkpoint: checkpoint = checkpoint['state_dict'] if original_config_file is None: key_name_v2_1 = 'model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight' key_name_sd_xl_base = 'conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias' key_name_sd_xl_refiner = 'conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias' is_upscale = pipeline_class == StableDiffusionUpscalePipeline config_url = None if config_files is not None and 'v1' in config_files: original_config_file = config_files['v1'] else: config_url = 'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024: if config_files is not None and 'v2' in config_files: original_config_file = config_files['v2'] else: config_url = 'https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml' if global_step == 110000: upcast_attention = True elif key_name_sd_xl_base in checkpoint: if config_files is not None and 'xl' in config_files: original_config_file = config_files['xl'] else: config_url = 'https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml' elif key_name_sd_xl_refiner in checkpoint: if config_files is not None and 'xl_refiner' in config_files: original_config_file = config_files['xl_refiner'] else: config_url = 'https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml' if is_upscale: config_url = 'https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml' if config_url is not None: original_config_file = BytesIO(requests.get(config_url).content) else: with open(original_config_file, 'r') as f: original_config_file = f.read() else: with open(original_config_file, 'r') as f: original_config_file = f.read() original_config = yaml.safe_load(original_config_file) if model_type is None and 'cond_stage_config' in original_config['model']['params'] and (original_config['model']['params']['cond_stage_config'] is not None): model_type = original_config['model']['params']['cond_stage_config']['target'].split('.')[-1] logger.debug(f'no `model_type` given, `model_type` inferred as: {model_type}') elif model_type is None and 
original_config['model']['params']['network_config'] is not None: if original_config['model']['params']['network_config']['params']['context_dim'] == 2048: model_type = 'SDXL' else: model_type = 'SDXL-Refiner' if image_size is None: image_size = 1024 if pipeline_class is None: if model_type not in ['SDXL', 'SDXL-Refiner']: pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline else: pipeline_class = StableDiffusionXLPipeline if model_type == 'SDXL' else StableDiffusionXLImg2ImgPipeline if num_in_channels is None and pipeline_class in [StableDiffusionInpaintPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLControlNetInpaintPipeline]: num_in_channels = 9 if num_in_channels is None and pipeline_class == StableDiffusionUpscalePipeline: num_in_channels = 7 elif num_in_channels is None: num_in_channels = 4 if 'unet_config' in original_config['model']['params']: original_config['model']['params']['unet_config']['params']['in_channels'] = num_in_channels elif 'network_config' in original_config['model']['params']: original_config['model']['params']['network_config']['params']['in_channels'] = num_in_channels if 'parameterization' in original_config['model']['params'] and original_config['model']['params']['parameterization'] == 'v': if prediction_type is None: prediction_type = 'epsilon' if global_step == 875000 else 'v_prediction' if image_size is None: image_size = 512 if global_step == 875000 else 768 else: if prediction_type is None: prediction_type = 'epsilon' if image_size is None: image_size = 512 if controlnet is None and 'control_stage_config' in original_config['model']['params']: path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else '' controlnet = convert_controlnet_checkpoint(checkpoint, original_config, path, image_size, upcast_attention, extract_ema) if 'timesteps' in original_config['model']['params']: num_train_timesteps = original_config['model']['params']['timesteps'] else: num_train_timesteps = 1000 if model_type in ['SDXL', 'SDXL-Refiner']: scheduler_dict = {'beta_schedule': 'scaled_linear', 'beta_start': 0.00085, 'beta_end': 0.012, 'interpolation_type': 'linear', 'num_train_timesteps': num_train_timesteps, 'prediction_type': 'epsilon', 'sample_max_value': 1.0, 'set_alpha_to_one': False, 'skip_prk_steps': True, 'steps_offset': 1, 'timestep_spacing': 'leading'} scheduler = EulerDiscreteScheduler.from_config(scheduler_dict) scheduler_type = 'euler' else: if 'linear_start' in original_config['model']['params']: beta_start = original_config['model']['params']['linear_start'] else: beta_start = 0.02 if 'linear_end' in original_config['model']['params']: beta_end = original_config['model']['params']['linear_end'] else: beta_end = 0.085 scheduler = DDIMScheduler(beta_end=beta_end, beta_schedule='scaled_linear', beta_start=beta_start, num_train_timesteps=num_train_timesteps, steps_offset=1, clip_sample=False, set_alpha_to_one=False, prediction_type=prediction_type) scheduler.register_to_config(clip_sample=False) if scheduler_type == 'pndm': config = dict(scheduler.config) config['skip_prk_steps'] = True scheduler = PNDMScheduler.from_config(config) elif scheduler_type == 'lms': scheduler = LMSDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == 'heun': scheduler = HeunDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == 'euler': scheduler = EulerDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == 'euler-ancestral': scheduler = 
EulerAncestralDiscreteScheduler.from_config(scheduler.config) elif scheduler_type == 'dpm': scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) elif scheduler_type == 'ddim': scheduler = scheduler else: raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") if pipeline_class == StableDiffusionUpscalePipeline: image_size = original_config['model']['params']['unet_config']['params']['image_size'] unet_config = create_unet_diffusers_config(original_config, image_size=image_size) unet_config['upcast_attention'] = upcast_attention path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else '' converted_unet_checkpoint = convert_ldm_unet_checkpoint(checkpoint, unet_config, path=path, extract_ema=extract_ema) ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): unet = UNet2DConditionModel(**unet_config) if is_accelerate_available(): if model_type not in ['SDXL', 'SDXL-Refiner']: for (param_name, param) in converted_unet_checkpoint.items(): set_module_tensor_to_device(unet, param_name, 'cpu', value=param) else: unet.load_state_dict(converted_unet_checkpoint) if vae_path is None and vae is None: vae_config = create_vae_diffusers_config(original_config, image_size=image_size) converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) if 'model' in original_config and 'params' in original_config['model'] and ('scale_factor' in original_config['model']['params']): vae_scaling_factor = original_config['model']['params']['scale_factor'] else: vae_scaling_factor = 0.18215 vae_config['scaling_factor'] = vae_scaling_factor ctx = init_empty_weights if is_accelerate_available() else nullcontext with ctx(): vae = AutoencoderKL(**vae_config) if is_accelerate_available(): for (param_name, param) in converted_vae_checkpoint.items(): set_module_tensor_to_device(vae, param_name, 'cpu', value=param) else: vae.load_state_dict(converted_vae_checkpoint) elif vae is None: vae = AutoencoderKL.from_pretrained(vae_path, local_files_only=local_files_only) if model_type == 'FrozenOpenCLIPEmbedder': config_name = 'stabilityai/stable-diffusion-2' config_kwargs = {'subfolder': 'text_encoder'} if text_encoder is None: text_model = convert_open_clip_checkpoint(checkpoint, config_name, local_files_only=local_files_only, **config_kwargs) else: text_model = text_encoder try: tokenizer = CLIPTokenizer.from_pretrained('stabilityai/stable-diffusion-2', subfolder='tokenizer', local_files_only=local_files_only) except Exception: raise ValueError(f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'stabilityai/stable-diffusion-2'.") if stable_unclip is None: if controlnet: pipe = pipeline_class(vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, controlnet=controlnet, safety_checker=safety_checker, feature_extractor=feature_extractor) if hasattr(pipe, 'requires_safety_checker'): pipe.requires_safety_checker = False elif pipeline_class == StableDiffusionUpscalePipeline: scheduler = DDIMScheduler.from_pretrained('stabilityai/stable-diffusion-x4-upscaler', subfolder='scheduler') low_res_scheduler = DDPMScheduler.from_pretrained('stabilityai/stable-diffusion-x4-upscaler', subfolder='low_res_scheduler') pipe = pipeline_class(vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, low_res_scheduler=low_res_scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) else: pipe = 
pipeline_class(vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) if hasattr(pipe, 'requires_safety_checker'): pipe.requires_safety_checker = False else: (image_normalizer, image_noising_scheduler) = stable_unclip_image_noising_components(original_config, clip_stats_path=clip_stats_path, device=device) if stable_unclip == 'img2img': (feature_extractor, image_encoder) = stable_unclip_image_encoder(original_config) pipe = StableUnCLIPImg2ImgPipeline(feature_extractor=feature_extractor, image_encoder=image_encoder, image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, tokenizer=tokenizer, text_encoder=text_model, unet=unet, scheduler=scheduler, vae=vae) elif stable_unclip == 'txt2img': if stable_unclip_prior is None or stable_unclip_prior == 'karlo': karlo_model = 'kakaobrain/karlo-v1-alpha' prior = PriorTransformer.from_pretrained(karlo_model, subfolder='prior', local_files_only=local_files_only) try: prior_tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14', local_files_only=local_files_only) except Exception: raise ValueError(f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'.") prior_text_model = CLIPTextModelWithProjection.from_pretrained('openai/clip-vit-large-patch14', local_files_only=local_files_only) prior_scheduler = UnCLIPScheduler.from_pretrained(karlo_model, subfolder='prior_scheduler', local_files_only=local_files_only) prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config) else: raise NotImplementedError(f'unknown prior for stable unclip model: {stable_unclip_prior}') pipe = StableUnCLIPPipeline(prior_tokenizer=prior_tokenizer, prior_text_encoder=prior_text_model, prior=prior, prior_scheduler=prior_scheduler, image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, tokenizer=tokenizer, text_encoder=text_model, unet=unet, scheduler=scheduler, vae=vae) else: raise NotImplementedError(f'unknown `stable_unclip` type: {stable_unclip}') elif model_type == 'PaintByExample': vision_model = convert_paint_by_example_checkpoint(checkpoint) try: tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14', local_files_only=local_files_only) except Exception: raise ValueError(f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'.") try: feature_extractor = AutoFeatureExtractor.from_pretrained('CompVis/stable-diffusion-safety-checker', local_files_only=local_files_only) except Exception: raise ValueError(f"With local_files_only set to {local_files_only}, you must first locally save the feature_extractor in the following path: 'CompVis/stable-diffusion-safety-checker'.") pipe = PaintByExamplePipeline(vae=vae, image_encoder=vision_model, unet=unet, scheduler=scheduler, safety_checker=None, feature_extractor=feature_extractor) elif model_type == 'FrozenCLIPEmbedder': text_model = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only, text_encoder=text_encoder) try: tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14', local_files_only=local_files_only) if tokenizer is None else tokenizer except Exception: raise ValueError(f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 
'openai/clip-vit-large-patch14'.") if load_safety_checker: safety_checker = StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker', local_files_only=local_files_only) feature_extractor = AutoFeatureExtractor.from_pretrained('CompVis/stable-diffusion-safety-checker', local_files_only=local_files_only) if controlnet: pipe = pipeline_class(vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) else: pipe = pipeline_class(vae=vae, text_encoder=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) elif model_type in ['SDXL', 'SDXL-Refiner']: is_refiner = model_type == 'SDXL-Refiner' if is_refiner is False and tokenizer is None: try: tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14', local_files_only=local_files_only) except Exception: raise ValueError(f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'.") if is_refiner is False and text_encoder is None: text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only) if tokenizer_2 is None: try: tokenizer_2 = CLIPTokenizer.from_pretrained('laion/CLIP-ViT-bigG-14-laion2B-39B-b160k', pad_token='!', local_files_only=local_files_only) except Exception: raise ValueError(f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' with `pad_token` set to '!'.") if text_encoder_2 is None: config_name = 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' config_kwargs = {'projection_dim': 1280} prefix = 'conditioner.embedders.0.model.' if is_refiner else 'conditioner.embedders.1.model.' 
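# The `prefix` chosen above decides which sub-state-dict of the original SGM checkpoint is converted into `text_encoder_2`:
# the SDXL base checkpoint stores its CLIP ViT-L encoder under 'conditioner.embedders.0.model.' and its OpenCLIP ViT-bigG
# encoder under 'conditioner.embedders.1.model.', whereas the refiner ships only the OpenCLIP encoder, so it sits at
# 'conditioner.embedders.0.model.'. A minimal sketch of what the prefix selection amounts to (hypothetical one-liner for
# illustration only; the actual key remapping happens inside `convert_open_clip_checkpoint`):
#     open_clip_sd = {k[len(prefix):]: v for k, v in checkpoint.items() if k.startswith(prefix)}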
text_encoder_2 = convert_open_clip_checkpoint(checkpoint, config_name, prefix=prefix, has_projection=True, local_files_only=local_files_only, **config_kwargs) if is_accelerate_available(): for (param_name, param) in converted_unet_checkpoint.items(): set_module_tensor_to_device(unet, param_name, 'cpu', value=param) if controlnet: pipe = pipeline_class(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, force_zeros_for_empty_prompt=True) elif adapter: pipe = pipeline_class(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, unet=unet, adapter=adapter, scheduler=scheduler, force_zeros_for_empty_prompt=True) else: pipeline_kwargs = {'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_2, 'tokenizer_2': tokenizer_2, 'unet': unet, 'scheduler': scheduler} if pipeline_class == StableDiffusionXLImg2ImgPipeline or pipeline_class == StableDiffusionXLInpaintPipeline: pipeline_kwargs.update({'requires_aesthetics_score': is_refiner}) if is_refiner: pipeline_kwargs.update({'force_zeros_for_empty_prompt': False}) pipe = pipeline_class(**pipeline_kwargs) else: text_config = create_ldm_bert_config(original_config) text_model = convert_ldm_bert_checkpoint(checkpoint, text_config) tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased', local_files_only=local_files_only) pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler) return pipe def download_controlnet_from_original_ckpt(checkpoint_path: str, original_config_file: str, image_size: int=512, extract_ema: bool=False, num_in_channels: Optional[int]=None, upcast_attention: Optional[bool]=None, device: str=None, from_safetensors: bool=False, use_linear_projection: Optional[bool]=None, cross_attention_dim: Optional[bool]=None) -> DiffusionPipeline: if from_safetensors: from safetensors import safe_open checkpoint = {} with safe_open(checkpoint_path, framework='pt', device='cpu') as f: for key in f.keys(): checkpoint[key] = f.get_tensor(key) elif device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' checkpoint = torch.load(checkpoint_path, map_location=device) else: checkpoint = torch.load(checkpoint_path, map_location=device) while 'state_dict' in checkpoint: checkpoint = checkpoint['state_dict'] with open(original_config_file, 'r') as f: original_config_file = f.read() original_config = yaml.safe_load(original_config_file) if num_in_channels is not None: original_config['model']['params']['unet_config']['params']['in_channels'] = num_in_channels if 'control_stage_config' not in original_config['model']['params']: raise ValueError('`control_stage_config` not present in original config') controlnet = convert_controlnet_checkpoint(checkpoint, original_config, checkpoint_path, image_size, upcast_attention, extract_ema, use_linear_projection=use_linear_projection, cross_attention_dim=cross_attention_dim) return controlnet # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py import warnings from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard from packaging import version from PIL import Image from transformers 
import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from ...schedulers import FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler from ...utils import deprecate, logging, replace_example_docstring from ..pipeline_flax_utils import FlaxDiffusionPipeline from .pipeline_output import FlaxStableDiffusionPipelineOutput from .safety_checker_flax import FlaxStableDiffusionSafetyChecker logger = logging.get_logger(__name__) DEBUG = False EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import jax\n >>> import numpy as np\n >>> from flax.jax_utils import replicate\n >>> from flax.training.common_utils import shard\n\n >>> from diffusers import FlaxStableDiffusionPipeline\n\n >>> pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", variant="bf16", dtype=jax.numpy.bfloat16\n ... )\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n\n >>> prng_seed = jax.random.PRNGKey(0)\n >>> num_inference_steps = 50\n\n >>> num_samples = jax.device_count()\n >>> prompt = num_samples * [prompt]\n >>> prompt_ids = pipeline.prepare_inputs(prompt)\n # shard inputs and rng\n\n >>> params = replicate(params)\n >>> prng_seed = jax.random.split(prng_seed, jax.device_count())\n >>> prompt_ids = shard(prompt_ids)\n\n >>> images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images\n >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))\n ```\n' class FlaxStableDiffusionPipeline(FlaxDiffusionPipeline): def __init__(self, vae: FlaxAutoencoderKL, text_encoder: FlaxCLIPTextModel, tokenizer: CLIPTokenizer, unet: FlaxUNet2DConditionModel, scheduler: Union[FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler], safety_checker: FlaxStableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, dtype: jnp.dtype=jnp.float32): super().__init__() self.dtype = dtype if safety_checker is None: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. 
Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def prepare_inputs(self, prompt: Union[str, List[str]]): if not isinstance(prompt, (str, list)): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') return text_input.input_ids def _get_has_nsfw_concepts(self, features, params): has_nsfw_concepts = self.safety_checker(features, params) return has_nsfw_concepts def _run_safety_checker(self, images, safety_model_params, jit=False): pil_images = [Image.fromarray(image) for image in images] features = self.feature_extractor(pil_images, return_tensors='np').pixel_values if jit: features = shard(features) has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) has_nsfw_concepts = unshard(has_nsfw_concepts) safety_model_params = unreplicate(safety_model_params) else: has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) images_was_copied = False for (idx, has_nsfw_concept) in enumerate(has_nsfw_concepts): if has_nsfw_concept: if not images_was_copied: images_was_copied = True images = images.copy() images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) if any(has_nsfw_concepts): warnings.warn('Potential NSFW content was detected in one or more images. A black image will be returned instead. 
Try again with a different prompt and/or seed.') return (images, has_nsfw_concepts) def _generate(self, prompt_ids: jnp.array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, height: int, width: int, guidance_scale: float, latents: Optional[jnp.ndarray]=None, neg_prompt_ids: Optional[jnp.ndarray]=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') prompt_embeds = self.text_encoder(prompt_ids, params=params['text_encoder'])[0] batch_size = prompt_ids.shape[0] max_length = prompt_ids.shape[-1] if neg_prompt_ids is None: uncond_input = self.tokenizer([''] * batch_size, padding='max_length', max_length=max_length, return_tensors='np').input_ids else: uncond_input = neg_prompt_ids negative_prompt_embeds = self.text_encoder(uncond_input, params=params['text_encoder'])[0] context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) latents_shape = (batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) elif latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') def loop_body(step, args): (latents, scheduler_state) = args latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) noise_pred = self.unet.apply({'params': params['unet']}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context).sample (noise_pred_uncond, noise_prediction_text) = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) (latents, scheduler_state) = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return (latents, scheduler_state) scheduler_state = self.scheduler.set_timesteps(params['scheduler'], num_inference_steps=num_inference_steps, shape=latents.shape) latents = latents * params['scheduler'].init_noise_sigma if DEBUG: for i in range(num_inference_steps): (latents, scheduler_state) = loop_body(i, (latents, scheduler_state)) else: (latents, _) = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({'params': params['vae']}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt_ids: jnp.array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int=50, height: Optional[int]=None, width: Optional[int]=None, guidance_scale: Union[float, jnp.ndarray]=7.5, latents: jnp.ndarray=None, neg_prompt_ids: jnp.ndarray=None, return_dict: bool=True, jit: bool=False): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(guidance_scale, float): guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: guidance_scale = guidance_scale[:, None] if jit: images = _p_generate(self, prompt_ids, params, prng_seed, 
num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids) else: images = self._generate(prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids) if self.safety_checker is not None: safety_params = params['safety_checker'] images_uint8_casted = (images * 255).round().astype('uint8') (num_devices, batch_size) = images.shape[:2] images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) (images_uint8_casted, has_nsfw_concept) = self._run_safety_checker(images_uint8_casted, safety_params, jit) images = np.asarray(images).copy() if any(has_nsfw_concept): for (i, is_nsfw) in enumerate(has_nsfw_concept): if is_nsfw: images[i, 0] = np.asarray(images_uint8_casted[i]) images = images.reshape(num_devices, batch_size, height, width, 3) else: images = np.asarray(images) has_nsfw_concept = False if not return_dict: return (images, has_nsfw_concept) return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) @partial(jax.pmap, in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0), static_broadcasted_argnums=(0, 4, 5, 6)) def _p_generate(pipe, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids): return pipe._generate(prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids) @partial(jax.pmap, static_broadcasted_argnums=(0,)) def _p_get_has_nsfw_concepts(pipe, features, params): return pipe._get_has_nsfw_concepts(features, params) def unshard(x: jnp.ndarray): (num_devices, batch_size) = x.shape[:2] rest = x.shape[2:] return x.reshape(num_devices * batch_size, *rest) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py import warnings from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard from PIL import Image from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from ...schedulers import FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring from ..pipeline_flax_utils import FlaxDiffusionPipeline from .pipeline_output import FlaxStableDiffusionPipelineOutput from .safety_checker_flax import FlaxStableDiffusionSafetyChecker logger = logging.get_logger(__name__) DEBUG = False EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import jax\n >>> import numpy as np\n >>> import jax.numpy as jnp\n >>> from flax.jax_utils import replicate\n >>> from flax.training.common_utils import shard\n >>> import requests\n >>> from io import BytesIO\n >>> from PIL import Image\n >>> from diffusers import FlaxStableDiffusionImg2ImgPipeline\n\n\n >>> def create_key(seed=0):\n ... 
return jax.random.PRNGKey(seed)\n\n\n >>> rng = create_key(0)\n\n >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n >>> response = requests.get(url)\n >>> init_img = Image.open(BytesIO(response.content)).convert("RGB")\n >>> init_img = init_img.resize((768, 512))\n\n >>> prompts = "A fantasy landscape, trending on artstation"\n\n >>> pipeline, params = FlaxStableDiffusionImg2ImgPipeline.from_pretrained(\n ... "CompVis/stable-diffusion-v1-4",\n ... revision="flax",\n ... dtype=jnp.bfloat16,\n ... )\n\n >>> num_samples = jax.device_count()\n >>> rng = jax.random.split(rng, jax.device_count())\n >>> prompt_ids, processed_image = pipeline.prepare_inputs(\n ... prompt=[prompts] * num_samples, image=[init_img] * num_samples\n ... )\n >>> p_params = replicate(params)\n >>> prompt_ids = shard(prompt_ids)\n >>> processed_image = shard(processed_image)\n\n >>> output = pipeline(\n ... prompt_ids=prompt_ids,\n ... image=processed_image,\n ... params=p_params,\n ... prng_seed=rng,\n ... strength=0.75,\n ... num_inference_steps=50,\n ... jit=True,\n ... height=512,\n ... width=768,\n ... ).images\n\n >>> output_images = pipeline.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))\n ```\n' class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline): def __init__(self, vae: FlaxAutoencoderKL, text_encoder: FlaxCLIPTextModel, tokenizer: CLIPTokenizer, unet: FlaxUNet2DConditionModel, scheduler: Union[FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler], safety_checker: FlaxStableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, dtype: jnp.dtype=jnp.float32): super().__init__() self.dtype = dtype if safety_checker is None: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def prepare_inputs(self, prompt: Union[str, List[str]], image: Union[Image.Image, List[Image.Image]]): if not isinstance(prompt, (str, list)): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if not isinstance(image, (Image.Image, list)): raise ValueError(f'image has to be of type `PIL.Image.Image` or list but is {type(image)}') if isinstance(image, Image.Image): image = [image] processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') return (text_input.input_ids, processed_images) def _get_has_nsfw_concepts(self, features, params): has_nsfw_concepts = self.safety_checker(features, params) return has_nsfw_concepts def _run_safety_checker(self, images, safety_model_params, jit=False): pil_images = [Image.fromarray(image) for image in images] features = self.feature_extractor(pil_images, return_tensors='np').pixel_values if jit: features = shard(features) has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) has_nsfw_concepts = unshard(has_nsfw_concepts) safety_model_params = unreplicate(safety_model_params) else: has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) images_was_copied = False for (idx, has_nsfw_concept) in enumerate(has_nsfw_concepts): if has_nsfw_concept: if not images_was_copied: images_was_copied = True images = images.copy() images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) if any(has_nsfw_concepts): warnings.warn('Potential NSFW content was detected in one or more images. A black image will be returned instead. 
Try again with a different prompt and/or seed.') return (images, has_nsfw_concepts) def get_timestep_start(self, num_inference_steps, strength): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) return t_start def _generate(self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, start_timestep: int, num_inference_steps: int, height: int, width: int, guidance_scale: float, noise: Optional[jnp.ndarray]=None, neg_prompt_ids: Optional[jnp.ndarray]=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') prompt_embeds = self.text_encoder(prompt_ids, params=params['text_encoder'])[0] batch_size = prompt_ids.shape[0] max_length = prompt_ids.shape[-1] if neg_prompt_ids is None: uncond_input = self.tokenizer([''] * batch_size, padding='max_length', max_length=max_length, return_tensors='np').input_ids else: uncond_input = neg_prompt_ids negative_prompt_embeds = self.text_encoder(uncond_input, params=params['text_encoder'])[0] context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) latents_shape = (batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor) if noise is None: noise = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) elif noise.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {noise.shape}, expected {latents_shape}') init_latent_dist = self.vae.apply({'params': params['vae']}, image, method=self.vae.encode).latent_dist init_latents = init_latent_dist.sample(key=prng_seed).transpose((0, 3, 1, 2)) init_latents = self.vae.config.scaling_factor * init_latents def loop_body(step, args): (latents, scheduler_state) = args latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) noise_pred = self.unet.apply({'params': params['unet']}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context).sample (noise_pred_uncond, noise_prediction_text) = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) (latents, scheduler_state) = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return (latents, scheduler_state) scheduler_state = self.scheduler.set_timesteps(params['scheduler'], num_inference_steps=num_inference_steps, shape=latents_shape) latent_timestep = scheduler_state.timesteps[start_timestep:start_timestep + 1].repeat(batch_size) latents = self.scheduler.add_noise(params['scheduler'], init_latents, noise, latent_timestep) latents = latents * params['scheduler'].init_noise_sigma if DEBUG: for i in range(start_timestep, num_inference_steps): (latents, scheduler_state) = loop_body(i, (latents, scheduler_state)) else: (latents, _) = jax.lax.fori_loop(start_timestep, num_inference_steps, loop_body, (latents, scheduler_state)) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({'params': params['vae']}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: 
Union[Dict, FrozenDict], prng_seed: jax.Array, strength: float=0.8, num_inference_steps: int=50, height: Optional[int]=None, width: Optional[int]=None, guidance_scale: Union[float, jnp.ndarray]=7.5, noise: jnp.ndarray=None, neg_prompt_ids: jnp.ndarray=None, return_dict: bool=True, jit: bool=False): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(guidance_scale, float): guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: guidance_scale = guidance_scale[:, None] start_timestep = self.get_timestep_start(num_inference_steps, strength) if jit: images = _p_generate(self, prompt_ids, image, params, prng_seed, start_timestep, num_inference_steps, height, width, guidance_scale, noise, neg_prompt_ids) else: images = self._generate(prompt_ids, image, params, prng_seed, start_timestep, num_inference_steps, height, width, guidance_scale, noise, neg_prompt_ids) if self.safety_checker is not None: safety_params = params['safety_checker'] images_uint8_casted = (images * 255).round().astype('uint8') (num_devices, batch_size) = images.shape[:2] images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) (images_uint8_casted, has_nsfw_concept) = self._run_safety_checker(images_uint8_casted, safety_params, jit) images = np.asarray(images) if any(has_nsfw_concept): for (i, is_nsfw) in enumerate(has_nsfw_concept): if is_nsfw: images[i] = np.asarray(images_uint8_casted[i]) images = images.reshape(num_devices, batch_size, height, width, 3) else: images = np.asarray(images) has_nsfw_concept = False if not return_dict: return (images, has_nsfw_concept) return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) @partial(jax.pmap, in_axes=(None, 0, 0, 0, 0, None, None, None, None, 0, 0, 0), static_broadcasted_argnums=(0, 5, 6, 7, 8)) def _p_generate(pipe, prompt_ids, image, params, prng_seed, start_timestep, num_inference_steps, height, width, guidance_scale, noise, neg_prompt_ids): return pipe._generate(prompt_ids, image, params, prng_seed, start_timestep, num_inference_steps, height, width, guidance_scale, noise, neg_prompt_ids) @partial(jax.pmap, static_broadcasted_argnums=(0,)) def _p_get_has_nsfw_concepts(pipe, features, params): return pipe._get_has_nsfw_concepts(features, params) def unshard(x: jnp.ndarray): (num_devices, batch_size) = x.shape[:2] rest = x.shape[2:] return x.reshape(num_devices * batch_size, *rest) def preprocess(image, dtype): (w, h) = image.size (w, h) = (x - x % 32 for x in (w, h)) image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos']) image = jnp.array(image).astype(dtype) / 255.0 image = image[None].transpose(0, 3, 1, 2) return 2.0 * image - 1.0 # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py import warnings from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard from packaging import version from PIL import Image from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from ...schedulers import FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler from 
...utils import PIL_INTERPOLATION, deprecate, logging, replace_example_docstring from ..pipeline_flax_utils import FlaxDiffusionPipeline from .pipeline_output import FlaxStableDiffusionPipelineOutput from .safety_checker_flax import FlaxStableDiffusionSafetyChecker logger = logging.get_logger(__name__) DEBUG = False EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import jax\n >>> import numpy as np\n >>> from flax.jax_utils import replicate\n >>> from flax.training.common_utils import shard\n >>> import PIL\n >>> import requests\n >>> from io import BytesIO\n >>> from diffusers import FlaxStableDiffusionInpaintPipeline\n\n\n >>> def download_image(url):\n ... response = requests.get(url)\n ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")\n\n\n >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"\n >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"\n\n >>> init_image = download_image(img_url).resize((512, 512))\n >>> mask_image = download_image(mask_url).resize((512, 512))\n\n >>> pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(\n ... "xvjiarui/stable-diffusion-2-inpainting"\n ... )\n\n >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"\n >>> prng_seed = jax.random.PRNGKey(0)\n >>> num_inference_steps = 50\n\n >>> num_samples = jax.device_count()\n >>> prompt = num_samples * [prompt]\n >>> init_image = num_samples * [init_image]\n >>> mask_image = num_samples * [mask_image]\n >>> prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(\n ... prompt, init_image, mask_image\n ... )\n # shard inputs and rng\n\n >>> params = replicate(params)\n >>> prng_seed = jax.random.split(prng_seed, jax.device_count())\n >>> prompt_ids = shard(prompt_ids)\n >>> processed_masked_images = shard(processed_masked_images)\n >>> processed_masks = shard(processed_masks)\n\n >>> images = pipeline(\n ... prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True\n ... ).images\n >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))\n ```\n' class FlaxStableDiffusionInpaintPipeline(FlaxDiffusionPipeline): def __init__(self, vae: FlaxAutoencoderKL, text_encoder: FlaxCLIPTextModel, tokenizer: CLIPTokenizer, unet: FlaxUNet2DConditionModel, scheduler: Union[FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler], safety_checker: FlaxStableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, dtype: jnp.dtype=jnp.float32): super().__init__() self.dtype = dtype if safety_checker is None: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def prepare_inputs(self, prompt: Union[str, List[str]], image: Union[Image.Image, List[Image.Image]], mask: Union[Image.Image, List[Image.Image]]): if not isinstance(prompt, (str, list)): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if not isinstance(image, (Image.Image, list)): raise ValueError(f'image has to be of type `PIL.Image.Image` or list but is {type(image)}') if isinstance(image, Image.Image): image = [image] if not isinstance(mask, (Image.Image, list)): raise ValueError(f'mask has to be of type `PIL.Image.Image` or list but is {type(mask)}') if isinstance(mask, Image.Image): mask = [mask] processed_images = jnp.concatenate([preprocess_image(img, jnp.float32) for img in image]) processed_masks = jnp.concatenate([preprocess_mask(m, jnp.float32) for m in mask]) processed_masks = processed_masks.at[processed_masks < 0.5].set(0) processed_masks = processed_masks.at[processed_masks >= 0.5].set(1) processed_masked_images = processed_images * (processed_masks < 0.5) text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') return (text_input.input_ids, processed_masked_images, processed_masks) def _get_has_nsfw_concepts(self, features, params): has_nsfw_concepts = self.safety_checker(features, params) return has_nsfw_concepts def _run_safety_checker(self, images, safety_model_params, jit=False): pil_images = [Image.fromarray(image) for image in images] features = self.feature_extractor(pil_images, return_tensors='np').pixel_values if jit: features = shard(features) has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) has_nsfw_concepts = unshard(has_nsfw_concepts) safety_model_params = unreplicate(safety_model_params) else: has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) 
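# The block below lazily copies `images` the first time a flagged sample is encountered and replaces every flagged
# sample with an all-zero (black) uint8 frame of the same shape; a single warning is emitted afterwards if any image
# was blanked, and the per-image flags are returned alongside the (possibly modified) images.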
images_was_copied = False for (idx, has_nsfw_concept) in enumerate(has_nsfw_concepts): if has_nsfw_concept: if not images_was_copied: images_was_copied = True images = images.copy() images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) if any(has_nsfw_concepts): warnings.warn('Potential NSFW content was detected in one or more images. A black image will be returned instead. Try again with a different prompt and/or seed.') return (images, has_nsfw_concepts) def _generate(self, prompt_ids: jnp.ndarray, mask: jnp.ndarray, masked_image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, height: int, width: int, guidance_scale: float, latents: Optional[jnp.ndarray]=None, neg_prompt_ids: Optional[jnp.ndarray]=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') prompt_embeds = self.text_encoder(prompt_ids, params=params['text_encoder'])[0] batch_size = prompt_ids.shape[0] max_length = prompt_ids.shape[-1] if neg_prompt_ids is None: uncond_input = self.tokenizer([''] * batch_size, padding='max_length', max_length=max_length, return_tensors='np').input_ids else: uncond_input = neg_prompt_ids negative_prompt_embeds = self.text_encoder(uncond_input, params=params['text_encoder'])[0] context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) latents_shape = (batch_size, self.vae.config.latent_channels, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=self.dtype) elif latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') (prng_seed, mask_prng_seed) = jax.random.split(prng_seed) masked_image_latent_dist = self.vae.apply({'params': params['vae']}, masked_image, method=self.vae.encode).latent_dist masked_image_latents = masked_image_latent_dist.sample(key=mask_prng_seed).transpose((0, 3, 1, 2)) masked_image_latents = self.vae.config.scaling_factor * masked_image_latents del mask_prng_seed mask = jax.image.resize(mask, (*mask.shape[:-2], *masked_image_latents.shape[-2:]), method='nearest') num_channels_latents = self.vae.config.latent_channels num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. 
Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') def loop_body(step, args): (latents, mask, masked_image_latents, scheduler_state) = args latents_input = jnp.concatenate([latents] * 2) mask_input = jnp.concatenate([mask] * 2) masked_image_latents_input = jnp.concatenate([masked_image_latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) latents_input = jnp.concatenate([latents_input, mask_input, masked_image_latents_input], axis=1) noise_pred = self.unet.apply({'params': params['unet']}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context).sample (noise_pred_uncond, noise_prediction_text) = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) (latents, scheduler_state) = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return (latents, mask, masked_image_latents, scheduler_state) scheduler_state = self.scheduler.set_timesteps(params['scheduler'], num_inference_steps=num_inference_steps, shape=latents.shape) latents = latents * params['scheduler'].init_noise_sigma if DEBUG: for i in range(num_inference_steps): (latents, mask, masked_image_latents, scheduler_state) = loop_body(i, (latents, mask, masked_image_latents, scheduler_state)) else: (latents, _, _, _) = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, mask, masked_image_latents, scheduler_state)) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({'params': params['vae']}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt_ids: jnp.ndarray, mask: jnp.ndarray, masked_image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int=50, height: Optional[int]=None, width: Optional[int]=None, guidance_scale: Union[float, jnp.ndarray]=7.5, latents: jnp.ndarray=None, neg_prompt_ids: jnp.ndarray=None, return_dict: bool=True, jit: bool=False): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor masked_image = jax.image.resize(masked_image, (*masked_image.shape[:-2], height, width), method='bicubic') mask = jax.image.resize(mask, (*mask.shape[:-2], height, width), method='nearest') if isinstance(guidance_scale, float): guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: guidance_scale = guidance_scale[:, None] if jit: images = _p_generate(self, prompt_ids, mask, masked_image, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids) else: images = self._generate(prompt_ids, mask, masked_image, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids) if self.safety_checker is not None: safety_params = params['safety_checker'] images_uint8_casted = (images * 255).round().astype('uint8') (num_devices, batch_size) = images.shape[:2] images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) (images_uint8_casted, has_nsfw_concept) = self._run_safety_checker(images_uint8_casted, safety_params, jit) images = np.asarray(images) if any(has_nsfw_concept): for 
(i, is_nsfw) in enumerate(has_nsfw_concept): if is_nsfw: images[i] = np.asarray(images_uint8_casted[i]) images = images.reshape(num_devices, batch_size, height, width, 3) else: images = np.asarray(images) has_nsfw_concept = False if not return_dict: return (images, has_nsfw_concept) return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) @partial(jax.pmap, in_axes=(None, 0, 0, 0, 0, 0, None, None, None, 0, 0, 0), static_broadcasted_argnums=(0, 6, 7, 8)) def _p_generate(pipe, prompt_ids, mask, masked_image, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids): return pipe._generate(prompt_ids, mask, masked_image, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids) @partial(jax.pmap, static_broadcasted_argnums=(0,)) def _p_get_has_nsfw_concepts(pipe, features, params): return pipe._get_has_nsfw_concepts(features, params) def unshard(x: jnp.ndarray): (num_devices, batch_size) = x.shape[:2] rest = x.shape[2:] return x.reshape(num_devices * batch_size, *rest) def preprocess_image(image, dtype): (w, h) = image.size (w, h) = (x - x % 32 for x in (w, h)) image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos']) image = jnp.array(image).astype(dtype) / 255.0 image = image[None].transpose(0, 3, 1, 2) return 2.0 * image - 1.0 def preprocess_mask(mask, dtype): (w, h) = mask.size (w, h) = (x - x % 32 for x in (w, h)) mask = mask.resize((w, h)) mask = jnp.array(mask.convert('L')).astype(dtype) / 255.0 mask = jnp.expand_dims(mask, axis=(0, 1)) return mask # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py import inspect from typing import Callable, List, Optional, Union import numpy as np import torch from transformers import CLIPImageProcessor, CLIPTokenizer from ...configuration_utils import FrozenDict from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import deprecate, logging from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel from ..pipeline_utils import DiffusionPipeline from . import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) class OnnxStableDiffusionPipeline(DiffusionPipeline): vae_encoder: OnnxRuntimeModel vae_decoder: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] safety_checker: OnnxRuntimeModel feature_extractor: CLIPImageProcessor _optional_components = ['safety_checker', 'feature_extractor'] _is_onnx = True def __init__(self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might lead to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt: Union[str, List[str]], num_images_per_prompt: Optional[int], do_classifier_free_guidance: bool, negative_prompt: Optional[str], prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='np').input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='np') negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def check_inputs(self, prompt: Union[str, List[str]], height: Optional[int], width: Optional[int], callback_steps: int, negative_prompt: Optional[str]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=512, width: Optional[int]=512, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[np.random.RandomState]=None, latents: Optional[np.ndarray]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1): self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) latents_dtype = prompt_embeds.dtype latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, width // 8) if latents is None: latents = generator.randn(*latents_shape).astype(latents_dtype) elif latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') self.scheduler.set_timesteps(num_inference_steps) latents = latents * np.float64(self.scheduler.init_noise_sigma) accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta timestep_dtype = next((input.type for input in self.unet.model.get_inputs() if input.name == 'timestep'), 'tensor(float)') timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] for (i, t) in enumerate(self.progress_bar(self.scheduler.timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.cpu().numpy() timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds) noise_pred = noise_pred[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) scheduler_output = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs) latents = scheduler_output.prev_sample.numpy() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in range(latents.shape[0])]) image = np.clip(image / 2 + 0.5, 0, 1) image = 
image.transpose((0, 2, 3, 1)) if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='np').pixel_values.astype(image.dtype) (images, has_nsfw_concept) = ([], []) for i in range(image.shape[0]): (image_i, has_nsfw_concept_i) = self.safety_checker(clip_input=safety_checker_input[i:i + 1], images=image[i:i + 1]) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) else: has_nsfw_concept = None if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) class StableDiffusionOnnxPipeline(OnnxStableDiffusionPipeline): def __init__(self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor): deprecation_message = 'Please use `OnnxStableDiffusionPipeline` instead of `StableDiffusionOnnxPipeline`.' deprecate('StableDiffusionOnnxPipeline', '1.0.0', deprecation_message) super().__init__(vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTokenizer from ...configuration_utils import FrozenDict from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import PIL_INTERPOLATION, deprecate, logging from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel from ..pipeline_utils import DiffusionPipeline from . import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) def preprocess(image): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 64 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline): vae_encoder: OnnxRuntimeModel vae_decoder: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] safety_checker: OnnxRuntimeModel feature_extractor: CLIPImageProcessor _optional_components = ['safety_checker', 'feature_extractor'] _is_onnx = True def __init__(self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt: Union[str, List[str]], num_images_per_prompt: Optional[int], do_classifier_free_guidance: bool, negative_prompt: Optional[str], prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='np').input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='np') negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def check_inputs(self, prompt: Union[str, List[str]], callback_steps: int, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def __call__(self, prompt: Union[str, List[str]], image: Union[np.ndarray, PIL.Image.Image]=None, strength: float=0.8, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[np.random.RandomState]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1): self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if generator is None: generator = np.random self.scheduler.set_timesteps(num_inference_steps) image = preprocess(image).cpu().numpy() do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) latents_dtype = prompt_embeds.dtype image = image.astype(latents_dtype) init_latents = self.vae_encoder(sample=image)[0] init_latents = 0.18215 * init_latents if isinstance(prompt, str): prompt = [prompt] if len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] == 0: deprecation_message = f'You have passed {len(prompt)} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). Initial images are now being duplicated to match the number of text prompts. Note that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.'
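# --- Illustrative sketch (added editorially; not part of the original pipeline source). The
# deprecation message above describes the legacy behaviour implemented just below: when more text
# prompts than initial images are passed, the encoded image latents are tiled along the batch axis
# until their count equals len(prompt) * num_images_per_prompt. A minimal, hedged NumPy sketch of
# that shape arithmetic (the 2-image/4-prompt setup and the 64x64 latent size are assumptions used
# only for illustration):
import numpy as np  # already imported at the top of this file; repeated so the sketch is self-contained
_init_latents = np.zeros((2, 4, 64, 64))        # stand-in for latents encoded from 2 initial images
_num_prompts, _num_images_per_prompt = 4, 1     # 4 text prompts, 1 image per prompt
_tile = _num_prompts // _init_latents.shape[0]  # tile factor: 2 copies of the image latents
_tiled = np.concatenate([_init_latents] * _tile * _num_images_per_prompt, axis=0)
assert _tiled.shape == (4, 4, 64, 64)           # batch now matches len(prompt) * num_images_per_prompt
# --- End of illustrative sketch.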
deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = len(prompt) // init_latents.shape[0] init_latents = np.concatenate([init_latents] * additional_image_per_prompt * num_images_per_prompt, axis=0) elif len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {len(prompt)} text prompts.') else: init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) offset = self.scheduler.config.get('steps_offset', 0) init_timestep = int(num_inference_steps * strength) + offset init_timestep = min(init_timestep, num_inference_steps) timesteps = self.scheduler.timesteps.numpy()[-init_timestep] timesteps = np.array([timesteps] * batch_size * num_images_per_prompt) noise = generator.randn(*init_latents.shape).astype(latents_dtype) init_latents = self.scheduler.add_noise(torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timesteps)) init_latents = init_latents.numpy() accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta latents = init_latents t_start = max(num_inference_steps - init_timestep + offset, 0) timesteps = self.scheduler.timesteps[t_start:].numpy() timestep_dtype = next((input.type for input in self.unet.model.get_inputs() if input.name == 'timestep'), 'tensor(float)') timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.cpu().numpy() timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) scheduler_output = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs) latents = scheduler_output.prev_sample.numpy() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in range(latents.shape[0])]) image = np.clip(image / 2 + 0.5, 0, 1) image = image.transpose((0, 2, 3, 1)) if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='np').pixel_values.astype(image.dtype) (images, has_nsfw_concept) = ([], []) for i in range(image.shape[0]): (image_i, has_nsfw_concept_i) = self.safety_checker(clip_input=safety_checker_input[i:i + 1], images=image[i:i + 1]) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) else: has_nsfw_concept = None if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py import inspect from typing import Callable, List, 
Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTokenizer from ...configuration_utils import FrozenDict from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import PIL_INTERPOLATION, deprecate, logging from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel from ..pipeline_utils import DiffusionPipeline from . import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) NUM_UNET_INPUT_CHANNELS = 9 NUM_LATENT_CHANNELS = 4 def prepare_mask_and_masked_image(image, mask, latents_shape): image = np.array(image.convert('RGB').resize((latents_shape[1] * 8, latents_shape[0] * 8))) image = image[None].transpose(0, 3, 1, 2) image = image.astype(np.float32) / 127.5 - 1.0 image_mask = np.array(mask.convert('L').resize((latents_shape[1] * 8, latents_shape[0] * 8))) masked_image = image * (image_mask < 127.5) mask = mask.resize((latents_shape[1], latents_shape[0]), PIL_INTERPOLATION['nearest']) mask = np.array(mask.convert('L')) mask = mask.astype(np.float32) / 255.0 mask = mask[None, None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 return (mask, masked_image) class OnnxStableDiffusionInpaintPipeline(DiffusionPipeline): vae_encoder: OnnxRuntimeModel vae_decoder: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] safety_checker: OnnxRuntimeModel feature_extractor: CLIPImageProcessor _optional_components = ['safety_checker', 'feature_extractor'] _is_onnx = True def __init__(self, vae_encoder: OnnxRuntimeModel, vae_decoder: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: CLIPTokenizer, unet: OnnxRuntimeModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: OnnxRuntimeModel, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() logger.info('`OnnxStableDiffusionInpaintPipeline` is experimental and will very likely change in the future.') if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae_encoder=vae_encoder, vae_decoder=vae_decoder, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt: Union[str, List[str]], num_images_per_prompt: Optional[int], do_classifier_free_guidance: bool, negative_prompt: Optional[str], prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='np').input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='np') negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def check_inputs(self, prompt: Union[str, List[str]], height: Optional[int], width: Optional[int], callback_steps: int, negative_prompt: Optional[str]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], image: PIL.Image.Image, mask_image: PIL.Image.Image, height: Optional[int]=512, width: Optional[int]=512, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[np.random.RandomState]=None, latents: Optional[np.ndarray]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: int=1): self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random self.scheduler.set_timesteps(num_inference_steps) do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) num_channels_latents = NUM_LATENT_CHANNELS latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8) latents_dtype = prompt_embeds.dtype if latents is None: latents = generator.randn(*latents_shape).astype(latents_dtype) elif latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') (mask, masked_image) = prepare_mask_and_masked_image(image, mask_image, latents_shape[-2:]) mask = mask.astype(latents.dtype) masked_image = masked_image.astype(latents.dtype) masked_image_latents = self.vae_encoder(sample=masked_image)[0] masked_image_latents = 0.18215 * masked_image_latents mask = mask.repeat(batch_size * num_images_per_prompt, 0) masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 0) mask = np.concatenate([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = np.concatenate([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] unet_input_channels = NUM_UNET_INPUT_CHANNELS if num_channels_latents + num_channels_mask + num_channels_masked_image != unet_input_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet` expects {unet_input_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. 
Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') self.scheduler.set_timesteps(num_inference_steps) latents = latents * np.float64(self.scheduler.init_noise_sigma) accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta timestep_dtype = next((input.type for input in self.unet.model.get_inputs() if input.name == 'timestep'), 'tensor(float)') timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] for (i, t) in enumerate(self.progress_bar(self.scheduler.timesteps)): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) latent_model_input = latent_model_input.cpu().numpy() latent_model_input = np.concatenate([latent_model_input, mask, masked_image_latents], axis=1) timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) scheduler_output = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs) latents = scheduler_output.prev_sample.numpy() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = np.concatenate([self.vae_decoder(latent_sample=latents[i:i + 1])[0] for i in range(latents.shape[0])]) image = np.clip(image / 2 + 0.5, 0, 1) image = image.transpose((0, 2, 3, 1)) if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='np').pixel_values.astype(image.dtype) (images, has_nsfw_concept) = ([], []) for i in range(image.shape[0]): (image_i, has_nsfw_concept_i) = self.safety_checker(clip_input=safety_checker_input[i:i + 1], images=image[i:i + 1]) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) else: has_nsfw_concept = None if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py import inspect from typing import Any, Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTokenizer from ...configuration_utils import FrozenDict from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers from ...utils import deprecate, logging from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel from ..pipeline_utils import DiffusionPipeline from . 
import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) def preprocess(image): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 64 for x in (w, h)) image = [np.array(i.resize((w, h)))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class OnnxStableDiffusionUpscalePipeline(DiffusionPipeline): vae: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel low_res_scheduler: DDPMScheduler scheduler: KarrasDiffusionSchedulers safety_checker: OnnxRuntimeModel feature_extractor: CLIPImageProcessor _optional_components = ['safety_checker', 'feature_extractor'] _is_onnx = True def __init__(self, vae: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: Any, unet: OnnxRuntimeModel, low_res_scheduler: DDPMScheduler, scheduler: KarrasDiffusionSchedulers, safety_checker: Optional[OnnxRuntimeModel]=None, feature_extractor: Optional[CLIPImageProcessor]=None, max_noise_level: int=350, num_latent_channels=4, num_unet_input_channels=7, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, low_res_scheduler=low_res_scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.register_to_config(max_noise_level=max_noise_level, num_latent_channels=num_latent_channels, num_unet_input_channels=num_unet_input_channels) def check_inputs(self, prompt: Union[str, List[str]], image, noise_level, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, np.ndarray)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}') if isinstance(image, (list, np.ndarray)): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if isinstance(image, list): image_batch_size = len(image) else: image_batch_size = image.shape[0] if batch_size != image_batch_size: raise ValueError(f'`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}. 
Please make sure that passed `prompt` matches the batch size of `image`.') if noise_level > self.config.max_noise_level: raise ValueError(f'`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None): shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = generator.randn(*shape).astype(dtype) elif latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') return latents def decode_latents(self, latents): latents = 1 / 0.08333 * latents image = self.vae(latent_sample=latents)[0] image = np.clip(image / 2 + 0.5, 0, 1) image = image.transpose((0, 2, 3, 1)) return image def _encode_prompt(self, prompt: Union[str, List[str]], num_images_per_prompt: Optional[int], do_classifier_free_guidance: bool, negative_prompt: Optional[str], prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='np').input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='np') negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def __call__(self, prompt: Union[str, List[str]], image: Union[np.ndarray, PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int=75, guidance_scale: float=9.0, noise_level: int=20, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[np.random.RandomState, List[np.random.RandomState]]]=None, latents: Optional[np.ndarray]=None, prompt_embeds: Optional[np.ndarray]=None, negative_prompt_embeds: Optional[np.ndarray]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, np.ndarray], None]]=None, callback_steps: Optional[int]=1): self.check_inputs(prompt, image, noise_level, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) latents_dtype = prompt_embeds.dtype image = preprocess(image).cpu().numpy() (height, width) = image.shape[2:] latents = self.prepare_latents(batch_size * num_images_per_prompt, self.config.num_latent_channels, height, width, latents_dtype, generator) image = image.astype(latents_dtype) self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps latents = latents * np.float64(self.scheduler.init_noise_sigma) noise_level = np.array([noise_level]).astype(np.int64) noise = generator.randn(*image.shape).astype(latents_dtype) image = self.low_res_scheduler.add_noise(torch.from_numpy(image), torch.from_numpy(noise), torch.from_numpy(noise_level)) image = image.numpy() batch_multiplier = 2 if do_classifier_free_guidance else 1 image = np.concatenate([image] * batch_multiplier * num_images_per_prompt) noise_level = np.concatenate([noise_level] * image.shape[0]) num_channels_image = image.shape[1] if self.config.num_latent_channels + num_channels_image != self.config.num_unet_input_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet` expects {self.config.num_unet_input_channels} but received `num_channels_latents`: {self.config.num_latent_channels} + `num_channels_image`: {num_channels_image} = {self.config.num_latent_channels + num_channels_image}. 
Please verify the config of `pipeline.unet` or your `image` input.') accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta timestep_dtype = next((input.type for input in self.unet.model.get_inputs() if input.name == 'timestep'), 'tensor(float)') timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = np.concatenate([latent_model_input, image], axis=1) timestep = np.array([t], dtype=timestep_dtype) noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, class_labels=noise_level)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs).prev_sample latents = latents.numpy() if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) image = self.decode_latents(latents) if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='np').pixel_values.astype(image.dtype) (images, has_nsfw_concept) = ([], []) for i in range(image.shape[0]): (image_i, has_nsfw_concept_i) = self.safety_checker(clip_input=safety_checker_input[i:i + 1], images=image[i:i + 1]) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) else: has_nsfw_concept = None if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_output.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image from ...utils import BaseOutput, is_flax_available @dataclass class StableDiffusionPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] if is_flax_available(): import flax @flax.struct.dataclass class FlaxStableDiffusionPipelineOutput(BaseOutput): images: np.ndarray nsfw_content_detected: List[bool] # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, 
UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import StableDiffusionPipelineOutput from .safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionPipeline\n\n >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
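# --- Illustrative sketch (added editorially; not part of the original pipeline source). The
# deprecation message above points at the change of return format: `encode_prompt()` returns the
# tuple (prompt_embeds, negative_prompt_embeds), while the legacy `_encode_prompt()` returned one
# tensor with the negative embeddings placed before the positive ones along the batch axis, which
# is exactly what the `torch.cat` call just below reconstructs. A minimal, hedged sketch of that
# conversion with dummy tensors (the 77x768 CLIP ViT-L embedding shape is an assumption used only
# for illustration):
import torch  # already imported at the top of this file; repeated so the sketch is self-contained
_cond = torch.zeros(1, 77, 768)    # stand-in for prompt_embeds returned by encode_prompt()
_uncond = torch.zeros(1, 77, 768)  # stand-in for negative_prompt_embeds returned by encode_prompt()
_embeds_tuple = (_cond, _uncond)   # new-style output: a (prompt_embeds, negative_prompt_embeds) tuple
_legacy = torch.cat([_embeds_tuple[1], _embeds_tuple[0]])  # legacy layout: negative first, then positive
assert _legacy.shape == (2, 77, 768)
# --- End of illustrative sketch.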
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') 
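# --- Illustrative sketch (added for clarity, not part of the library source): a standalone
# version of the sinusoidal embedding computed by `get_guidance_scale_embedding` above. The
# pipeline only uses it when the UNet exposes `time_cond_proj_dim` (guidance-distilled,
# LCM-style models), feeding the guidance scale in as conditioning instead of applying
# classifier-free guidance at sampling time. The function name is illustrative only.
import torch

def guidance_scale_embedding_sketch(w: torch.Tensor, embedding_dim: int = 512) -> torch.Tensor:
    # w: 1-D tensor of (guidance_scale - 1) values, one entry per sample in the batch
    w = w * 1000.0
    half_dim = embedding_dim // 2
    freq = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -(torch.log(torch.tensor(10000.0)) / (half_dim - 1)))
    args = w.to(torch.float32)[:, None] * freq[None, :]          # (batch, half_dim)
    emb = torch.cat([torch.sin(args), torch.cos(args)], dim=1)   # (batch, 2 * half_dim)
    if embedding_dim % 2 == 1:                                   # pad one zero column for odd dims
        emb = torch.nn.functional.pad(emb, (0, 1))
    return emb                                                   # (batch, embedding_dim)

# e.g. guidance_scale_embedding_sketch(torch.tensor([6.5, 6.5]), 256).shape == (2, 256),
# mirroring the `guidance_scale - 1` tensor the pipeline builds when `time_cond_proj_dim` is set.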
if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and 
self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py import contextlib import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPTextModel, CLIPTokenizer, DPTForDepthEstimation, DPTImageProcessor from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def preprocess(image): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 8 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'depth_mask'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, depth_estimator: DPTForDepthEstimation, feature_extractor: DPTImageProcessor): super().__init__() is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, depth_estimator=depth_estimator, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.'
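# --- Illustrative sketch (added for clarity, not part of the library source): the pixel math
# performed by the deprecated `preprocess` helper defined above (and by
# `VaeImageProcessor.preprocess`). A PIL-sized input is snapped down to multiples of 8,
# converted to NCHW float32, and rescaled from [0, 255] to [-1, 1] before VAE encoding.
# The 513x770 size and the random array standing in for `np.array(pil_image)` are assumptions.
import numpy as np

w, h = 513, 770
w, h = (x - x % 8 for x in (w, h))                                      # -> 512, 768 (multiples of 8)
pixels = np.random.randint(0, 256, size=(h, w, 3)).astype(np.float32)   # stand-in for np.array(pil_image)
batch = pixels[None, ...] / 255.0                                       # [0, 1], shape (1, H, W, C)
batch = batch.transpose(0, 3, 1, 2)                                     # NCHW, shape (1, 3, H, W)
batch = 2.0 * batch - 1.0                                               # [-1, 1], the range the VAE expects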
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' 
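# --- Illustrative sketch (added for clarity, not part of the library source): the arithmetic
# behind `get_timesteps` above. `strength` decides how much of the schedule is actually run:
# the init image is noised to the matching timestep and only the tail of the schedule is
# denoised. The function name is illustrative only; the real method slices
# `self.scheduler.timesteps[t_start * self.scheduler.order:]`.
def get_timesteps_sketch(num_inference_steps: int, strength: float, scheduler_order: int = 1):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    skipped_entries = t_start * scheduler_order          # entries dropped from scheduler.timesteps
    remaining_steps = num_inference_steps - t_start      # steps actually denoised
    return skipped_entries, remaining_steps

# e.g. get_timesteps_sketch(50, 0.8) == (10, 40): skip the first 10 schedule entries and run 40
# denoising steps; strength=1.0 runs the full schedule, strength=0.0 leaves the image untouched.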
deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def prepare_depth_map(self, image, depth_map, batch_size, do_classifier_free_guidance, dtype, device): if isinstance(image, PIL.Image.Image): image = [image] else: image = list(image) if isinstance(image[0], PIL.Image.Image): (width, height) = image[0].size elif isinstance(image[0], np.ndarray): (width, height) = image[0].shape[:-1] else: (height, width) = image[0].shape[-2:] if depth_map is None: pixel_values = self.feature_extractor(images=image, return_tensors='pt').pixel_values pixel_values = pixel_values.to(device=device, dtype=dtype) if torch.backends.mps.is_available(): autocast_ctx = contextlib.nullcontext() logger.warning('The DPT-Hybrid model uses batch-norm layers which are not compatible with fp16, but autocast is not yet supported on MPS.') else: autocast_ctx = torch.autocast(device.type, dtype=dtype) with autocast_ctx: depth_map = self.depth_estimator(pixel_values).predicted_depth else: depth_map = depth_map.to(device=device, dtype=dtype) depth_map = torch.nn.functional.interpolate(depth_map.unsqueeze(1), size=(height // self.vae_scale_factor, width // self.vae_scale_factor), mode='bicubic', align_corners=False) depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0 depth_map = depth_map.to(dtype) if depth_map.shape[0] < batch_size: repeat_by = batch_size // depth_map.shape[0] depth_map = depth_map.repeat(repeat_by, 1, 1, 1) depth_map = torch.cat([depth_map] * 2) if do_classifier_free_guidance else depth_map return depth_map @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, depth_map: Optional[torch.Tensor]=None, strength: float=0.8, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', 
None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') self.check_inputs(prompt, strength, callback_steps, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if image is None: raise ValueError('`image` input cannot be undefined.') if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) depth_mask = self.prepare_depth_map(image, depth_map, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, prompt_embeds.dtype, device) image = self.image_processor.preprocess(image) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, depth_mask], dim=1) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) depth_mask = callback_outputs.pop('depth_mask', depth_mask) if i == len(timesteps) 
- 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py import inspect from typing import Callable, List, Optional, Union import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ...configuration_utils import FrozenDict from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import StableDiffusionPipelineOutput from .safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) class StableDiffusionImageVariationPipeline(DiffusionPipeline, StableDiffusionMixin): _optional_components = ['safety_checker'] model_cpu_offload_seq = 'image_encoder->unet->vae' _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, image_encoder: CLIPVisionModelWithProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(images=image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.unsqueeze(1) (bs_embed, seq_len, _) = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeddings) image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...)
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, image, height, width, callback_steps): if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is {type(image)}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(image, height, width, callback_steps) if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) else: batch_size = image.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 image_embeddings = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) self.maybe_free_model_hooks() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, image_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Union 
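# --- Illustrative sketch (added for clarity, not part of the library source): the
# classifier-free guidance step shared by the denoising loops above (image variation) and in
# the img2img pipeline that follows. Unconditional and conditional branches are run in a
# single batched UNet call by duplicating the latents, then recombined with `guidance_scale`.
# Shapes and the random tensor standing in for the UNet output are assumptions.
import torch

guidance_scale = 7.5
latents = torch.randn(1, 4, 64, 64)
latent_model_input = torch.cat([latents] * 2)        # (2, 4, 64, 64): [uncond, cond] branches
noise_pred = torch.randn_like(latent_model_input)    # stand-in for unet(latent_model_input, t, ...).sample
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
# `noise_pred` (back to shape (1, 4, 64, 64)) is what the pipelines hand to scheduler.step(...)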
import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import StableDiffusionPipelineOutput from .safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import requests\n >>> import torch\n >>> from PIL import Image\n >>> from io import BytesIO\n\n >>> from diffusers import StableDiffusionImg2ImgPipeline\n\n >>> device = "cuda"\n >>> model_id_or_path = "runwayml/stable-diffusion-v1-5"\n >>> pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n\n >>> response = requests.get(url)\n >>> init_image = Image.open(BytesIO(response.content)).convert("RGB")\n >>> init_image = init_image.resize((768, 512))\n\n >>> prompt = "A fantasy landscape, trending on artstation"\n\n >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images\n >>> images[0].save("fantasy_landscape.png")\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def preprocess(image): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 8 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has `clip_sample` set to True, but `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly, as leaving `clip_sample` set to True in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) 
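# Note on `clip_skip`: when it is set, the else-branch below takes an earlier hidden state of the CLIP text encoder (`hidden_states[-(clip_skip + 1)]`) instead of the final layer output and re-applies `final_layer_norm`; with `clip_skip=None` the standard last hidden state is used.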
prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = 
self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' 
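# Deprecated behavior (see the deprecation message above): when more text prompts than init images are passed, the encoded image latents are tiled below (`batch_size // init_latents.shape[0]` copies) so that every prompt receives an initial latent.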
deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, strength: float=0.8, num_inference_steps: Optional[int]=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: Optional[float]=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: Optional[float]=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: int=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = 
clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) image = self.image_processor.preprocess(image) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback 
is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AsymmetricAutoencoderKL, AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import StableDiffusionPipelineOutput from .safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionInpaintPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'mask', 'masked_image_latents'] def __init__(self, vae: Union[AutoencoderKL, AsymmetricAutoencoderKL], text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection=None, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'skip_prk_steps') and scheduler.config.skip_prk_steps is False: deprecation_message = f'The configuration file of this scheduler: {scheduler} has `skip_prk_steps` set to False, but `skip_prk_steps` should be set to True in the configuration file. Please make sure to update the config accordingly, as leaving `skip_prk_steps` set to False in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('skip_prk_steps not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['skip_prk_steps'] = True scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. 
Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) if unet.config.in_channels != 9: logger.info(f'You have loaded a UNet with {unet.config.in_channels} input channels, which differs from the 9 input channels expected for the default Stable Diffusion inpainting UNet.') self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
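# Backward-compatibility shim: `_encode_prompt` wraps `encode_prompt` below and concatenates the returned (negative, positive) embeddings back into the single tensor format the old API produced.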
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, mask_image, height, width, strength, callback_steps, output_type, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, padding_mask_crop=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0: raise ValueError(f'`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not 
None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if padding_mask_crop is not None: if not isinstance(image, PIL.Image.Image): raise ValueError(f'The image should be a PIL image when inpainting mask crop, but is of type {type(image)}.') if not isinstance(mask_image, PIL.Image.Image): raise ValueError(f'The mask image should be a PIL image when inpainting mask crop, but is of type {type(mask_image)}.') if output_type != 'pil': raise ValueError(f'The output type should be PIL when inpainting mask crop, but is {output_type}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, return_noise=False, return_image_latents=False): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if (image is None or timestep is None) and (not is_strength_max): raise ValueError('Since strength < 1. 
initial latents are to be initialised as a combination of Image + Noise.However, either the image or the noise timestep has not been provided.') if return_image_latents or (latents is None and (not is_strength_max)): image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: image_latents = image else: image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents else: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents(self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance): mask = torch.nn.functional.interpolate(mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask = mask.to(device=device, dtype=dtype) masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError(f"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size.") mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError(f"The passed images and the required batch size don't match. Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. 
Make sure the number of images that you pass is divisible by the total requested batch size.") masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return (mask, masked_image_latents) def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, mask_image: PipelineImageInput=None, masked_image_latents: torch.Tensor=None, height: Optional[int]=None, width: Optional[int]=None, padding_mask_crop: Optional[int]=None, strength: float=1.0, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: int=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if isinstance(callback_on_step_end, 
(PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, image, mask_image, height, width, strength, callback_steps, output_type, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, padding_mask_crop) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps=num_inference_steps, strength=strength, device=device) if num_inference_steps < 1: raise ValueError(f'After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipelinesteps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.') latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 if padding_mask_crop is not None: crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = 'fill' else: crops_coords = None resize_mode = 'default' original_image = image init_image = self.image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode) init_image = init_image.to(dtype=torch.float32) num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 latents_outputs = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, image=init_image, timestep=latent_timestep, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents) if return_image_latents: (latents, noise, image_latents) = latents_outputs else: (latents, noise) = latents_outputs mask_condition = self.mask_processor.preprocess(mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) if masked_image_latents is None: masked_image = init_image * (mask_condition < 0.5) else: masked_image = masked_image_latents (mask, masked_image_latents) = self.prepare_mask_latents(mask_condition, masked_image, batch_size * 
num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance) if num_channels_unet == 9: num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') elif num_channels_unet != 4: raise ValueError(f'The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.') extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: (init_mask, _) = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise(init_latents_proper, noise, torch.tensor([noise_timestep])) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) mask = callback_outputs.pop('mask', mask) masked_image_latents = callback_outputs.pop('masked_image_latents', masked_image_latents) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) 
% self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': condition_kwargs = {} if isinstance(self.vae, AsymmetricAutoencoderKL): init_image = init_image.to(device=device, dtype=masked_image_latents.dtype) init_image_condition = init_image.clone() init_image = self._encode_vae_image(init_image, generator=generator) mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype) condition_kwargs = {'image': init_image_condition, 'mask': mask_condition} image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator, **condition_kwargs)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import PIL_INTERPOLATION, deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import StableDiffusionPipelineOutput from .safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) def preprocess(image): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 8 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') class StableDiffusionInstructPix2PixPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'image_latents'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection]=None, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, num_inference_steps: int=100, guidance_scale: float=7.5, image_guidance_scale: float=1.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], cross_attention_kwargs: Optional[Dict[str, Any]]=None, **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._image_guidance_scale = image_guidance_scale device = self._execution_device if image is None: raise ValueError('`image` input cannot be undefined.') if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) image = self.image_processor.preprocess(image) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps image_latents = self.prepare_image_latents(image, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, self.do_classifier_free_guidance) (height, width) = image_latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor num_channels_latents = 
self.vae.config.latent_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) num_channels_image = image_latents.shape[1] if num_channels_latents + num_channels_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_image`: {num_channels_image} = {num_channels_latents + num_channels_image}. Please verify the config of `pipeline.unet` or your `image` input.') extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None else None num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 3) if self.do_classifier_free_guidance else latents scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) noise_pred = self.unet(scaled_latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_text, noise_pred_image, noise_pred_uncond) = noise_pred.chunk(3) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_image) + self.image_guidance_scale * (noise_pred_image - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) image_latents = callback_outputs.pop('image_latents', image_latents) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None): if prompt is not None and 
isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype else: prompt_embeds_dtype = self.unet.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds]) return prompt_embeds def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') image_embeds = [] for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_image_embeds = torch.cat([single_image_embeds, single_negative_image_embeds, single_negative_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) else: repeat_dims = [1] image_embeds = [] for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_image_embeds, single_negative_image_embeds, single_negative_image_embeds) = single_image_embeds.chunk(3) single_image_embeds = single_image_embeds.repeat(num_images_per_prompt, *repeat_dims * len(single_image_embeds.shape[1:])) single_negative_image_embeds = single_negative_image_embeds.repeat(num_images_per_prompt, *repeat_dims * len(single_negative_image_embeds.shape[1:])) single_image_embeds = torch.cat([single_image_embeds, single_negative_image_embeds, single_negative_image_embeds]) else: single_image_embeds = single_image_embeds.repeat(num_images_per_prompt, *repeat_dims * len(single_image_embeds.shape[1:])) image_embeds.append(single_image_embeds) return image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs(self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_latents(self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: image_latents = image else: image_latents = retrieve_latents(self.vae.encode(image), sample_mode='argmax') if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts.') else: image_latents = torch.cat([image_latents], dim=0) if do_classifier_free_guidance: uncond_image_latents = torch.zeros_like(image_latents) image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) return image_latents @property def guidance_scale(self): return self._guidance_scale @property def image_guidance_scale(self): return self._image_guidance_scale @property def num_timesteps(self): return self._num_timesteps @property def do_classifier_free_guidance(self): return self.guidance_scale > 1.0 and self.image_guidance_scale >= 1.0 # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py import warnings from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPTextModel, CLIPTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import EulerDiscreteScheduler from ...utils import deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin logger = logging.get_logger(__name__) def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') 
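# [Editor's note, not part of the upstream diffusers source] A minimal, hedged sketch of how the
# `retrieve_latents` helper defined above is typically combined with `AutoencoderKL.encode`; the
# checkpoint id and the random input tensor are illustrative assumptions only, and the snippet is
# kept inside comments so that it does not execute when the module is imported:
#
#     import torch
#     from diffusers import AutoencoderKL
#
#     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")  # assumed checkpoint id
#     pixels = torch.randn(1, 3, 512, 512)  # random stand-in for a preprocessed image tensor
#     with torch.no_grad():
#         posterior = vae.encode(pixels)  # returns an output object exposing `latent_dist`
#         latents = retrieve_latents(posterior, sample_mode="argmax")  # deterministic mode of the posterior
#     latents = latents * vae.config.scaling_factor  # same scaling the pipelines above apply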
def preprocess(image): warnings.warn('The preprocess method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead', FutureWarning) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 64 for x in (w, h)) image = [np.array(i.resize((w, h)))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class StableDiffusionLatentUpscalePipeline(DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: EulerDiscreteScheduler): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, resample='bicubic') def _encode_prompt(self, prompt, device, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
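# [Editor's note, not part of the upstream source] `_encode_prompt` here is a backward-compatibility
# shim: after emitting the deprecation warning below it delegates to `encode_prompt()`, concatenates
# the negative embeddings ahead of the positive ones (and likewise for the pooled embeddings), and
# returns `(prompt_embeds, pooled_prompt_embeds)` in the legacy concatenated format.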
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, device=device, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, **kwargs) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds]) return (prompt_embeds, pooled_prompt_embeds) def encode_prompt(self, prompt, device, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None or pooled_prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_length=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_encoder_out = self.text_encoder(text_input_ids.to(device), output_hidden_states=True) prompt_embeds = text_encoder_out.hidden_states[-1] pooled_prompt_embeds = text_encoder_out.pooler_output if do_classifier_free_guidance: if negative_prompt_embeds is None or negative_pooled_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_length=True, return_tensors='pt') uncond_encoder_out = self.text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_prompt_embeds = uncond_encoder_out.hidden_states[-1] negative_pooled_prompt_embeds = uncond_encoder_out.pooler_output return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. 
Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs(self, prompt, image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None): if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str)) and (not isinstance(prompt, list)): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if not isinstance(image, torch.Tensor) and (not isinstance(image, np.ndarray)) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or `list` but is {type(image)}') if isinstance(image, (list, torch.Tensor)): if prompt is not None: if isinstance(prompt, str): batch_size = 1 else: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if isinstance(image, list): image_batch_size = len(image) else: image_batch_size = image.shape[0] if image.ndim == 4 else 1 if batch_size != image_batch_size: raise ValueError(f'`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}. 
Please make sure that passed `prompt` matches the batch size of `image`.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, num_inference_steps: int=75, guidance_scale: float=9.0, negative_prompt: Optional[Union[str, List[str]]]=None, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1): self.check_inputs(prompt, image, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) if prompt is not None: batch_size = 1 if isinstance(prompt, str) else len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 if guidance_scale == 0: prompt = [''] * batch_size (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt, device, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds]) image = self.image_processor.preprocess(image) image = image.to(dtype=prompt_embeds.dtype, device=device) if image.shape[1] == 3: image = retrieve_latents(self.vae.encode(image), generator=generator) * self.vae.config.scaling_factor self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps batch_multiplier = 2 if do_classifier_free_guidance else 1 image = image[None, :] if image.ndim == 3 else image image = torch.cat([image] * batch_multiplier) noise_level = torch.tensor([0.0], dtype=torch.float32, device=device) noise_level = torch.cat([noise_level] * image.shape[0]) inv_noise_level = (noise_level ** 2 + 1) ** (-0.5) image_cond = F.interpolate(image, scale_factor=2, mode='nearest') * inv_noise_level[:, None, None, None] image_cond = image_cond.to(prompt_embeds.dtype) noise_level_embed = torch.cat([torch.ones(pooled_prompt_embeds.shape[0], 64, dtype=pooled_prompt_embeds.dtype, device=device), torch.zeros(pooled_prompt_embeds.shape[0], 64, dtype=pooled_prompt_embeds.dtype, device=device)], dim=1) timestep_condition = torch.cat([noise_level_embed, pooled_prompt_embeds], dim=1) (height, width) = image.shape[2:] num_channels_latents 
= self.vae.config.latent_channels latents = self.prepare_latents(batch_size, num_channels_latents, height * 2, width * 2, prompt_embeds.dtype, device, generator, latents) num_channels_image = image.shape[1] if num_channels_latents + num_channels_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_image`: {num_channels_image} = {num_channels_latents + num_channels_image}. Please verify the config of `pipeline.unet` or your `image` input.') num_warmup_steps = 0 with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): sigma = self.scheduler.sigmas[i] latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) scaled_model_input = torch.cat([scaled_model_input, image_cond], dim=1) timestep = torch.log(sigma) * 0.25 noise_pred = self.unet(scaled_model_input, timestep, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_condition).sample noise_pred = noise_pred[:, :-1] inv_sigma = 1 / (sigma ** 2 + 1) noise_pred = inv_sigma * latent_model_input + self.scheduler.scale_model_input(sigma, t) * noise_pred if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py import inspect import warnings from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) def preprocess(image): warnings.warn('The preprocess method is deprecated and will be removed in a future version. 
Please use VaeImageProcessor.preprocess instead', FutureWarning) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 64 for x in (w, h)) image = [np.array(i.resize((w, h)))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class StableDiffusionUpscalePipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['watermarker', 'safety_checker', 'feature_extractor'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: KarrasDiffusionSchedulers, safety_checker: Optional[Any]=None, feature_extractor: Optional[CLIPImageProcessor]=None, watermarker: Optional[Any]=None, max_noise_level: int=350): super().__init__() if hasattr(vae, 'config'): is_vae_scaling_factor_set_to_0_08333 = hasattr(vae.config, 'scaling_factor') and vae.config.scaling_factor == 0.08333 if not is_vae_scaling_factor_set_to_0_08333: deprecation_message = f"The configuration file of the vae does not contain `scaling_factor` or it is set to {vae.config.scaling_factor}, which seems highly unlikely. If your checkpoint is a fine-tuned version of `stabilityai/stable-diffusion-x4-upscaler` you should change 'scaling_factor' to 0.08333 Please make sure to update the config accordingly, as not doing so might lead to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull Request for the `vae/config.json` file" deprecate('wrong scaling_factor', '1.0.0', deprecation_message, standard_warn=False) vae.register_to_config(scaling_factor=0.08333) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, safety_checker=safety_checker, watermarker=watermarker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, resample='bicubic') self.register_to_config(max_noise_level=max_noise_level) def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, nsfw_detected, watermark_detected) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype=dtype)) else: nsfw_detected = None watermark_detected = None if hasattr(self, 'unet_offload_hook') and self.unet_offload_hook is not None: self.unet_offload_hook.offload() return (image, nsfw_detected, watermark_detected) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
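# [Editor's note, not part of the upstream source] As in the other pipelines in this dump, this
# `_encode_prompt` is a legacy wrapper: it calls `encode_prompt()` and stacks the returned negative
# embeddings (tuple index 1) ahead of the positive ones (index 0) into a single tensor, restoring
# the old concatenated output format for callers that still expect it.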
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs(self, prompt, image, noise_level, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, np.ndarray)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}') if isinstance(image, (list, np.ndarray, torch.Tensor)): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if isinstance(image, list): image_batch_size = len(image) else: image_batch_size = image.shape[0] if batch_size != image_batch_size: raise ValueError(f'`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}. Please make sure that passed `prompt` matches the batch size of `image`.') if noise_level > self.config.max_noise_level: raise ValueError(f'`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, image: PipelineImageInput=None, num_inference_steps: int=75, guidance_scale: float=9.0, noise_level: int=20, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: int=None): self.check_inputs(prompt, image, noise_level, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if image is None: raise ValueError('`image` input cannot be undefined.') if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = 
prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) image = self.image_processor.preprocess(image) image = image.to(dtype=prompt_embeds.dtype, device=device) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps noise_level = torch.tensor([noise_level], dtype=torch.long, device=device) noise = randn_tensor(image.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) image = self.low_res_scheduler.add_noise(image, noise, noise_level) batch_multiplier = 2 if do_classifier_free_guidance else 1 image = torch.cat([image] * batch_multiplier * num_images_per_prompt) noise_level = torch.cat([noise_level] * image.shape[0]) (height, width) = image.shape[2:] num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) num_channels_image = image.shape[1] if num_channels_latents + num_channels_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_image`: {num_channels_image} = {num_channels_latents + num_channels_image}. 
Please verify the config of `pipeline.unet` or your `image` input.') extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, image], dim=1) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, class_labels=noise_level, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) (image, has_nsfw_concept, _) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if output_type == 'pil' and self.watermarker is not None: image = self.watermarker.apply_watermark(image) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, PriorTransformer, UNet2DConditionModel from ...models.embeddings import get_timestep_embedding from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableUnCLIPPipeline\n\n >>> pipe = StableUnCLIPPipeline.from_pretrained(\n ... 
"fusing/stable-unclip-2-1-l", torch_dtype=torch.float16\n ... ) # TODO update model path\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> images = pipe(prompt).images\n >>> images[0].save("astronaut_horse.png")\n ```\n' class StableUnCLIPPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): _exclude_from_cpu_offload = ['prior', 'image_normalizer'] model_cpu_offload_seq = 'text_encoder->prior_text_encoder->unet->vae' prior_tokenizer: CLIPTokenizer prior_text_encoder: CLIPTextModelWithProjection prior: PriorTransformer prior_scheduler: KarrasDiffusionSchedulers image_normalizer: StableUnCLIPImageNormalizer image_noising_scheduler: KarrasDiffusionSchedulers tokenizer: CLIPTokenizer text_encoder: CLIPTextModel unet: UNet2DConditionModel scheduler: KarrasDiffusionSchedulers vae: AutoencoderKL def __init__(self, prior_tokenizer: CLIPTokenizer, prior_text_encoder: CLIPTextModelWithProjection, prior: PriorTransformer, prior_scheduler: KarrasDiffusionSchedulers, image_normalizer: StableUnCLIPImageNormalizer, image_noising_scheduler: KarrasDiffusionSchedulers, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, vae: AutoencoderKL): super().__init__() self.register_modules(prior_tokenizer=prior_tokenizer, prior_text_encoder=prior_text_encoder, prior=prior, prior_scheduler=prior_scheduler, image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, vae=vae) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def _encode_prior_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]]=None, text_attention_mask: Optional[torch.Tensor]=None): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.prior_tokenizer(prompt, padding='max_length', max_length=self.prior_tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.prior_tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.prior_tokenizer.batch_decode(untruncated_ids[:, self.prior_tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.prior_tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.prior_tokenizer.model_max_length] prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device)) prompt_embeds = prior_text_encoder_output.text_embeds text_enc_hid_states = prior_text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] (prompt_embeds, text_enc_hid_states) = (text_model_output[0], text_model_output[1]) text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) 
if do_classifier_free_guidance: uncond_tokens = [''] * batch_size uncond_input = self.prior_tokenizer(uncond_tokens, padding='max_length', max_length=self.prior_tokenizer.model_max_length, truncation=True, return_tensors='pt') uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_prior_text_encoder_output = self.prior_text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds uncond_text_enc_hid_states = negative_prompt_embeds_prior_text_encoder_output.last_hidden_state seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_enc_hid_states.shape[1] uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) uncond_text_enc_hid_states = uncond_text_enc_hid_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_enc_hid_states, text_mask) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
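# Editor's note (illustrative, not part of the original source): for classifier-free guidance the
# unconditional stream uses plain empty-string tokens (`[''] * batch_size`) and is concatenated
# *before* the conditional stream, so one batched forward pass serves both; the denoising loops in
# this file later split and recombine the two halves roughly as
# >>> pred_uncond, pred_text = pred.chunk(2)                     # hypothetical variable names
# >>> pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)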
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_prior_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, noise_level, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two.') if prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') if prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError('Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined.') if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: raise ValueError(f'`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive.') def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def noise_image_embeddings(self, image_embeds: torch.Tensor, noise_level: int, noise: Optional[torch.Tensor]=None, generator: Optional[torch.Generator]=None): if noise is None: noise = randn_tensor(image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype) noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) self.image_normalizer.to(image_embeds.device) image_embeds = self.image_normalizer.scale(image_embeds) image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) image_embeds = self.image_normalizer.unscale(image_embeds) noise_level = get_timestep_embedding(timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0) noise_level = noise_level.to(image_embeds.dtype) image_embeds = torch.cat((image_embeds, noise_level), 1) return image_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=20, guidance_scale: float=10.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[torch.Generator]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, noise_level: int=0, prior_num_inference_steps: int=25, prior_guidance_scale: float=4.0, prior_latents: Optional[torch.Tensor]=None, clip_skip: Optional[int]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = 
width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt=prompt, height=height, width=width, callback_steps=callback_steps, noise_level=noise_level, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt device = self._execution_device prior_do_classifier_free_guidance = prior_guidance_scale > 1.0 (prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask) = self._encode_prior_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=prior_do_classifier_free_guidance) self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents((batch_size, embedding_dim), prior_prompt_embeds.dtype, device, generator, prior_latents, self.prior_scheduler) prior_extra_step_kwargs = self.prepare_prior_extra_step_kwargs(generator, eta) for (i, t) in enumerate(self.progress_bar(prior_timesteps_tensor)): latent_model_input = torch.cat([prior_latents] * 2) if prior_do_classifier_free_guidance else prior_latents latent_model_input = self.prior_scheduler.scale_model_input(latent_model_input, t) predicted_image_embedding = self.prior(latent_model_input, timestep=t, proj_embedding=prior_prompt_embeds, encoder_hidden_states=prior_text_encoder_hidden_states, attention_mask=prior_text_mask).predicted_image_embedding if prior_do_classifier_free_guidance: (predicted_image_embedding_uncond, predicted_image_embedding_text) = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (predicted_image_embedding_text - predicted_image_embedding_uncond) prior_latents = self.prior_scheduler.step(predicted_image_embedding, timestep=t, sample=prior_latents, **prior_extra_step_kwargs, return_dict=False)[0] if callback is not None and i % callback_steps == 0: callback(i, t, prior_latents) prior_latents = self.prior.post_process_latents(prior_latents) image_embeds = prior_latents do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) image_embeds = self.noise_image_embeddings(image_embeds=image_embeds, noise_level=noise_level, generator=generator) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeds) image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) latents = 
self.prepare_latents(shape=shape, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=latents, scheduler=self.scheduler) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, class_labels=image_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.embeddings import get_timestep_embedding from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import requests\n >>> import torch\n >>> from PIL import Image\n >>> from io import BytesIO\n\n >>> from diffusers import StableUnCLIPImg2ImgPipeline\n\n >>> pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-2-1-unclip-small", torch_dtype=torch.float16\n ... 
)\n >>> pipe = pipe.to("cuda")\n\n >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n\n >>> response = requests.get(url)\n >>> init_image = Image.open(BytesIO(response.content)).convert("RGB")\n >>> init_image = init_image.resize((768, 512))\n\n >>> prompt = "A fantasy landscape, trending on artstation"\n\n >>> images = pipe(init_image, prompt).images\n >>> images[0].save("fantasy_landscape.png")\n ```\n' class StableUnCLIPImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae' _exclude_from_cpu_offload = ['image_normalizer'] feature_extractor: CLIPImageProcessor image_encoder: CLIPVisionModelWithProjection image_normalizer: StableUnCLIPImageNormalizer image_noising_scheduler: KarrasDiffusionSchedulers tokenizer: CLIPTokenizer text_encoder: CLIPTextModel unet: UNet2DConditionModel scheduler: KarrasDiffusionSchedulers vae: AutoencoderKL def __init__(self, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection, image_normalizer: StableUnCLIPImageNormalizer, image_noising_scheduler: KarrasDiffusionSchedulers, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, vae: AutoencoderKL): super().__init__() self.register_modules(feature_extractor=feature_extractor, image_encoder=image_encoder, image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, vae=vae) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def _encode_image(self, image, device, batch_size, num_images_per_prompt, do_classifier_free_guidance, noise_level, generator, image_embeds): dtype = next(self.image_encoder.parameters()).dtype if isinstance(image, PIL.Image.Image): repeat_by = batch_size else: repeat_by = num_images_per_prompt if image_embeds is None: if not isinstance(image, torch.Tensor): image = self.feature_extractor(images=image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = self.noise_image_embeddings(image_embeds=image_embeds, noise_level=noise_level, generator=generator) image_embeds = image_embeds.unsqueeze(1) (bs_embed, seq_len, _) = image_embeds.shape image_embeds = image_embeds.repeat(1, repeat_by, 1) image_embeds = image_embeds.view(bs_embed * repeat_by, seq_len, -1) image_embeds = image_embeds.squeeze(1) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeds) image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) return image_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = 
prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, image, height, width, callback_steps, noise_level, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, image_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two.') if prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') if prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError('Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined.') if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: raise ValueError(f'`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive.') if image is not None and image_embeds is not None: raise ValueError('Provide either `image` or `image_embeds`. Please make sure to define only one of the two.') if image is None and image_embeds is None: raise ValueError('Provide either `image` or `image_embeds`. 
Cannot leave both `image` and `image_embeds` undefined.') if image is not None: if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is {type(image)}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def noise_image_embeddings(self, image_embeds: torch.Tensor, noise_level: int, noise: Optional[torch.Tensor]=None, generator: Optional[torch.Generator]=None): if noise is None: noise = randn_tensor(image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype) noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) self.image_normalizer.to(image_embeds.device) image_embeds = self.image_normalizer.scale(image_embeds) image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) image_embeds = self.image_normalizer.unscale(image_embeds) noise_level = get_timestep_embedding(timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0) noise_level = noise_level.to(image_embeds.dtype) image_embeds = torch.cat((image_embeds, noise_level), 1) return image_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image: Union[torch.Tensor, PIL.Image.Image]=None, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=20, guidance_scale: float=10, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[torch.Generator]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, noise_level: int=0, image_embeds: Optional[torch.Tensor]=None, clip_skip: Optional[int]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if prompt is None and prompt_embeds is None: prompt = len(image) * [''] if isinstance(image, list) else '' self.check_inputs(prompt=prompt, image=image, height=height, width=width, callback_steps=callback_steps, noise_level=noise_level, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image_embeds=image_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * 
num_images_per_prompt device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) noise_level = torch.tensor([noise_level], device=device) image_embeds = self._encode_image(image=image, device=device, batch_size=batch_size, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, noise_level=noise_level, generator=generator, image_embeds=image_embeds) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels if latents is None: latents = self.prepare_latents(batch_size=batch_size, num_channels_latents=num_channels_latents, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) for (i, t) in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, class_labels=image_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/safety_checker.py import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging logger = logging.get_logger(__name__) def cosine_distance(image_embeds, text_embeds): normalized_image_embeds = nn.functional.normalize(image_embeds) normalized_text_embeds = nn.functional.normalize(text_embeds) return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) class StableDiffusionSafetyChecker(PreTrainedModel): config_class = CLIPConfig main_input_name = 'clip_input' _no_split_modules = ['CLIPEncoderLayer'] def __init__(self, config: CLIPConfig): super().__init__(config) self.vision_model = CLIPVisionModel(config.vision_config) self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) 
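# Editor's note (illustrative, not part of the original source): the safety checker below projects
# CLIP pooled outputs into the same space as 17 learned "concept" embeddings and 3 "special care"
# embeddings; an image is flagged when its cosine similarity to a concept exceeds that concept's
# learned threshold (with a 0.01 adjustment once any special-care concept has fired), i.e. roughly
# >>> score = cos_dist[i][concept_idx] - threshold[concept_idx] + adjustment   # hypothetical names
# >>> flagged = score > 0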
self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) @torch.no_grad() def forward(self, clip_input, images): pooled_output = self.vision_model(clip_input)[1] image_embeds = self.visual_projection(pooled_output) special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() result = [] batch_size = image_embeds.shape[0] for i in range(batch_size): result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} adjustment = 0.0 for concept_idx in range(len(special_cos_dist[0])): concept_cos = special_cos_dist[i][concept_idx] concept_threshold = self.special_care_embeds_weights[concept_idx].item() result_img['special_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) if result_img['special_scores'][concept_idx] > 0: result_img['special_care'].append({concept_idx, result_img['special_scores'][concept_idx]}) adjustment = 0.01 for concept_idx in range(len(cos_dist[0])): concept_cos = cos_dist[i][concept_idx] concept_threshold = self.concept_embeds_weights[concept_idx].item() result_img['concept_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) if result_img['concept_scores'][concept_idx] > 0: result_img['bad_concepts'].append(concept_idx) result.append(result_img) has_nsfw_concepts = [len(res['bad_concepts']) > 0 for res in result] for (idx, has_nsfw_concept) in enumerate(has_nsfw_concepts): if has_nsfw_concept: if torch.is_tensor(images) or torch.is_tensor(images[0]): images[idx] = torch.zeros_like(images[idx]) else: images[idx] = np.zeros(images[idx].shape) if any(has_nsfw_concepts): logger.warning('Potential NSFW content was detected in one or more images. A black image will be returned instead. 
Try again with a different prompt and/or seed.') return (images, has_nsfw_concepts) @torch.no_grad() def forward_onnx(self, clip_input: torch.Tensor, images: torch.Tensor): pooled_output = self.vision_model(clip_input)[1] image_embeds = self.visual_projection(pooled_output) special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) cos_dist = cosine_distance(image_embeds, self.concept_embeds) adjustment = 0.0 special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment special_care = torch.any(special_scores > 0, dim=1) special_adjustment = special_care * 0.01 special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) concept_scores = cos_dist - self.concept_embeds_weights + special_adjustment has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) images[has_nsfw_concepts] = 0.0 return (images, has_nsfw_concepts) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def jax_cosine_distance(emb_1, emb_2, eps=1e-12): norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T return jnp.matmul(norm_emb_1, norm_emb_2.T) class FlaxStableDiffusionSafetyCheckerModule(nn.Module): config: CLIPConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.vision_model = FlaxCLIPVisionModule(self.config.vision_config) self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype) self.concept_embeds = self.param('concept_embeds', jax.nn.initializers.ones, (17, self.config.projection_dim)) self.special_care_embeds = self.param('special_care_embeds', jax.nn.initializers.ones, (3, self.config.projection_dim)) self.concept_embeds_weights = self.param('concept_embeds_weights', jax.nn.initializers.ones, (17,)) self.special_care_embeds_weights = self.param('special_care_embeds_weights', jax.nn.initializers.ones, (3,)) def __call__(self, clip_input): pooled_output = self.vision_model(clip_input)[1] image_embeds = self.visual_projection(pooled_output) special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds) cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds) adjustment = 0.0 special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment special_scores = jnp.round(special_scores, 3) is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True) special_adjustment = is_special_care * 0.01 concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment concept_scores = jnp.round(concept_scores, 3) has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1) return has_nsfw_concepts class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel): config_class = CLIPConfig main_input_name = 'clip_input' module_class = FlaxStableDiffusionSafetyCheckerModule def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple]=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): if input_shape is None: input_shape = (1, 224, 224, 3) module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) 
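# Editor's note (illustrative, not from the original file): the Flax safety checker is initialized
# with NHWC inputs of shape (1, 224, 224, 3); `__call__` below transposes NCHW CLIP pixel values
# into that layout before applying the module, e.g.
# >>> clip_input = jnp.transpose(clip_input_nchw, (0, 2, 3, 1))   # hypothetical variable name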
def init_weights(self, rng: jax.Array, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: clip_input = jax.random.normal(rng, input_shape) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, clip_input)['params'] return random_params def __call__(self, clip_input, params: dict=None): clip_input = jnp.transpose(clip_input, (0, 2, 3, 1)) return self.module.apply({'params': params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={}) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin): @register_to_config def __init__(self, embedding_dim: int=768): super().__init__() self.mean = nn.Parameter(torch.zeros(1, embedding_dim)) self.std = nn.Parameter(torch.ones(1, embedding_dim)) def to(self, torch_device: Optional[Union[str, torch.device]]=None, torch_dtype: Optional[torch.dtype]=None): self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype)) self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype)) return self def scale(self, embeds): embeds = (embeds - self.mean) * 1.0 / self.std return embeds def unscale(self, embeds): embeds = embeds * self.std + self.mean return embeds # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_3/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available _dummy_objects = {} _additional_imports = {} _import_structure = {'pipeline_output': ['StableDiffusion3PipelineOutput']} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_3'] = ['StableDiffusion3Pipeline'] _import_structure['pipeline_stable_diffusion_3_img2img'] = ['StableDiffusion3Img2ImgPipeline'] _import_structure['pipeline_stable_diffusion_3_inpaint'] = ['StableDiffusion3InpaintPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_3 import StableDiffusion3Pipeline from .pipeline_stable_diffusion_3_img2img import StableDiffusion3Img2ImgPipeline from .pipeline_stable_diffusion_3_inpaint import StableDiffusion3InpaintPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for (name, value) in _additional_imports.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_3/pipeline_output.py from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class 
StableDiffusion3PipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor import VaeImageProcessor from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import SD3Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import StableDiffusion3PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusion3Pipeline\n\n >>> pipe = StableDiffusion3Pipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n >>> prompt = "A cat holding a sign that says hello world"\n >>> image = pipe(prompt).images[0]\n >>> image.save("sd3.png")\n ```\n' def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->text_encoder_3->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'negative_pooled_prompt_embeds'] def __init__(self, transformer: SD3Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_encoder_2: CLIPTextModelWithProjection, tokenizer_2: CLIPTokenizer, text_encoder_3: T5EncoderModel, tokenizer_3: T5TokenizerFast): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, text_encoder_3=text_encoder_3, tokenizer=tokenizer, tokenizer_2=tokenizer_2, tokenizer_3=tokenizer_3, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, 'vae') and self.vae is not None else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.tokenizer_max_length = self.tokenizer.model_max_length if hasattr(self, 'tokenizer') and self.tokenizer is not None else 77 self.default_sample_size = self.transformer.config.sample_size if hasattr(self, 'transformer') and self.transformer is not None else 128 def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=256, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if self.text_encoder_3 is None: return torch.zeros((batch_size * num_images_per_prompt, self.tokenizer_max_length, self.transformer.config.joint_attention_dim), device=device, dtype=dtype) text_inputs = self.tokenizer_3(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_3(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] dtype = self.text_encoder_3.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None, clip_skip: Optional[int]=None, clip_model_index: int=0): device = device or self._execution_device clip_tokenizers = 
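# --- Illustrative sketch (not part of the original source) ---------------------
# retrieve_timesteps() above dispatches on whichever of `num_inference_steps`,
# `timesteps`, or `sigmas` was supplied and always returns (timesteps,
# num_inference_steps). The toy scheduler below is a stand-in used only to make
# that dispatch visible; it is not a diffusers class.
from typing import List, Optional

class _ToyScheduler:
    def set_timesteps(self, num_inference_steps: Optional[int] = None, device=None, sigmas: Optional[List[float]] = None):
        # A real scheduler would build a noise schedule; here we only record the length.
        if sigmas is not None:
            self.timesteps = list(range(len(sigmas)))
        else:
            self.timesteps = list(range(num_inference_steps))

def _retrieve_timesteps_sketch():
    scheduler = _ToyScheduler()
    # Default path: only `num_inference_steps` is given.
    ts, n = retrieve_timesteps(scheduler, num_inference_steps=4)
    assert n == 4 and len(ts) == 4
    # Custom-sigmas path: the step count is re-derived from len(timesteps).
    ts, n = retrieve_timesteps(scheduler, sigmas=[1.0, 0.75, 0.5, 0.25, 0.0])
    assert n == 5
# --------------------------------------------------------------------------------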
[self.tokenizer, self.tokenizer_2] clip_text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = clip_tokenizers[clip_model_index] text_encoder = clip_text_encoders[clip_model_index] prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) return (prompt_embeds, pooled_prompt_embeds) def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], prompt_3: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, clip_skip: Optional[int]=None, max_sequence_length: int=256, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_3 = prompt_3 or prompt prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 (prompt_embed, pooled_prompt_embed) = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=0) (prompt_2_embed, pooled_prompt_2_embed) = self._get_clip_prompt_embeds(prompt=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=1) clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) t5_prompt_embed = 
self._get_t5_prompt_embeds(prompt=prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) clip_prompt_embeds = torch.nn.functional.pad(clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])) prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt_3 = negative_prompt_3 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 negative_prompt_3 = batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') (negative_prompt_embed, negative_pooled_prompt_embed) = self._get_clip_prompt_embeds(negative_prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=0) (negative_prompt_2_embed, negative_pooled_prompt_2_embed) = self._get_clip_prompt_embeds(negative_prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=1) negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) t5_negative_prompt_embed = self._get_t5_prompt_embeds(prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) negative_clip_prompt_embeds = torch.nn.functional.pad(negative_clip_prompt_embeds, (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1])) negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) negative_pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1) if self.text_encoder is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt, prompt_2, prompt_3, height, width, negative_prompt=None, negative_prompt_2=None, negative_prompt_3=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise 
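# --- Illustrative sketch (not part of the original source) ---------------------
# Shape bookkeeping behind encode_prompt(): the two CLIP hidden states are
# concatenated on the feature axis, zero-padded up to the T5 width, then joined
# with the T5 tokens on the sequence axis. The dimensions below (77/256 tokens,
# 768/1280/4096 channels) match the released SD3-medium configs but should be
# read as assumptions for this sketch.
import torch
import torch.nn.functional as F

clip_l = torch.randn(1, 77, 768)     # CLIP-L hidden states
clip_g = torch.randn(1, 77, 1280)    # OpenCLIP-G hidden states
t5 = torch.randn(1, 256, 4096)       # T5-XXL hidden states

clip_cat = torch.cat([clip_l, clip_g], dim=-1)                      # (1, 77, 2048)
clip_cat = F.pad(clip_cat, (0, t5.shape[-1] - clip_cat.shape[-1]))  # (1, 77, 4096)
prompt_embeds = torch.cat([clip_cat, t5], dim=-2)                   # (1, 333, 4096)
assert prompt_embeds.shape == (1, 77 + 256, 4096)
# --------------------------------------------------------------------------------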
ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_3 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') elif prompt_3 is not None and (not isinstance(prompt_3, str) and (not isinstance(prompt_3, list))): raise ValueError(f'`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_3 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed.
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if latents is not None: return latents.to(device=device, dtype=dtype) shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, prompt_3: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=28, timesteps: List[int]=None, guidance_scale: float=7.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, joint_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=256): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, prompt_3, height, width, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = 
self.joint_attention_kwargs.get('scale', None) if self.joint_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_3=prompt_3, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, clip_skip=self.clip_skip, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer(hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=pooled_prompt_embeds, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusion3PipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py import inspect from typing import Callable, 
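# --- Illustrative sketch (not part of the original source) ---------------------
# Classifier-free guidance step from the denoising loop above: the batch holds
# the unconditional half first and the conditional half second, and the combined
# prediction moves the unconditional estimate toward the text-conditioned one.
# `noise_pred` here is a random stand-in for the transformer output; 16 latent
# channels and 128x128 latents are assumed sizes.
import torch

guidance_scale = 7.0
latents = torch.randn(2, 16, 128, 128)        # (batch, channels, h, w) stand-in
noise_pred = torch.randn(4, 16, 128, 128)     # output over torch.cat([latents] * 2)
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == latents.shape
# --------------------------------------------------------------------------------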
Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import SD3LoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import SD3Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import StableDiffusion3PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n\n >>> from diffusers import AutoPipelineForImage2Image\n >>> from diffusers.utils import load_image\n\n >>> device = "cuda"\n >>> model_id_or_path = "stabilityai/stable-diffusion-3-medium-diffusers"\n >>> pipe = AutoPipelineForImage2Image.from_pretrained(model_id_or_path, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"\n >>> init_image = load_image(url).resize((1024, 1024))\n\n >>> prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"\n\n >>> images = pipe(prompt=prompt, image=init_image, strength=0.95, guidance_scale=7.5).images[0]\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusion3Img2ImgPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->text_encoder_2->text_encoder_3->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'negative_pooled_prompt_embeds'] def __init__(self, transformer: SD3Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_encoder_2: CLIPTextModelWithProjection, tokenizer_2: CLIPTokenizer, text_encoder_3: T5EncoderModel, tokenizer_3: T5TokenizerFast): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, text_encoder_3=text_encoder_3, tokenizer=tokenizer, tokenizer_2=tokenizer_2, tokenizer_3=tokenizer_3, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels) self.tokenizer_max_length = self.tokenizer.model_max_length self.default_sample_size = self.transformer.config.sample_size def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=256, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if self.text_encoder_3 is None: return torch.zeros((batch_size * num_images_per_prompt, self.tokenizer_max_length, self.transformer.config.joint_attention_dim), device=device, dtype=dtype) text_inputs = self.tokenizer_3(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_3(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] dtype = self.text_encoder_3.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None, clip_skip: Optional[int]=None, clip_model_index: int=0): device = device or self._execution_device clip_tokenizers = [self.tokenizer, self.tokenizer_2] clip_text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = clip_tokenizers[clip_model_index] text_encoder = 
clip_text_encoders[clip_model_index] prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) return (prompt_embeds, pooled_prompt_embeds) def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], prompt_3: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, clip_skip: Optional[int]=None, max_sequence_length: int=256, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_3 = prompt_3 or prompt prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 (prompt_embed, pooled_prompt_embed) = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=0) (prompt_2_embed, pooled_prompt_2_embed) = self._get_clip_prompt_embeds(prompt=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=1) clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) t5_prompt_embed = self._get_t5_prompt_embeds(prompt=prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) clip_prompt_embeds = 
torch.nn.functional.pad(clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])) prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt_3 = negative_prompt_3 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 negative_prompt_3 = batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') (negative_prompt_embed, negative_pooled_prompt_embed) = self._get_clip_prompt_embeds(negative_prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=0) (negative_prompt_2_embed, negative_pooled_prompt_2_embed) = self._get_clip_prompt_embeds(negative_prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=1) negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) t5_negative_prompt_embed = self._get_t5_prompt_embeds(prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) negative_clip_prompt_embeds = torch.nn.functional.pad(negative_clip_prompt_embeds, (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1])) negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) negative_pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1) if self.text_encoder is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt, prompt_2, prompt_3, strength, negative_prompt=None, negative_prompt_2=None, negative_prompt_3=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and 
prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_3 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') elif prompt_3 is not None and (not isinstance(prompt_3, str) and (not isinstance(prompt_3, list))): raise ValueError(f'`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_3 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed.
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == self.vae.config.latent_channels: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = (init_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.scale_noise(init_latents, timestep, noise) latents = init_latents.to(device=device, dtype=dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, prompt_3: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, strength: float=0.6, num_inference_steps: int=50, timesteps: List[int]=None, guidance_scale: float=7.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.FloatTensor]=None, 
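# --- Illustrative sketch (not part of the original source) ---------------------
# How `strength` trims the schedule in get_timesteps() above: only the final
# `num_inference_steps * strength` timesteps are run, so a higher strength adds
# more noise to the input image and re-generates more of it.
num_inference_steps, strength = 50, 0.6
init_timestep = min(num_inference_steps * strength, num_inference_steps)  # 30.0
t_start = int(max(num_inference_steps - init_timestep, 0))                # 20
remaining_steps = num_inference_steps - t_start                           # 30
assert (t_start, remaining_steps) == (20, 30)
# --------------------------------------------------------------------------------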
prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=256): self.check_inputs(prompt, prompt_2, prompt_3, strength, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_3=prompt_3, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, clip_skip=self.clip_skip, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) image = self.image_processor.preprocess(image) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) if latents is None: latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer(hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=pooled_prompt_embeds, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if 
callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == 'latent': image = latents else: latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusion3PipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py import inspect from typing import Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import SD3Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import StableDiffusion3PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusion3InpaintPipeline\n >>> from diffusers.utils import load_image\n\n >>> pipe = StableDiffusion3InpaintPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16\n ... 
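# --- Illustrative sketch (not part of the original source) ---------------------
# The encode and decode paths above apply inverse affine maps to the VAE latents:
#   encode: z_norm = (z - shift_factor) * scaling_factor
#   decode: z      =  z_norm / scaling_factor + shift_factor
# The constants below are the values shipped with the SD3 VAE config and should
# be treated as assumptions for this sketch.
import torch

scaling_factor, shift_factor = 1.5305, 0.0609
z = torch.randn(1, 16, 64, 64)
z_norm = (z - shift_factor) * scaling_factor     # what the image-encoding path stores
z_back = z_norm / scaling_factor + shift_factor  # what __call__ feeds to vae.decode
assert torch.allclose(z, z_back, atol=1e-6)
# --------------------------------------------------------------------------------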
)\n >>> pipe.to("cuda")\n >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"\n >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"\n >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"\n >>> source = load_image(img_url)\n >>> mask = load_image(mask_url)\n >>> image = pipe(prompt=prompt, image=source, mask_image=mask).images[0]\n >>> image.save("sd3_inpainting.png")\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusion3InpaintPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->text_encoder_3->transformer->vae' _optional_components = [] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'negative_pooled_prompt_embeds'] def __init__(self, transformer: SD3Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_encoder_2: CLIPTextModelWithProjection, tokenizer_2: CLIPTokenizer, text_encoder_3: T5EncoderModel, tokenizer_3: T5TokenizerFast): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, text_encoder_3=text_encoder_3, tokenizer=tokenizer, tokenizer_2=tokenizer_2, tokenizer_3=tokenizer_3, transformer=transformer, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels, do_normalize=False, do_binarize=True, do_convert_grayscale=True) self.tokenizer_max_length = self.tokenizer.model_max_length self.default_sample_size = self.transformer.config.sample_size def _get_t5_prompt_embeds(self, prompt: Union[str, List[str]]=None, num_images_per_prompt: int=1, max_sequence_length: int=256, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if self.text_encoder_3 is None: return torch.zeros((batch_size * num_images_per_prompt, self.tokenizer_max_length, self.transformer.config.joint_attention_dim), device=device, dtype=dtype) text_inputs = self.tokenizer_3(prompt, padding='max_length', max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_3(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because `max_sequence_length` is set to {max_sequence_length} tokens: {removed_text}') prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] dtype = self.text_encoder_3.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def _get_clip_prompt_embeds(self, prompt: Union[str, List[str]], num_images_per_prompt: int=1, device: Optional[torch.device]=None, clip_skip: Optional[int]=None, clip_model_index: int=0): device = 
device or self._execution_device clip_tokenizers = [self.tokenizer, self.tokenizer_2] clip_text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = clip_tokenizers[clip_model_index] text_encoder = clip_text_encoders[clip_model_index] prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = tokenizer(prompt, padding='max_length', max_length=self.tokenizer_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) (_, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) return (prompt_embeds, pooled_prompt_embeds) def encode_prompt(self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], prompt_3: Union[str, List[str]], device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.FloatTensor]=None, negative_prompt_embeds: Optional[torch.FloatTensor]=None, pooled_prompt_embeds: Optional[torch.FloatTensor]=None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor]=None, clip_skip: Optional[int]=None, max_sequence_length: int=256, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_3 = prompt_3 or prompt prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 (prompt_embed, pooled_prompt_embed) = self._get_clip_prompt_embeds(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=0) (prompt_2_embed, pooled_prompt_2_embed) = self._get_clip_prompt_embeds(prompt=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=1) clip_prompt_embeds = torch.cat([prompt_embed, 
prompt_2_embed], dim=-1) t5_prompt_embed = self._get_t5_prompt_embeds(prompt=prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) clip_prompt_embeds = torch.nn.functional.pad(clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])) prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt_3 = negative_prompt_3 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 negative_prompt_3 = batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') (negative_prompt_embed, negative_pooled_prompt_embed) = self._get_clip_prompt_embeds(negative_prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=0) (negative_prompt_2_embed, negative_pooled_prompt_2_embed) = self._get_clip_prompt_embeds(negative_prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=1) negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) t5_negative_prompt_embed = self._get_t5_prompt_embeds(prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device) negative_clip_prompt_embeds = torch.nn.functional.pad(negative_clip_prompt_embeds, (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1])) negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) negative_pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1) if self.text_encoder is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt, prompt_2, prompt_3, strength, negative_prompt=None, negative_prompt_2=None, negative_prompt_3=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise 
ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_3 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') elif prompt_3 is not None and (not isinstance(prompt_3, str) and (not isinstance(prompt_3, list))): raise ValueError(f'`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_3 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f'`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}') def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, return_noise=False, return_image_latents=False): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if (image is None or timestep is None) and (not is_strength_max): raise ValueError('Since strength < 1, the initial latents are to be initialised as a combination of Image + Noise. However, either the image or the noise timestep has not been provided.') if return_image_latents or (latents is None and (not is_strength_max)): image = image.to(device=device, dtype=dtype) if image.shape[1] == 16: image_latents = image else: image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = noise if is_strength_max else self.scheduler.scale_noise(image_latents, timestep, noise) else: noise = latents.to(device) latents = noise outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor return image_latents def prepare_mask_latents(self, mask, masked_image, batch_size, num_images_per_prompt, height, width, dtype, device, generator, do_classifier_free_guidance): mask = torch.nn.functional.interpolate(mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask = mask.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == 16: masked_image_latents = masked_image else: masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator) masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError(f"The 
passed mask and the required batch size don't match. Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size.") mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError(f"The passed images and the required batch size don't match. Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. Make sure the number of images that you pass is divisible by the total requested batch size.") masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return (mask, masked_image_latents) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, prompt_3: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, mask_image: PipelineImageInput=None, masked_image_latents: PipelineImageInput=None, height: int=None, width: int=None, padding_mask_crop: Optional[int]=None, strength: float=0.6, num_inference_steps: int=50, timesteps: List[int]=None, guidance_scale: float=7.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, negative_prompt_3: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], max_sequence_length: int=256): if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, prompt_3, strength, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length) self._guidance_scale = guidance_scale self._clip_skip = 
clip_skip self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, prompt_3=prompt_3, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, clip_skip=self.clip_skip, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) if num_inference_steps < 1: raise ValueError(f'After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipelinesteps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.') latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 if padding_mask_crop is not None: crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = 'fill' else: crops_coords = None resize_mode = 'default' original_image = image init_image = self.image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode) init_image = init_image.to(dtype=torch.float32) num_channels_latents = self.vae.config.latent_channels num_channels_transformer = self.transformer.config.in_channels return_image_latents = num_channels_transformer == 16 latents_outputs = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, image=init_image, timestep=latent_timestep, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents) if return_image_latents: (latents, noise, image_latents) = latents_outputs else: (latents, noise) = latents_outputs mask_condition = self.mask_processor.preprocess(mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) if masked_image_latents is None: masked_image = init_image * (mask_condition < 0.5) else: masked_image = masked_image_latents (mask, masked_image_latents) = self.prepare_mask_latents(mask_condition, masked_image, batch_size, num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance) if num_channels_transformer == 33: num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.transformer.config.in_channels: raise ValueError(f'Incorrect configuration settings! 
The config of `pipeline.transformer`: {self.transformer.config} expects {self.transformer.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of `pipeline.transformer` or your `mask_image` or `image` input.') elif num_channels_transformer != 16: raise ValueError(f'The transformer {self.transformer.__class__} should have 16 input channels or 33 input channels, not {self.transformer.config.in_channels}.') num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents timestep = t.expand(latent_model_input.shape[0]) if num_channels_transformer == 33: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) noise_pred = self.transformer(hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=pooled_prompt_embeds, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if num_channels_transformer == 16: init_latents_proper = image_latents if self.do_classifier_free_guidance: (init_mask, _) = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.scale_noise(init_latents_proper, torch.tensor([noise_timestep]), noise) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) mask = callback_outputs.pop('mask', mask) masked_image_latents = callback_outputs.pop('masked_image_latents', masked_image_latents) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] else: image = latents do_denormalize = [True] * image.shape[0] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusion3PipelineOutput(images=image) # File: 
diffusers-main/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_attend_and_excite'] = ['StableDiffusionAttendAndExcitePipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py import inspect import math from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from torch.nn import functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import Attention from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionAttendAndExcitePipeline\n\n >>> pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(\n ... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16\n ... ).to("cuda")\n\n\n >>> prompt = "a cat and a frog"\n\n >>> # use get_indices function to find out indices of the tokens you want to alter\n >>> pipe.get_indices(prompt)\n {0: \'<|startoftext|>\', 1: \'a\', 2: \'cat\', 3: \'and\', 4: \'a\', 5: \'frog\', 6: \'<|endoftext|>\'}\n\n >>> token_indices = [2, 5]\n >>> seed = 6141\n >>> generator = torch.Generator("cuda").manual_seed(seed)\n\n >>> images = pipe(\n ... prompt=prompt,\n ... token_indices=token_indices,\n ... guidance_scale=7.5,\n ... generator=generator,\n ... num_inference_steps=50,\n ... max_iter_to_alter=25,\n ... 
).images\n\n >>> image = images[0]\n >>> image.save(f"../images/{prompt}_{seed}.png")\n ```\n' class AttentionStore: @staticmethod def get_empty_store(): return {'down': [], 'mid': [], 'up': []} def __call__(self, attn, is_cross: bool, place_in_unet: str): if self.cur_att_layer >= 0 and is_cross: if attn.shape[1] == np.prod(self.attn_res): self.step_store[place_in_unet].append(attn) self.cur_att_layer += 1 if self.cur_att_layer == self.num_att_layers: self.cur_att_layer = 0 self.between_steps() def between_steps(self): self.attention_store = self.step_store self.step_store = self.get_empty_store() def get_average_attention(self): average_attention = self.attention_store return average_attention def aggregate_attention(self, from_where: List[str]) -> torch.Tensor: out = [] attention_maps = self.get_average_attention() for location in from_where: for item in attention_maps[location]: cross_maps = item.reshape(-1, self.attn_res[0], self.attn_res[1], item.shape[-1]) out.append(cross_maps) out = torch.cat(out, dim=0) out = out.sum(0) / out.shape[0] return out def reset(self): self.cur_att_layer = 0 self.step_store = self.get_empty_store() self.attention_store = {} def __init__(self, attn_res): self.num_att_layers = -1 self.cur_att_layer = 0 self.step_store = self.get_empty_store() self.attention_store = {} self.curr_step_index = 0 self.attn_res = attn_res class AttendExciteAttnProcessor: def __init__(self, attnstore, place_in_unet): super().__init__() self.attnstore = attnstore self.place_in_unet = place_in_unet def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None): (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross = encoder_hidden_states is not None encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) if attention_probs.requires_grad: self.attnstore(attention_probs, is_cross, self.place_in_unet) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. 
Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and 
self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents 
method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, indices, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') indices_is_list_ints = isinstance(indices, list) and isinstance(indices[0], int) indices_is_list_list_ints = isinstance(indices, list) and isinstance(indices[0], list) and isinstance(indices[0][0], int) if not indices_is_list_ints and (not indices_is_list_list_ints): raise TypeError('`indices` must be a list of ints or a list of a list of ints') if indices_is_list_ints: indices_batch_size = 1 elif indices_is_list_list_ints: indices_batch_size = len(indices) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if indices_batch_size != prompt_batch_size: raise ValueError(f'indices batch size must be same as prompt batch size. 
indices batch size: {indices_batch_size}, prompt batch size: {prompt_batch_size}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @staticmethod def _compute_max_attention_per_index(attention_maps: torch.Tensor, indices: List[int]) -> List[torch.Tensor]: attention_for_text = attention_maps[:, :, 1:-1] attention_for_text *= 100 attention_for_text = torch.nn.functional.softmax(attention_for_text, dim=-1) indices = [index - 1 for index in indices] max_indices_list = [] for i in indices: image = attention_for_text[:, :, i] smoothing = GaussianSmoothing().to(attention_maps.device) input = F.pad(image.unsqueeze(0).unsqueeze(0), (1, 1, 1, 1), mode='reflect') image = smoothing(input).squeeze(0).squeeze(0) max_indices_list.append(image.max()) return max_indices_list def _aggregate_and_get_max_attention_per_token(self, indices: List[int]): attention_maps = self.attention_store.aggregate_attention(from_where=('up', 'down', 'mid')) max_attention_per_index = self._compute_max_attention_per_index(attention_maps=attention_maps, indices=indices) return max_attention_per_index @staticmethod def _compute_loss(max_attention_per_index: List[torch.Tensor]) -> torch.Tensor: losses = [max(0, 1.0 - curr_max) for curr_max in max_attention_per_index] loss = max(losses) return loss @staticmethod def _update_latent(latents: torch.Tensor, loss: torch.Tensor, step_size: float) -> torch.Tensor: grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents], retain_graph=True)[0] latents = latents - step_size * grad_cond return latents def _perform_iterative_refinement_step(self, latents: torch.Tensor, indices: List[int], loss: torch.Tensor, threshold: float, text_embeddings: torch.Tensor, step_size: float, t: int, max_refinement_steps: int=20): iteration = 0 target_loss = max(0, 1.0 - threshold) while loss > target_loss: iteration += 1 latents = latents.clone().detach().requires_grad_(True) self.unet(latents, t, encoder_hidden_states=text_embeddings).sample self.unet.zero_grad() max_attention_per_index = self._aggregate_and_get_max_attention_per_token(indices=indices) loss = self._compute_loss(max_attention_per_index) if loss != 0: latents = self._update_latent(latents, loss, step_size) logger.info(f'\t Try {iteration}. loss: {loss}') if iteration >= max_refinement_steps: logger.info(f'\t Exceeded max number of iterations ({max_refinement_steps})! 
') break latents = latents.clone().detach().requires_grad_(True) _ = self.unet(latents, t, encoder_hidden_states=text_embeddings).sample self.unet.zero_grad() max_attention_per_index = self._aggregate_and_get_max_attention_per_token(indices=indices) loss = self._compute_loss(max_attention_per_index) logger.info(f'\t Finished with loss of: {loss}') return (loss, latents, max_attention_per_index) def register_attention_control(self): attn_procs = {} cross_att_count = 0 for name in self.unet.attn_processors.keys(): if name.startswith('mid_block'): place_in_unet = 'mid' elif name.startswith('up_blocks'): place_in_unet = 'up' elif name.startswith('down_blocks'): place_in_unet = 'down' else: continue cross_att_count += 1 attn_procs[name] = AttendExciteAttnProcessor(attnstore=self.attention_store, place_in_unet=place_in_unet) self.unet.set_attn_processor(attn_procs) self.attention_store.num_att_layers = cross_att_count def get_indices(self, prompt: str) -> Dict[str, int]: ids = self.tokenizer(prompt).input_ids indices = {i: tok for (tok, i) in zip(self.tokenizer.convert_ids_to_tokens(ids), range(len(ids)))} return indices @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]], token_indices: Union[List[int], List[List[int]]], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, max_iter_to_alter: int=25, thresholds: dict={0: 0.05, 10: 0.5, 20: 0.8}, scale_factor: int=20, attn_res: Optional[Tuple[int]]=(16, 16), clip_skip: Optional[int]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, token_indices, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if attn_res is None: attn_res = (int(np.ceil(width / 32)), int(np.ceil(height / 32))) self.attention_store = AttentionStore(attn_res) original_attn_proc = self.unet.attn_processors 
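# Note: the original attention processors are stored here so they can be restored via `self.unet.set_attn_processor(original_attn_proc)` once sampling finishes; `register_attention_control()` temporarily swaps in `AttendExciteAttnProcessor` instances that record the cross-attention maps into `self.attention_store` for the attend-and-excite updates.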
self.register_attention_control() scale_range = np.linspace(1.0, 0.5, len(self.scheduler.timesteps)) step_size = scale_factor * np.sqrt(scale_range) text_embeddings = prompt_embeds[batch_size * num_images_per_prompt:] if do_classifier_free_guidance else prompt_embeds if isinstance(token_indices[0], int): token_indices = [token_indices] indices = [] for ind in token_indices: indices = indices + [ind] * num_images_per_prompt num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): with torch.enable_grad(): latents = latents.clone().detach().requires_grad_(True) updated_latents = [] for (latent, index, text_embedding) in zip(latents, indices, text_embeddings): latent = latent.unsqueeze(0) text_embedding = text_embedding.unsqueeze(0) self.unet(latent, t, encoder_hidden_states=text_embedding, cross_attention_kwargs=cross_attention_kwargs).sample self.unet.zero_grad() max_attention_per_index = self._aggregate_and_get_max_attention_per_token(indices=index) loss = self._compute_loss(max_attention_per_index=max_attention_per_index) if i in thresholds.keys() and loss > 1.0 - thresholds[i]: (loss, latent, max_attention_per_index) = self._perform_iterative_refinement_step(latents=latent, indices=index, loss=loss, threshold=thresholds[i], text_embeddings=text_embedding, step_size=step_size[i], t=t) if i < max_iter_to_alter: if loss != 0: latent = self._update_latent(latents=latent, loss=loss, step_size=step_size[i]) logger.info(f'Iteration {i} | Loss: {loss:0.4f}') updated_latents.append(latent) latents = torch.cat(updated_latents, dim=0) latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) class GaussianSmoothing(torch.nn.Module): def __init__(self, channels: int=1, kernel_size: int=3, sigma: float=0.5, dim: int=2): super().__init__() if isinstance(kernel_size, int): kernel_size = [kernel_size] * dim if isinstance(sigma, float): sigma = [sigma] * dim kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, 
dtype=torch.float32) for size in kernel_size]) for (size, std, mgrid) in zip(kernel_size, sigma, meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / (2 * std)) ** 2) kernel = kernel / torch.sum(kernel) kernel = kernel.view(1, 1, *kernel.size()) kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) self.register_buffer('weight', kernel) self.groups = channels if dim == 1: self.conv = F.conv1d elif dim == 2: self.conv = F.conv2d elif dim == 3: self.conv = F.conv3d else: raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)) def forward(self, input): return self.conv(input, weight=self.weight.to(input.dtype), groups=self.groups) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_diffedit'] = ['StableDiffusionDiffEditPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py import inspect from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...configuration_utils import FrozenDict from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, BaseOutput, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) @dataclass class DiffEditInversionPipelineOutput(BaseOutput): latents: torch.Tensor images: Union[List[PIL.Image.Image], np.ndarray] EXAMPLE_DOC_STRING = '\n\n ```py\n >>> import PIL\n >>> import requests\n >>> import torch\n >>> from io import BytesIO\n\n >>> from diffusers import StableDiffusionDiffEditPipeline\n\n\n >>> def download_image(url):\n ... 
response = requests.get(url)\n ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")\n\n\n >>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"\n\n >>> init_image = download_image(img_url).resize((768, 768))\n\n >>> pipeline = StableDiffusionDiffEditPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16\n ... )\n\n >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.enable_model_cpu_offload()\n\n >>> mask_prompt = "A bowl of fruits"\n >>> prompt = "A bowl of pears"\n\n >>> mask_image = pipeline.generate_mask(image=init_image, source_prompt=prompt, target_prompt=mask_prompt)\n >>> image_latents = pipeline.invert(image=init_image, prompt=mask_prompt).latents\n >>> image = pipeline(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[0]\n ```\n' EXAMPLE_INVERT_DOC_STRING = '\n ```py\n >>> import PIL\n >>> import requests\n >>> import torch\n >>> from io import BytesIO\n\n >>> from diffusers import StableDiffusionDiffEditPipeline\n\n\n >>> def download_image(url):\n ... response = requests.get(url)\n ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")\n\n\n >>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"\n\n >>> init_image = download_image(img_url).resize((768, 768))\n\n >>> pipeline = StableDiffusionDiffEditPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16\n ... )\n\n >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.enable_model_cpu_offload()\n\n >>> prompt = "A bowl of fruits"\n\n >>> inverted_latents = pipeline.invert(image=init_image, prompt=prompt).latents\n ```\n' def auto_corr_loss(hidden_states, generator=None): reg_loss = 0.0 for i in range(hidden_states.shape[0]): for j in range(hidden_states.shape[1]): noise = hidden_states[i:i + 1, j:j + 1, :, :] while True: roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item() reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2 reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2 if noise.shape[2] <= 8: break noise = torch.nn.functional.avg_pool2d(noise, kernel_size=2) return reg_loss def kl_divergence(hidden_states): return hidden_states.var() + hidden_states.mean() ** 2 - 1 - torch.log(hidden_states.var() + 1e-07) def preprocess(image): deprecation_message = 'The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead' deprecate('preprocess', '1.0.0', deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): (w, h) = image[0].size (w, h) = (x - x % 8 for x in (w, h)) image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image def preprocess_mask(mask, batch_size: int=1): if not isinstance(mask, torch.Tensor): if isinstance(mask, (PIL.Image.Image, np.ndarray)): mask = [mask] if isinstance(mask, list): if isinstance(mask[0], PIL.Image.Image): mask = [np.array(m.convert('L')).astype(np.float32) / 255.0 for m in mask] if isinstance(mask[0], np.ndarray): mask = np.stack(mask, axis=0) if mask[0].ndim < 3 else np.concatenate(mask, axis=0) mask = torch.from_numpy(mask) elif isinstance(mask[0], torch.Tensor): mask = torch.stack(mask, dim=0) if mask[0].ndim < 3 else torch.cat(mask, dim=0) if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) if mask.ndim == 3: if mask.shape[0] == 1: mask = mask.unsqueeze(0) else: mask = mask.unsqueeze(1) if batch_size > 1: if mask.shape[0] == 1: mask = torch.cat([mask] * batch_size) elif mask.shape[0] > 1 and mask.shape[0] != batch_size: raise ValueError(f'`mask_image` with batch size {mask.shape[0]} cannot be broadcasted to batch size {batch_size} inferred by prompt inputs') if mask.shape[1] != 1: raise ValueError(f'`mask_image` must have 1 channel, but has {mask.shape[1]} channels') if mask.min() < 0 or mask.max() > 1: raise ValueError('`mask_image` should be in [0, 1] range') mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 return mask class StableDiffusionDiffEditPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'inverse_scheduler'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, inverse_scheduler: DDIMInverseScheduler, requires_safety_checker: bool=True): super().__init__() if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'skip_prk_steps') and scheduler.config.skip_prk_steps is False: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `skip_prk_steps`. 
`skip_prk_steps` should be set to True in the configuration file. Please make sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('skip_prk_steps not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['skip_prk_steps'] = True scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, inverse_scheduler=inverse_scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), 
attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in 
set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs(self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if strength is None or (strength is not None and (strength < 0 or strength > 1)): raise ValueError(f'The value of `strength` should be in [0.0, 1.0] but is {strength} of type {type(strength)}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def check_source_inputs(self, source_prompt=None, source_negative_prompt=None, source_prompt_embeds=None, source_negative_prompt_embeds=None): if source_prompt is not None and source_prompt_embeds is not None: raise ValueError(f'Cannot forward both `source_prompt`: {source_prompt} and `source_prompt_embeds`: {source_prompt_embeds}. Please make sure to only forward one of the two.') elif source_prompt is None and source_prompt_embeds is None: raise ValueError('Provide either `source_prompt` or `source_prompt_embeds`. Cannot leave both of the arguments undefined.') elif source_prompt is not None and (not isinstance(source_prompt, str) and (not isinstance(source_prompt, list))): raise ValueError(f'`source_prompt` has to be of type `str` or `list` but is {type(source_prompt)}') if source_negative_prompt is not None and source_negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `source_negative_prompt`: {source_negative_prompt} and `source_negative_prompt_embeds`: {source_negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if source_prompt_embeds is not None and source_negative_prompt_embeds is not None: if source_prompt_embeds.shape != source_negative_prompt_embeds.shape: raise ValueError(f'`source_prompt_embeds` and `source_negative_prompt_embeds` must have the same shape when passed directly, but got: `source_prompt_embeds` {source_prompt_embeds.shape} != `source_negative_prompt_embeds` {source_negative_prompt_embeds.shape}.') def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] return (timesteps, num_inference_steps - t_start) def get_inverse_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) if t_start == 0: return (self.inverse_scheduler.timesteps, num_inference_steps) timesteps = self.inverse_scheduler.timesteps[:-t_start] return (timesteps, num_inference_steps - t_start) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_latents(self, image, batch_size, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if isinstance(generator, list): latents = [self.vae.encode(image[i:i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)] latents = torch.cat(latents, dim=0) else: latents = self.vae.encode(image).latent_dist.sample(generator) latents = self.vae.config.scaling_factor * latents if batch_size != latents.shape[0]: if batch_size % latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' 
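# NOTE: a minimal sketch of the duplication handled just below (shapes are hypothetical, not taken
# from this file): with batch_size = 4 prompts and image latents of shape (2, 4, 64, 64), the code
# computes additional_latents_per_image = 4 // 2 == 2 and tiles the latents via
# torch.cat([latents] * 2, dim=0), giving shape (4, 4, 64, 64); a non-integer ratio raises a ValueError instead.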
deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_latents_per_image = batch_size // latents.shape[0] latents = torch.cat([latents] * additional_latents_per_image, dim=0) else: raise ValueError(f'Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts.') else: latents = torch.cat([latents], dim=0) return latents def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int): pred_type = self.inverse_scheduler.config.prediction_type alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t if pred_type == 'epsilon': return model_output elif pred_type == 'sample': return (sample - alpha_prod_t ** 0.5 * model_output) / beta_prod_t ** 0.5 elif pred_type == 'v_prediction': return alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`') @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def generate_mask(self, image: Union[torch.Tensor, PIL.Image.Image]=None, target_prompt: Optional[Union[str, List[str]]]=None, target_negative_prompt: Optional[Union[str, List[str]]]=None, target_prompt_embeds: Optional[torch.Tensor]=None, target_negative_prompt_embeds: Optional[torch.Tensor]=None, source_prompt: Optional[Union[str, List[str]]]=None, source_negative_prompt: Optional[Union[str, List[str]]]=None, source_prompt_embeds: Optional[torch.Tensor]=None, source_negative_prompt_embeds: Optional[torch.Tensor]=None, num_maps_per_mask: Optional[int]=10, mask_encode_strength: Optional[float]=0.5, mask_thresholding_ratio: Optional[float]=3.0, num_inference_steps: int=50, guidance_scale: float=7.5, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, output_type: Optional[str]='np', cross_attention_kwargs: Optional[Dict[str, Any]]=None): self.check_inputs(target_prompt, mask_encode_strength, 1, target_negative_prompt, target_prompt_embeds, target_negative_prompt_embeds) self.check_source_inputs(source_prompt, source_negative_prompt, source_prompt_embeds, source_negative_prompt_embeds) if num_maps_per_mask is None or (num_maps_per_mask is not None and (not isinstance(num_maps_per_mask, int) or num_maps_per_mask <= 0)): raise ValueError(f'`num_maps_per_mask` has to be a positive integer but is {num_maps_per_mask} of type {type(num_maps_per_mask)}.') if mask_thresholding_ratio is None or mask_thresholding_ratio <= 0: raise ValueError(f'`mask_thresholding_ratio` has to be positive but is {mask_thresholding_ratio} of type {type(mask_thresholding_ratio)}.') if target_prompt is not None and isinstance(target_prompt, str): batch_size = 1 elif target_prompt is not None and isinstance(target_prompt, list): batch_size = len(target_prompt) else: batch_size = target_prompt_embeds.shape[0] if cross_attention_kwargs is None: cross_attention_kwargs = {} device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (target_negative_prompt_embeds, target_prompt_embeds) = self.encode_prompt(target_prompt, device, num_maps_per_mask, do_classifier_free_guidance, target_negative_prompt, prompt_embeds=target_prompt_embeds, negative_prompt_embeds=target_negative_prompt_embeds) if do_classifier_free_guidance: target_prompt_embeds = torch.cat([target_negative_prompt_embeds, target_prompt_embeds]) (source_negative_prompt_embeds, 
source_prompt_embeds) = self.encode_prompt(source_prompt, device, num_maps_per_mask, do_classifier_free_guidance, source_negative_prompt, prompt_embeds=source_prompt_embeds, negative_prompt_embeds=source_negative_prompt_embeds) if do_classifier_free_guidance: source_prompt_embeds = torch.cat([source_negative_prompt_embeds, source_prompt_embeds]) image = self.image_processor.preprocess(image).repeat_interleave(num_maps_per_mask, dim=0) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, _) = self.get_timesteps(num_inference_steps, mask_encode_strength, device) encode_timestep = timesteps[0] image_latents = self.prepare_image_latents(image, batch_size * num_maps_per_mask, self.vae.dtype, device, generator) noise = randn_tensor(image_latents.shape, generator=generator, device=device, dtype=self.vae.dtype) image_latents = self.scheduler.add_noise(image_latents, noise, encode_timestep) latent_model_input = torch.cat([image_latents] * (4 if do_classifier_free_guidance else 2)) latent_model_input = self.scheduler.scale_model_input(latent_model_input, encode_timestep) prompt_embeds = torch.cat([source_prompt_embeds, target_prompt_embeds]) noise_pred = self.unet(latent_model_input, encode_timestep, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample if do_classifier_free_guidance: (noise_pred_neg_src, noise_pred_source, noise_pred_uncond, noise_pred_target) = noise_pred.chunk(4) noise_pred_source = noise_pred_neg_src + guidance_scale * (noise_pred_source - noise_pred_neg_src) noise_pred_target = noise_pred_uncond + guidance_scale * (noise_pred_target - noise_pred_uncond) else: (noise_pred_source, noise_pred_target) = noise_pred.chunk(2) mask_guidance_map = torch.abs(noise_pred_target - noise_pred_source).reshape(batch_size, num_maps_per_mask, *noise_pred_target.shape[-3:]).mean([1, 2]) clamp_magnitude = mask_guidance_map.mean() * mask_thresholding_ratio semantic_mask_image = mask_guidance_map.clamp(0, clamp_magnitude) / clamp_magnitude semantic_mask_image = torch.where(semantic_mask_image <= 0.5, 0, 1) mask_image = semantic_mask_image.cpu().numpy() if output_type == 'pil': mask_image = self.image_processor.numpy_to_pil(mask_image) self.maybe_free_model_hooks() return mask_image @torch.no_grad() @replace_example_docstring(EXAMPLE_INVERT_DOC_STRING) def invert(self, prompt: Optional[Union[str, List[str]]]=None, image: Union[torch.Tensor, PIL.Image.Image]=None, num_inference_steps: int=50, inpaint_strength: float=0.8, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, decode_latents: bool=False, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, lambda_auto_corr: float=20.0, lambda_kl: float=20.0, num_reg_steps: int=0, num_auto_corr_rolls: int=5): self.check_inputs(prompt, inpaint_strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if image is None: raise ValueError('`image` input cannot be undefined.') if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if cross_attention_kwargs is None: cross_attention_kwargs = {} 
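# NOTE: `invert()` collects the intermediate latents produced by the inverse scheduler at every
# timestep and returns the reversed stack as `DiffEditInversionPipelineOutput.latents`, so that
# index 0 holds the most-noised latent; `__call__` later consumes this stack as `image_latents`,
# starting denoising from slice 0 and re-mixing slice `i` outside the mask at each step.
# A hedged usage sketch of the full workflow (the prompts and the preloaded PIL image `init_image`
# are illustrative assumptions, not taken from this file):
#     >>> from diffusers import DDIMScheduler, DDIMInverseScheduler
#     >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
#     >>> pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
#     >>> mask = pipe.generate_mask(image=init_image, source_prompt="a bowl of fruits", target_prompt="a basket of pears")
#     >>> inv_latents = pipe.invert(prompt="a bowl of fruits", image=init_image).latents
#     >>> image = pipe(prompt="a basket of pears", mask_image=mask, image_latents=inv_latents).images[0]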
device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 image = self.image_processor.preprocess(image) num_images_per_prompt = 1 latents = self.prepare_image_latents(image, batch_size * num_images_per_prompt, self.vae.dtype, device, generator) (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.inverse_scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_inverse_timesteps(num_inference_steps, inpaint_strength, device) num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order inverted_latents = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if num_reg_steps > 0: with torch.enable_grad(): for _ in range(num_reg_steps): if lambda_auto_corr > 0: for _ in range(num_auto_corr_rolls): var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) l_ac = auto_corr_loss(var_epsilon, generator=generator) l_ac.backward() grad = var.grad.detach() / num_auto_corr_rolls noise_pred = noise_pred - lambda_auto_corr * grad if lambda_kl > 0: var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) l_kld = kl_divergence(var_epsilon) l_kld.backward() grad = var.grad.detach() noise_pred = noise_pred - lambda_kl * grad noise_pred = noise_pred.detach() latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample inverted_latents.append(latents.detach().clone()) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) assert len(inverted_latents) == len(timesteps) latents = torch.stack(list(reversed(inverted_latents)), 1) image = None if decode_latents: image = self.decode_latents(latents.flatten(0, 1)) if decode_latents and output_type == 'pil': image = self.image_processor.numpy_to_pil(image) self.maybe_free_model_hooks() if not return_dict: return (latents, image) return DiffEditInversionPipelineOutput(latents=latents, images=image) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, mask_image: Union[torch.Tensor, PIL.Image.Image]=None, image_latents: Union[torch.Tensor, PIL.Image.Image]=None, inpaint_strength: Optional[float]=0.8, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: 
Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: int=None): self.check_inputs(prompt, inpaint_strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if mask_image is None: raise ValueError('`mask_image` input cannot be undefined. Use `generate_mask()` to compute `mask_image` from text prompts.') if image_latents is None: raise ValueError('`image_latents` input cannot be undefined. Use `invert()` to compute `image_latents` from input images.') if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if cross_attention_kwargs is None: cross_attention_kwargs = {} device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) mask_image = preprocess_mask(mask_image, batch_size) (latent_height, latent_width) = mask_image.shape[-2:] mask_image = torch.cat([mask_image] * num_images_per_prompt) mask_image = mask_image.to(device=device, dtype=prompt_embeds.dtype) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, inpaint_strength, device) if isinstance(image_latents, list) and any((isinstance(l, torch.Tensor) and l.ndim == 5 for l in image_latents)): image_latents = torch.cat(image_latents).detach() elif isinstance(image_latents, torch.Tensor) and image_latents.ndim == 5: image_latents = image_latents.detach() else: image_latents = self.image_processor.preprocess(image_latents).detach() latent_shape = (self.vae.config.latent_channels, latent_height, latent_width) if image_latents.shape[-3:] != latent_shape: raise ValueError(f'Each latent image in `image_latents` must have shape {latent_shape}, but has shape {image_latents.shape[-3:]}') if image_latents.ndim == 4: image_latents = image_latents.reshape(batch_size, len(timesteps), *latent_shape) if image_latents.shape[:2] != (batch_size, len(timesteps)): raise ValueError(f'`image_latents` must have batch size {batch_size} with latent images from {len(timesteps)} timesteps, but has batch size {image_latents.shape[0]} with latent images from {image_latents.shape[1]} timesteps.') image_latents = image_latents.transpose(0, 1).repeat_interleave(num_images_per_prompt, dim=1) image_latents = image_latents.to(device=device, dtype=prompt_embeds.dtype) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) latents = image_latents[0].clone() num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = 
torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample latents = latents * mask_image + image_latents[i] * (1 - mask_image) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_gligen'] = ['StableDiffusionGLIGENPipeline'] _import_structure['pipeline_stable_diffusion_gligen_text_image'] = ['StableDiffusionGLIGENTextImagePipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_gligen import StableDiffusionGLIGENPipeline from .pipeline_stable_diffusion_gligen_text_image import StableDiffusionGLIGENTextImagePipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py import inspect import warnings from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention import GatedSelfAttentionDense 
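# NOTE: the `GatedSelfAttentionDense` layers imported above are the GLIGEN grounding ("fuser")
# blocks inside the UNet. `enable_fuser()` defined later in this pipeline flips their `enabled`
# flag, so grounding is applied only for the first `int(gligen_scheduled_sampling_beta * len(timesteps))`
# denoising steps (scheduled sampling) and disabled afterwards.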
from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionGLIGENPipeline\n >>> from diffusers.utils import load_image\n\n >>> # Insert objects described by text at the region defined by bounding boxes\n >>> pipe = StableDiffusionGLIGENPipeline.from_pretrained(\n ... "masterful/gligen-1-4-inpainting-text-box", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> input_image = load_image(\n ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png"\n ... )\n >>> prompt = "a birthday cake"\n >>> boxes = [[0.2676, 0.6088, 0.4773, 0.7183]]\n >>> phrases = ["a birthday cake"]\n\n >>> images = pipe(\n ... prompt=prompt,\n ... gligen_phrases=phrases,\n ... gligen_inpaint_image=input_image,\n ... gligen_boxes=boxes,\n ... gligen_scheduled_sampling_beta=1,\n ... output_type="pil",\n ... num_inference_steps=50,\n ... ).images\n\n >>> images[0].save("./gligen-1-4-inpainting-text-box.jpg")\n\n >>> # Generate an image described by the prompt and\n >>> # insert objects described by text at the region defined by bounding boxes\n >>> pipe = StableDiffusionGLIGENPipeline.from_pretrained(\n ... "masterful/gligen-1-4-generation-text-box", variant="fp16", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a waterfall and a modern high speed train running through the tunnel in a beautiful forest with fall foliage"\n >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]]\n >>> phrases = ["a waterfall", "a modern high speed train running through the tunnel"]\n\n >>> images = pipe(\n ... prompt=prompt,\n ... gligen_phrases=phrases,\n ... gligen_boxes=boxes,\n ... gligen_scheduled_sampling_beta=1,\n ... output_type="pil",\n ... num_inference_steps=50,\n ... ).images\n\n >>> images[0].save("./gligen-1-4-generation-text-box.jpg")\n ```\n' class StableDiffusionGLIGENPipeline(DiffusionPipeline, StableDiffusionMixin): _optional_components = ['safety_checker', 'feature_extractor'] model_cpu_offload_seq = 'text_encoder->unet->vae' _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = 
self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in 
set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, gligen_phrases, gligen_boxes, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if len(gligen_phrases) != len(gligen_boxes): raise ValueError(f'length of `gligen_phrases` and `gligen_boxes` has to be same, but got: `gligen_phrases` {len(gligen_phrases)} != `gligen_boxes` {len(gligen_boxes)}') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def enable_fuser(self, enabled=True): for module in self.unet.modules(): if type(module) is GatedSelfAttentionDense: module.enabled = enabled def draw_inpaint_mask_from_boxes(self, boxes, size): inpaint_mask = torch.ones(size[0], size[1]) for box in boxes: (x0, x1) = (box[0] * size[0], box[2] * size[0]) (y0, y1) = (box[1] * size[1], box[3] * size[1]) inpaint_mask[int(y0):int(y1), int(x0):int(x1)] = 0 return inpaint_mask def crop(self, im, new_width, new_height): (width, height) = im.size left = (width - new_width) / 2 top = (height - new_height) / 2 right = (width + new_width) / 2 bottom = (height + new_height) / 2 return im.crop((left, top, right, bottom)) def target_size_center_crop(self, im, new_hw): (width, height) = im.size if width != height: im = self.crop(im, min(height, width), min(height, width)) return im.resize((new_hw, new_hw), PIL.Image.LANCZOS) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, gligen_scheduled_sampling_beta: float=0.3, gligen_phrases: List[str]=None, gligen_boxes: List[List[float]]=None, gligen_inpaint_image: Optional[PIL.Image.Image]=None, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, gligen_phrases, gligen_boxes, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) max_objs = 30 if len(gligen_boxes) > max_objs: warnings.warn(f'More that {max_objs} objects found. 
Only first {max_objs} objects will be processed.', FutureWarning) gligen_phrases = gligen_phrases[:max_objs] gligen_boxes = gligen_boxes[:max_objs] tokenizer_inputs = self.tokenizer(gligen_phrases, padding=True, return_tensors='pt').to(device) _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output n_objs = len(gligen_boxes) boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) boxes[:n_objs] = torch.tensor(gligen_boxes) text_embeddings = torch.zeros(max_objs, self.unet.config.cross_attention_dim, device=device, dtype=self.text_encoder.dtype) text_embeddings[:n_objs] = _text_embeddings masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) masks[:n_objs] = 1 repeat_batch = batch_size * num_images_per_prompt boxes = boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone() text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone() masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone() if do_classifier_free_guidance: repeat_batch = repeat_batch * 2 boxes = torch.cat([boxes] * 2) text_embeddings = torch.cat([text_embeddings] * 2) masks = torch.cat([masks] * 2) masks[:repeat_batch // 2] = 0 if cross_attention_kwargs is None: cross_attention_kwargs = {} cross_attention_kwargs['gligen'] = {'boxes': boxes, 'positive_embeddings': text_embeddings, 'masks': masks} if gligen_inpaint_image is not None: if gligen_inpaint_image.size != (self.vae.sample_size, self.vae.sample_size): gligen_inpaint_image = self.target_size_center_crop(gligen_inpaint_image, self.vae.sample_size) gligen_inpaint_image = self.image_processor.preprocess(gligen_inpaint_image) gligen_inpaint_image = gligen_inpaint_image.to(dtype=self.vae.dtype, device=self.vae.device) gligen_inpaint_latent = self.vae.encode(gligen_inpaint_image).latent_dist.sample() gligen_inpaint_latent = self.vae.config.scaling_factor * gligen_inpaint_latent gligen_inpaint_mask = self.draw_inpaint_mask_from_boxes(gligen_boxes, gligen_inpaint_latent.shape[2:]) gligen_inpaint_mask = gligen_inpaint_mask.to(dtype=gligen_inpaint_latent.dtype, device=gligen_inpaint_latent.device) gligen_inpaint_mask = gligen_inpaint_mask[None, None] gligen_inpaint_mask_addition = torch.cat((gligen_inpaint_latent * gligen_inpaint_mask, gligen_inpaint_mask), dim=1) gligen_inpaint_mask_addition = gligen_inpaint_mask_addition.expand(repeat_batch, -1, -1, -1).clone() num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps)) self.enable_fuser(True) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if i == num_grounding_steps: self.enable_fuser(False) if latents.shape[1] != 4: latents = torch.randn_like(latents[:, :4]) if gligen_inpaint_image is not None: gligen_inpaint_latent_with_noise = self.scheduler.add_noise(gligen_inpaint_latent, torch.randn_like(gligen_inpaint_latent), torch.tensor([t])).expand(latents.shape[0], -1, -1, -1).clone() latents = gligen_inpaint_latent_with_noise * gligen_inpaint_mask + latents * (1 - gligen_inpaint_mask) latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if gligen_inpaint_image is not None: latent_model_input = torch.cat((latent_model_input, gligen_inpaint_mask_addition), dim=1) noise_pred = self.unet(latent_model_input, t, 
encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py import inspect import warnings from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention import GatedSelfAttentionDense from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.clip_image_project_model import CLIPImageProjection from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionGLIGENTextImagePipeline\n >>> from diffusers.utils import load_image\n\n >>> # Insert objects described by image at the region defined by bounding boxes\n >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained(\n ... "anhnct/Gligen_Inpainting_Text_Image", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> input_image = load_image(\n ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png"\n ... )\n >>> prompt = "a backpack"\n >>> boxes = [[0.2676, 0.4088, 0.4773, 0.7183]]\n >>> phrases = None\n >>> gligen_image = load_image(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/backpack.jpeg"\n ... )\n\n >>> images = pipe(\n ... prompt=prompt,\n ... gligen_phrases=phrases,\n ... gligen_inpaint_image=input_image,\n ... gligen_boxes=boxes,\n ... gligen_images=[gligen_image],\n ... gligen_scheduled_sampling_beta=1,\n ... 
output_type="pil",\n ... num_inference_steps=50,\n ... ).images\n\n >>> images[0].save("./gligen-inpainting-text-image-box.jpg")\n\n >>> # Generate an image described by the prompt and\n >>> # insert objects described by text and image at the region defined by bounding boxes\n >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained(\n ... "anhnct/Gligen_Text_Image", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a flower sitting on the beach"\n >>> boxes = [[0.0, 0.09, 0.53, 0.76]]\n >>> phrases = ["flower"]\n >>> gligen_image = load_image(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/pexels-pixabay-60597.jpg"\n ... )\n\n >>> images = pipe(\n ... prompt=prompt,\n ... gligen_phrases=phrases,\n ... gligen_images=[gligen_image],\n ... gligen_boxes=boxes,\n ... gligen_scheduled_sampling_beta=1,\n ... output_type="pil",\n ... num_inference_steps=50,\n ... ).images\n\n >>> images[0].save("./gligen-generation-text-image-box.jpg")\n\n >>> # Generate an image described by the prompt and\n >>> # transfer style described by image at the region defined by bounding boxes\n >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained(\n ... "anhnct/Gligen_Text_Image", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a dragon flying on the sky"\n >>> boxes = [[0.4, 0.2, 1.0, 0.8], [0.0, 1.0, 0.0, 1.0]] # Set `[0.0, 1.0, 0.0, 1.0]` for the style\n\n >>> gligen_image = load_image(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png"\n ... )\n\n >>> gligen_placeholder = load_image(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png"\n ... )\n\n >>> images = pipe(\n ... prompt=prompt,\n ... gligen_phrases=[\n ... "dragon",\n ... "placeholder",\n ... ], # Can use any text instead of `placeholder` token, because we will use mask here\n ... gligen_images=[\n ... gligen_placeholder,\n ... gligen_image,\n ... ], # Can use any image in gligen_placeholder, because we will use mask here\n ... input_phrases_mask=[1, 0], # Set 0 for the placeholder token\n ... input_images_mask=[0, 1], # Set 0 for the placeholder image\n ... gligen_boxes=boxes,\n ... gligen_scheduled_sampling_beta=1,\n ... output_type="pil",\n ... num_inference_steps=50,\n ... ).images\n\n >>> images[0].save("./gligen-generation-text-image-box-style-transfer.jpg")\n ```\n' class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline, StableDiffusionMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, processor: CLIPProcessor, image_encoder: CLIPVisionModelWithProjection, image_project: CLIPImageProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. 
Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, image_encoder=image_encoder, processor=processor, image_project=image_project, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.register_to_config(requires_safety_checker=requires_safety_checker) def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = 
prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if 
callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def enable_fuser(self, enabled=True): for module in self.unet.modules(): if type(module) is GatedSelfAttentionDense: module.enabled = enabled def draw_inpaint_mask_from_boxes(self, boxes, size): inpaint_mask = torch.ones(size[0], size[1]) for box in boxes: (x0, x1) = (box[0] * size[0], box[2] * size[0]) (y0, y1) = (box[1] * size[1], box[3] * size[1]) inpaint_mask[int(y0):int(y1), int(x0):int(x1)] = 0 return inpaint_mask def crop(self, im, new_width, new_height): (width, height) = im.size left = (width - new_width) / 2 top = (height - new_height) / 2 right = (width + new_width) / 2 bottom = (height + new_height) / 2 return im.crop((left, top, right, bottom)) def target_size_center_crop(self, im, new_hw): (width, height) = im.size if width != height: im = self.crop(im, min(height, width), min(height, width)) return im.resize((new_hw, new_hw), PIL.Image.LANCZOS) def complete_mask(self, has_mask, max_objs, device): mask = torch.ones(1, max_objs).type(self.text_encoder.dtype).to(device) if has_mask is None: return mask if isinstance(has_mask, int): return mask * has_mask else: for (idx, value) in enumerate(has_mask): mask[0, idx] = value return mask def get_clip_feature(self, input, normalize_constant, device, is_image=False): if is_image: if input is None: return None inputs = self.processor(images=[input], return_tensors='pt').to(device) inputs['pixel_values'] = inputs['pixel_values'].to(self.image_encoder.dtype) outputs = self.image_encoder(**inputs) feature = outputs.image_embeds feature = self.image_project(feature).squeeze(0) feature = feature / feature.norm() * normalize_constant feature = feature.unsqueeze(0) else: if input is None: return None inputs = self.tokenizer(input, return_tensors='pt', padding=True).to(device) outputs = self.text_encoder(**inputs) feature = outputs.pooler_output return feature def get_cross_attention_kwargs_with_grounded(self, hidden_size, gligen_phrases, gligen_images, gligen_boxes, input_phrases_mask, input_images_mask, repeat_batch, normalize_constant, max_objs, device): (phrases, images) = (gligen_phrases, gligen_images) images = [None] * len(phrases) if images is None else images phrases = [None] * len(images) if phrases is None else phrases boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) phrases_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) image_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) phrases_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) image_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) text_features = [] image_features = [] for (phrase, image) in zip(phrases, images): text_features.append(self.get_clip_feature(phrase, normalize_constant, device, is_image=False)) image_features.append(self.get_clip_feature(image, normalize_constant, device, is_image=True)) for (idx, (box, text_feature, image_feature)) in enumerate(zip(gligen_boxes, text_features, image_features)): boxes[idx] = torch.tensor(box) masks[idx] = 1 if text_feature is not None: phrases_embeddings[idx] = text_feature phrases_masks[idx] = 1 if image_feature is not 
None: image_embeddings[idx] = image_feature image_masks[idx] = 1 input_phrases_mask = self.complete_mask(input_phrases_mask, max_objs, device) phrases_masks = phrases_masks.unsqueeze(0).repeat(repeat_batch, 1) * input_phrases_mask input_images_mask = self.complete_mask(input_images_mask, max_objs, device) image_masks = image_masks.unsqueeze(0).repeat(repeat_batch, 1) * input_images_mask boxes = boxes.unsqueeze(0).repeat(repeat_batch, 1, 1) masks = masks.unsqueeze(0).repeat(repeat_batch, 1) phrases_embeddings = phrases_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1) image_embeddings = image_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1) out = {'boxes': boxes, 'masks': masks, 'phrases_masks': phrases_masks, 'image_masks': image_masks, 'phrases_embeddings': phrases_embeddings, 'image_embeddings': image_embeddings} return out def get_cross_attention_kwargs_without_grounded(self, hidden_size, repeat_batch, max_objs, device): boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) phrases_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) image_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) phrases_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) image_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) out = {'boxes': boxes.unsqueeze(0).repeat(repeat_batch, 1, 1), 'masks': masks.unsqueeze(0).repeat(repeat_batch, 1), 'phrases_masks': phrases_masks.unsqueeze(0).repeat(repeat_batch, 1), 'image_masks': image_masks.unsqueeze(0).repeat(repeat_batch, 1), 'phrases_embeddings': phrases_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1), 'image_embeddings': image_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1)} return out @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, gligen_scheduled_sampling_beta: float=0.3, gligen_phrases: List[str]=None, gligen_images: List[PIL.Image.Image]=None, input_phrases_mask: Union[int, List[int]]=None, input_images_mask: Union[int, List[int]]=None, gligen_boxes: List[List[float]]=None, gligen_inpaint_image: Optional[PIL.Image.Image]=None, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, gligen_normalize_constant: float=28.7, clip_skip: int=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 (prompt_embeds, 
negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) max_objs = 30 if len(gligen_boxes) > max_objs: warnings.warn(f'More than {max_objs} objects found. Only the first {max_objs} objects will be processed.', FutureWarning) gligen_phrases = gligen_phrases[:max_objs] gligen_boxes = gligen_boxes[:max_objs] gligen_images = gligen_images[:max_objs] repeat_batch = batch_size * num_images_per_prompt if do_classifier_free_guidance: repeat_batch = repeat_batch * 2 if cross_attention_kwargs is None: cross_attention_kwargs = {} hidden_size = prompt_embeds.shape[2] cross_attention_kwargs['gligen'] = self.get_cross_attention_kwargs_with_grounded(hidden_size=hidden_size, gligen_phrases=gligen_phrases, gligen_images=gligen_images, gligen_boxes=gligen_boxes, input_phrases_mask=input_phrases_mask, input_images_mask=input_images_mask, repeat_batch=repeat_batch, normalize_constant=gligen_normalize_constant, max_objs=max_objs, device=device) cross_attention_kwargs_without_grounded = {} cross_attention_kwargs_without_grounded['gligen'] = self.get_cross_attention_kwargs_without_grounded(hidden_size=hidden_size, repeat_batch=repeat_batch, max_objs=max_objs, device=device) if gligen_inpaint_image is not None: if gligen_inpaint_image.size != (self.vae.sample_size, self.vae.sample_size): gligen_inpaint_image = self.target_size_center_crop(gligen_inpaint_image, self.vae.sample_size) gligen_inpaint_image = self.image_processor.preprocess(gligen_inpaint_image) gligen_inpaint_image = gligen_inpaint_image.to(dtype=self.vae.dtype, device=self.vae.device) gligen_inpaint_latent = self.vae.encode(gligen_inpaint_image).latent_dist.sample() gligen_inpaint_latent = self.vae.config.scaling_factor * gligen_inpaint_latent gligen_inpaint_mask = self.draw_inpaint_mask_from_boxes(gligen_boxes, gligen_inpaint_latent.shape[2:]) gligen_inpaint_mask = gligen_inpaint_mask.to(dtype=gligen_inpaint_latent.dtype, device=gligen_inpaint_latent.device) gligen_inpaint_mask = gligen_inpaint_mask[None, None] gligen_inpaint_mask_addition = torch.cat((gligen_inpaint_latent * gligen_inpaint_mask, gligen_inpaint_mask), dim=1) gligen_inpaint_mask_addition = gligen_inpaint_mask_addition.expand(repeat_batch, -1, -1, -1).clone() int(gligen_scheduled_sampling_beta * len(timesteps)) self.enable_fuser(True) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if latents.shape[1] != 4: latents = torch.randn_like(latents[:, :4]) if gligen_inpaint_image is not None: gligen_inpaint_latent_with_noise = self.scheduler.add_noise(gligen_inpaint_latent, torch.randn_like(gligen_inpaint_latent), torch.tensor([t])).expand(latents.shape[0], -1, -1, -1).clone() latents = gligen_inpaint_latent_with_noise * gligen_inpaint_mask + latents * (1 - gligen_inpaint_mask) latent_model_input = torch.cat([latents] * 2) if 
do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if gligen_inpaint_image is not None: latent_model_input = torch.cat((latent_model_input, gligen_inpaint_mask_addition), dim=1) noise_pred_with_grounding = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample noise_pred_without_grounding = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs_without_grounded).sample if do_classifier_free_guidance: (_, noise_pred_text) = noise_pred_with_grounding.chunk(2) (noise_pred_uncond, _) = noise_pred_without_grounding.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) else: noise_pred = noise_pred_with_grounding latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_k_diffusion_available, is_k_diffusion_version, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_k_diffusion_available() and is_k_diffusion_version('>=', '0.0.12')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_and_k_diffusion_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) else: _import_structure['pipeline_stable_diffusion_k_diffusion'] = ['StableDiffusionKDiffusionPipeline'] _import_structure['pipeline_stable_diffusion_xl_k_diffusion'] = ['StableDiffusionXLKDiffusionPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_k_diffusion_available() and is_k_diffusion_version('>=', '0.0.12')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline from .pipeline_stable_diffusion_xl_k_diffusion import StableDiffusionXLKDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): 
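# The placeholder classes gathered in _dummy_objects are attached to the lazily constructed module in the statement below, so that names such as StableDiffusionKDiffusionPipeline can still be imported when torch, transformers, or k-diffusion is missing and only raise an error naming the absent optional dependency once they are actually used.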
setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py import importlib import inspect from typing import Callable, List, Optional, Union import torch from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import LMSDiscreteScheduler from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) class ModelWrapper: def __init__(self, model, alphas_cumprod): self.model = model self.alphas_cumprod = alphas_cumprod def apply_model(self, *args, **kwargs): if len(args) == 3: encoder_hidden_states = args[-1] args = args[:2] if kwargs.get('cond', None) is not None: encoder_hidden_states = kwargs.pop('cond') return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample class StableDiffusionKDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker: bool=True): super().__init__() logger.info(f'{self.__class__} is an experimental pipeline and is likely to change in the future. We recommend using this pipeline for fast experimentation / iteration if needed, but advise relying on existing pipelines as defined in https://huggingface.co/docs/diffusers/api/schedulers#implemented-schedulers for production settings.') scheduler = LMSDiscreteScheduler.from_config(scheduler.config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) model = ModelWrapper(unet, scheduler.alphas_cumprod) if scheduler.config.prediction_type == 'v_prediction': self.k_diffusion_model = CompVisVDenoiser(model) else: self.k_diffusion_model = CompVisDenoiser(model) def set_scheduler(self, scheduler_type: str): library = importlib.import_module('k_diffusion') sampling = getattr(library, 'sampling') try: self.sampler = getattr(sampling, scheduler_type) except Exception: valid_samplers = [] for s in dir(sampling): if 'sample_' in s: valid_samplers.append(s) raise ValueError(f'Invalid scheduler type {scheduler_type}. 
Please choose one of {valid_samplers}.') def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, 
seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) return latents @torch.no_grad() def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, use_karras_sigmas: Optional[bool]=False, noise_sampler_seed: Optional[int]=None, clip_skip: int=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = True if guidance_scale <= 1.0: raise ValueError('has to use guidance_scale') (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) if use_karras_sigmas: sigma_min: float = self.k_diffusion_model.sigmas[0].item() sigma_max: float = self.k_diffusion_model.sigmas[-1].item() sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) else: sigmas = self.scheduler.sigmas sigmas = sigmas.to(device) sigmas = sigmas.to(prompt_embeds.dtype) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) latents = latents * sigmas[0] self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) def model_fn(x, t): latent_model_input = torch.cat([x] * 2) t = torch.cat([t] * 2) noise_pred = self.k_diffusion_model(latent_model_input, t, cond=prompt_embeds) 
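# prompt_embeds was assembled above as torch.cat([negative_prompt_embeds, prompt_embeds]), so the doubled-batch prediction splits into the unconditional half followed by the text-conditioned half.
# The two halves are then blended with the standard classifier-free guidance rule: uncond + guidance_scale * (text - uncond).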
(noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) return noise_pred sampler_kwargs = {} if 'noise_sampler' in inspect.signature(self.sampler).parameters: (min_sigma, max_sigma) = (sigmas[sigmas > 0].min(), sigmas.max()) noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) sampler_kwargs['noise_sampler'] = noise_sampler if 'generator' in inspect.signature(self.sampler).parameters: sampler_kwargs['generator'] = generator latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py import importlib import inspect from typing import List, Optional, Tuple, Union import torch from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionXLKDiffusionPipeline\n\n >>> pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16\n ... 
)\n >>> pipe = pipe.to("cuda")\n >>> pipe.set_scheduler("sample_dpmpp_2m_sde")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt).images[0]\n ```\n' class ModelWrapper: def __init__(self, model, alphas_cumprod): self.model = model self.alphas_cumprod = alphas_cumprod def apply_model(self, *args, **kwargs): if len(args) == 3: encoder_hidden_states = args[-1] args = args[:2] if kwargs.get('cond', None) is not None: encoder_hidden_states = kwargs.pop('cond') return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample class StableDiffusionXLKDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'feature_extractor'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool=True): super().__init__() scheduler = LMSDiscreteScheduler.from_config(scheduler.config) self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size model = ModelWrapper(unet, scheduler.alphas_cumprod) if scheduler.config.prediction_type == 'v_prediction': self.k_diffusion_model = CompVisVDenoiser(model) else: self.k_diffusion_model = CompVisDenoiser(model) def set_scheduler(self, scheduler_type: str): library = importlib.import_module('k_diffusion') sampling = getattr(library, 'sampling') try: self.sampler = getattr(sampling, scheduler_type) except Exception: valid_samplers = [] for s in dir(sampling): if 'sample_' in s: valid_samplers.append(s) raise ValueError(f'Invalid scheduler type {scheduler_type}. 
Please choose one of {valid_samplers}.') def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and 
type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def check_inputs(self, prompt, prompt_2, height, width, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, use_karras_sigmas: Optional[bool]=False, noise_sampler_seed: Optional[int]=None, clip_skip: Optional[int]=None): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, prompt_2, height, width, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) if guidance_scale <= 1.0: raise ValueError('has to use guidance_scale') self._guidance_scale = guidance_scale self._clip_skip = clip_skip if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip) self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) if use_karras_sigmas: sigma_min: float = self.k_diffusion_model.sigmas[0].item() sigma_max: float = 
self.k_diffusion_model.sigmas[-1].item() sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) else: sigmas = self.scheduler.sigmas sigmas = sigmas.to(dtype=prompt_embeds.dtype, device=device) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) latents = latents * sigmas[0] self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) def model_fn(x, t): latent_model_input = torch.cat([x] * 2) t = torch.cat([t] * 2) noise_pred = self.k_diffusion_model(latent_model_input, t, cond=prompt_embeds, timestep_cond=timestep_cond, added_cond_kwargs=added_cond_kwargs) (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) return noise_pred sampler_kwargs = {} if 'noise_sampler' in inspect.signature(self.sampler).parameters: (min_sigma, max_sigma) = (sigmas[sigmas > 0].min(), sigmas.max()) noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) sampler_kwargs['noise_sampler'] = noise_sampler if 'generator' in inspect.signature(self.sampler).parameters: sampler_kwargs['generator'] = generator latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': image = self.image_processor.postprocess(image, 
output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_ldm3d'] = ['StableDiffusionLDM3DPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py import inspect from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessorLDM3D from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```python\n >>> from diffusers import StableDiffusionLDM3DPipeline\n\n >>> pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c")\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> output = pipe(prompt)\n >>> rgb_image, depth_image = output.rgb, output.depth\n >>> rgb_image[0].save("astronaut_ldm3d_rgb.jpg")\n >>> depth_image[0].save("astronaut_ldm3d_depth.png")\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: 
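# NOTE: `rescale_noise_cfg` above implements the guidance-rescale technique from "Common Diffusion Noise
# Schedules and Sample Steps are Flawed" (Lin et al., 2023): the CFG prediction is rescaled toward the
# standard deviation of the text-conditioned prediction and blended back in via `guidance_rescale`.
# `retrieve_timesteps` inspects the signature of `scheduler.set_timesteps` to decide whether custom
# `timesteps` or `sigmas` are supported and returns the resolved `(timesteps, num_inference_steps)` pair.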
Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) @dataclass class LDM3DPipelineOutput(BaseOutput): rgb: Union[List[PIL.Image.Image], np.ndarray] depth: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] class StableDiffusionLDM3DPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection], requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = 
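# NOTE: with `clip_skip`, the text encoder is run with `output_hidden_states=True`; `[-1]` selects the
# tuple of hidden states and `[-(clip_skip + 1)]` picks an earlier CLIP layer instead of the final one.
# The text model's `final_layer_norm` is still applied to that intermediate representation just below.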
self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, 
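# This helper encodes each IP-Adapter input image with the image encoder (or, if precomputed
# `ip_adapter_image_embeds` are passed, splits them into negative/positive halves), repeats the
# embeddings for `num_images_per_prompt`, and, under classifier-free guidance, prepends the
# unconditional (zero-image) embeddings so they line up with the duplicated text embeddings.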
ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) rgb_feature_extractor_input = feature_extractor_input[0] safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in 
self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=49, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if ip_adapter_image is 
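# NOTE: `get_guidance_scale_embedding` (defined above) builds a sinusoidal embedding of
# w = guidance_scale - 1 scaled by 1000, concatenating sin and cos features across `embedding_dim`.
# It is only used to form `timestep_cond` when the UNet was trained with `time_cond_proj_dim`
# (guidance-distilled models such as LCM), in which case classifier-free guidance itself is disabled.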
not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None else None timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in 
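# NOTE: unlike the standard Stable Diffusion pipelines, `VaeImageProcessorLDM3D.postprocess` returns a
# (rgb, depth) pair, so the call below unpacks two outputs and the result is wrapped in
# `LDM3DPipelineOutput(rgb=..., depth=..., nsfw_content_detected=...)`.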
has_nsfw_concept] (rgb, depth) = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return ((rgb, depth), has_nsfw_concept) return LDM3DPipelineOutput(rgb=rgb, depth=depth, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_panorama'] = ['StableDiffusionPanoramaPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py import copy import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import DDIMScheduler from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler\n\n >>> model_ckpt = "stabilityai/stable-diffusion-2-base"\n >>> scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")\n >>> pipe = StableDiffusionPanoramaPipeline.from_pretrained(\n ... model_ckpt, scheduler=scheduler, torch_dtype=torch.float16\n ... 
)\n\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of the dolomites"\n >>> image = pipe(prompt).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionPanoramaPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection]=None, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError("Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = 
self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, 
uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def decode_latents_with_padding(self, latents: torch.Tensor, padding: int=8) -> torch.Tensor: latents = 1 / self.vae.config.scaling_factor * latents latents_left = latents[..., :padding] latents_right = latents[..., -padding:] latents = torch.cat((latents_right, latents, latents_left), axis=-1) image = self.vae.decode(latents, return_dict=False)[0] padding_pix = self.vae_scale_factor * padding image = image[..., padding_pix:-padding_pix] return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb def get_views(self, panorama_height: int, panorama_width: int, window_size: int=64, stride: int=8, circular_padding: bool=False) -> List[Tuple[int, int, int, int]]: panorama_height /= 8 panorama_width /= 8 num_blocks_height = (panorama_height - window_size) // stride + 1 if panorama_height > window_size else 1 if circular_padding: num_blocks_width = panorama_width // stride if panorama_width > window_size else 1 else: num_blocks_width = (panorama_width - window_size) // stride + 1 if panorama_width > window_size else 1 total_num_blocks = int(num_blocks_height * num_blocks_width) views = [] for i in range(total_num_blocks): h_start = int(i // num_blocks_width * stride) h_end = h_start + window_size w_start = int(i % num_blocks_width * stride) w_end = w_start + window_size views.append((h_start, h_end, w_start, w_end)) return views @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return False @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=512, width: Optional[int]=2048, num_inference_steps: int=50, timesteps: List[int]=None, guidance_scale: float=7.5, view_batch_size: int=1, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, 
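# NOTE: in this pipeline the `do_classifier_free_guidance` property (above) always returns False. Inside
# `__call__`, a local `do_classifier_free_guidance = guidance_scale > 1.0` flag drives prompt encoding and
# the per-view guidance, while the property value is what gets passed to `prepare_ip_adapter_image_embeds`.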
negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, circular_padding: bool=False, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs: Any): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) views = self.get_views(height, width, circular_padding=circular_padding) views_batch = [views[i:i + view_batch_size] for i in range(0, len(views), view_batch_size)] views_scheduler_status = [copy.deepcopy(self.scheduler.__dict__)] * len(views_batch) count = torch.zeros_like(latents) value = torch.zeros_like(latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = 
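# NOTE: `get_views` works in latent space (pixel height/width divided by 8): each view is a
# `window_size` x `window_size` crop (default 64 latents, i.e. 512 px after the 8x VAE upscale) taken
# every `stride` latents (default 8, i.e. 64 px), and `circular_padding=True` adds horizontally wrapping
# views for seamless 360-degree panoramas. Views are denoised in chunks of `view_batch_size`, with a
# deep copy of the scheduler state kept per chunk so stateful schedulers stay consistent across views.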
self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue count.zero_() value.zero_() for (j, batch_view) in enumerate(views_batch): vb_size = len(batch_view) if circular_padding: latents_for_view = [] for (h_start, h_end, w_start, w_end) in batch_view: if w_end > latents.shape[3]: latent_view = torch.cat((latents[:, :, h_start:h_end, w_start:], latents[:, :, h_start:h_end, :w_end - latents.shape[3]]), axis=-1) else: latent_view = latents[:, :, h_start:h_end, w_start:w_end] latents_for_view.append(latent_view) latents_for_view = torch.cat(latents_for_view) else: latents_for_view = torch.cat([latents[:, :, h_start:h_end, w_start:w_end] for (h_start, h_end, w_start, w_end) in batch_view]) self.scheduler.__dict__.update(views_scheduler_status[j]) latent_model_input = latents_for_view.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latents_for_view latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) prompt_embeds_input = torch.cat([prompt_embeds] * vb_size) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds_input, timestep_cond=timestep_cond, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = (noise_pred[::2], noise_pred[1::2]) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents_denoised_batch = self.scheduler.step(noise_pred, t, latents_for_view, **extra_step_kwargs).prev_sample views_scheduler_status[j] = copy.deepcopy(self.scheduler.__dict__) for (latents_view_denoised, (h_start, h_end, w_start, w_end)) in zip(latents_denoised_batch.chunk(vb_size), batch_view): if circular_padding and w_end > latents.shape[3]: value[:, :, h_start:h_end, w_start:] += latents_view_denoised[:, :, h_start:h_end, :latents.shape[3] - w_start] value[:, :, h_start:h_end, :w_end - latents.shape[3]] += latents_view_denoised[:, :, h_start:h_end, latents.shape[3] - w_start:] count[:, :, h_start:h_end, w_start:] += 1 count[:, :, h_start:h_end, :w_end - latents.shape[3]] += 1 else: value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised count[:, :, h_start:h_end, w_start:w_end] += 1 latents = torch.where(count > 0, value / count, value) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if output_type != 'latent': if circular_padding: image = 
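# NOTE: the loop above performs MultiDiffusion-style fusion: every view is denoised independently at each
# timestep, the denoised latents are accumulated into `value` with a per-location `count`, and
# `latents = value / count` averages the overlapping windows (wrap-around columns are handled separately
# when `circular_padding` is on). At decode time, `decode_latents_with_padding` wraps a few latent
# columns around both edges before the VAE decode and crops them afterwards, so the seam stays consistent.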
self.decode_latents_with_padding(latents) else: image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_safe/__init__.py from dataclasses import dataclass from enum import Enum from typing import TYPE_CHECKING, List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import DIFFUSERS_SLOW_IMPORT, BaseOutput, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available @dataclass class SafetyConfig(object): WEAK = {'sld_warmup_steps': 15, 'sld_guidance_scale': 20, 'sld_threshold': 0.0, 'sld_momentum_scale': 0.0, 'sld_mom_beta': 0.0} MEDIUM = {'sld_warmup_steps': 10, 'sld_guidance_scale': 1000, 'sld_threshold': 0.01, 'sld_momentum_scale': 0.3, 'sld_mom_beta': 0.4} STRONG = {'sld_warmup_steps': 7, 'sld_guidance_scale': 2000, 'sld_threshold': 0.025, 'sld_momentum_scale': 0.5, 'sld_mom_beta': 0.7} MAX = {'sld_warmup_steps': 0, 'sld_guidance_scale': 5000, 'sld_threshold': 1.0, 'sld_momentum_scale': 0.5, 'sld_mom_beta': 0.7} _dummy_objects = {} _additional_imports = {} _import_structure = {} _additional_imports.update({'SafetyConfig': SafetyConfig}) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure.update({'pipeline_output': ['StableDiffusionSafePipelineOutput'], 'pipeline_stable_diffusion_safe': ['StableDiffusionPipelineSafe'], 'safety_checker': ['StableDiffusionSafetyChecker']}) if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_output import StableDiffusionSafePipelineOutput from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe from .safety_checker import SafeStableDiffusionSafetyChecker else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for (name, value) in _additional_imports.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class StableDiffusionSafePipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] unsafe_images: Optional[Union[List[PIL.Image.Image], np.ndarray]] applied_safety_concept: Optional[str] # File: 
diffusers-main/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py import inspect import warnings from typing import Callable, List, Optional, Union import numpy as np import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput from ...loaders import IPAdapterMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import deprecate, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import StableDiffusionSafePipelineOutput from .safety_checker import SafeStableDiffusionSafetyChecker logger = logging.get_logger(__name__) class StableDiffusionPipelineSafe(DiffusionPipeline, StableDiffusionMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: SafeStableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection]=None, requires_safety_checker: bool=True): super().__init__() safety_concept: Optional[str] = 'an image showing hate, harassment, violence, suffering, humiliation, harm, suicide, sexual, nudity, bodily fluids, blood, obscene gestures, illegal activity, drug use, theft, vandalism, weapons, child abuse, brutality, cruelty' if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1: deprecation_message = f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['steps_offset'] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, 'clip_sample') and scheduler.config.clip_sample is True: deprecation_message = f'The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file' deprecate('clip_sample not set', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config['clip_sample'] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") is_unet_version_less_0_9_0 = hasattr(unet.config, '_diffusers_version') and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse('0.9.0.dev0') is_unet_sample_size_less_64 = hasattr(unet.config, 'sample_size') and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = "The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n- CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5 \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file" deprecate('sample_size<64', '1.0.0', deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config['sample_size'] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self._safety_text_concept = safety_concept self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.register_to_config(requires_safety_checker=requires_safety_checker) @property def safety_concept(self): return self._safety_text_concept @safety_concept.setter def safety_concept(self, concept): self._safety_text_concept = concept def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='max_length', return_tensors='pt').input_ids if not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: 
attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if enable_safety_guidance: safety_concept_input = self.tokenizer([self._safety_text_concept], padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0] seq_len = safety_embeddings.shape[1] safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1) safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, safety_embeddings]) else: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def run_safety_checker(self, image, device, dtype, enable_safety_guidance): if self.safety_checker is not None: images = image.copy() safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) flagged_images = np.zeros((2, *image.shape[1:])) if any(has_nsfw_concept): logger.warning(f"Potential NSFW content was detected in one or more images. A black image will be returned instead.{('You may look at this images in the `unsafe_images` variable of the output at your own discretion.' 
if enable_safety_guidance else 'Try again with a different prompt and/or seed.')}") for (idx, has_nsfw_concept) in enumerate(has_nsfw_concept): if has_nsfw_concept: flagged_images[idx] = images[idx] image[idx] = np.zeros(image[idx].shape) else: has_nsfw_concept = None flagged_images = None return (image, has_nsfw_concept, flagged_images) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def perform_safety_guidance(self, enable_safety_guidance, safety_momentum, noise_guidance, noise_pred_out, i, sld_guidance_scale, sld_warmup_steps, sld_threshold, sld_momentum_scale, sld_mom_beta): if enable_safety_guidance: if safety_momentum is None: safety_momentum = torch.zeros_like(noise_guidance) (noise_pred_text, noise_pred_uncond) = (noise_pred_out[0], noise_pred_out[1]) noise_pred_safety_concept = noise_pred_out[2] scale = torch.clamp(torch.abs(noise_pred_text - noise_pred_safety_concept) * sld_guidance_scale, max=1.0) safety_concept_scale = torch.where(noise_pred_text - noise_pred_safety_concept >= sld_threshold, torch.zeros_like(scale), scale) noise_guidance_safety = torch.mul(noise_pred_safety_concept - noise_pred_uncond, safety_concept_scale) noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety if i >= sld_warmup_steps: noise_guidance = noise_guidance - noise_guidance_safety return (noise_guidance, safety_momentum) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, 
List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, sld_guidance_scale: Optional[float]=1000, sld_warmup_steps: Optional[int]=10, sld_threshold: Optional[float]=0.01, sld_momentum_scale: Optional[float]=0.3, sld_mom_beta: Optional[float]=0.4): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps) batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 enable_safety_guidance = sld_guidance_scale > 1.0 and do_classifier_free_guidance if not enable_safety_guidance: warnings.warn('Safety checker disabled!') if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True (image_embeds, negative_image_embeds) = self.encode_image(ip_adapter_image, device, num_images_per_prompt, output_hidden_state) if do_classifier_free_guidance: if enable_safety_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds, image_embeds]) else: image_embeds = torch.cat([negative_image_embeds, image_embeds]) prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None else None safety_momentum = None num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * (3 if enable_safety_guidance else 2)) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs).sample if do_classifier_free_guidance: noise_pred_out = noise_pred.chunk(3 if enable_safety_guidance else 2) (noise_pred_uncond, noise_pred_text) = (noise_pred_out[0], noise_pred_out[1]) noise_guidance = noise_pred_text - noise_pred_uncond if enable_safety_guidance: if safety_momentum is None: safety_momentum = torch.zeros_like(noise_guidance) noise_pred_safety_concept = noise_pred_out[2] scale = torch.clamp(torch.abs(noise_pred_text - noise_pred_safety_concept) * sld_guidance_scale, max=1.0) safety_concept_scale = torch.where(noise_pred_text - noise_pred_safety_concept >= sld_threshold, torch.zeros_like(scale), scale) noise_guidance_safety = torch.mul(noise_pred_safety_concept - noise_pred_uncond, safety_concept_scale) noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety if i >= sld_warmup_steps: noise_guidance = noise_guidance - noise_guidance_safety 
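# Safe Latent Diffusion (SLD) step, summarizing the computation just above:
#   scale     = clamp(|eps_text - eps_safety| * sld_guidance_scale, max=1)
#   gamma     = 0 where (eps_text - eps_safety) >= sld_threshold, else scale
#   g_safety  = (eps_safety - eps_uncond) * gamma + sld_momentum_scale * momentum
#   momentum <- sld_mom_beta * momentum + (1 - sld_mom_beta) * g_safety
# Only after `sld_warmup_steps` is g_safety subtracted from the regular
# classifier-free guidance direction before it is scaled by `guidance_scale` below.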
noise_pred = noise_pred_uncond + guidance_scale * noise_guidance latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) image = self.decode_latents(latents) (image, has_nsfw_concept, flagged_images) = self.run_safety_checker(image, device, prompt_embeds.dtype, enable_safety_guidance) if output_type == 'pil': image = self.numpy_to_pil(image) if flagged_images is not None: flagged_images = self.numpy_to_pil(flagged_images) if not return_dict: return (image, has_nsfw_concept, self._safety_text_concept if enable_safety_guidance else None, flagged_images) return StableDiffusionSafePipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept, applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None, unsafe_images=flagged_images) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging logger = logging.get_logger(__name__) def cosine_distance(image_embeds, text_embeds): normalized_image_embeds = nn.functional.normalize(image_embeds) normalized_text_embeds = nn.functional.normalize(text_embeds) return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) class SafeStableDiffusionSafetyChecker(PreTrainedModel): config_class = CLIPConfig _no_split_modules = ['CLIPEncoderLayer'] def __init__(self, config: CLIPConfig): super().__init__(config) self.vision_model = CLIPVisionModel(config.vision_config) self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) @torch.no_grad() def forward(self, clip_input, images): pooled_output = self.vision_model(clip_input)[1] image_embeds = self.visual_projection(pooled_output) special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() result = [] batch_size = image_embeds.shape[0] for i in range(batch_size): result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} adjustment = 0.0 for concept_idx in range(len(special_cos_dist[0])): concept_cos = special_cos_dist[i][concept_idx] concept_threshold = self.special_care_embeds_weights[concept_idx].item() result_img['special_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) if result_img['special_scores'][concept_idx] > 0: result_img['special_care'].append({concept_idx, result_img['special_scores'][concept_idx]}) adjustment = 0.01 for concept_idx in range(len(cos_dist[0])): concept_cos = cos_dist[i][concept_idx] concept_threshold = self.concept_embeds_weights[concept_idx].item() result_img['concept_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) if result_img['concept_scores'][concept_idx] > 0: 
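# A concept is flagged when its cosine similarity exceeds the per-concept
# threshold (plus the 0.01 `adjustment` applied once any special-care concept
# has fired); the vectorized equivalent of this loop lives in `forward_onnx` below.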
result_img['bad_concepts'].append(concept_idx) result.append(result_img) has_nsfw_concepts = [len(res['bad_concepts']) > 0 for res in result] return (images, has_nsfw_concepts) @torch.no_grad() def forward_onnx(self, clip_input: torch.Tensor, images: torch.Tensor): pooled_output = self.vision_model(clip_input)[1] image_embeds = self.visual_projection(pooled_output) special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) cos_dist = cosine_distance(image_embeds, self.concept_embeds) adjustment = 0.0 special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment special_care = torch.any(special_scores > 0, dim=1) special_adjustment = special_care * 0.01 special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) concept_scores = cos_dist - self.concept_embeds_weights + special_adjustment has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) return (images, has_nsfw_concepts) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_sag/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_sag'] = ['StableDiffusionSAGPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionSAGPipeline\n\n >>> pipe = StableDiffusionSAGPipeline.from_pretrained(\n ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16\n ... 
)\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt, sag_scale=0.75).images[0]\n ```\n' class CrossAttnStoreProcessor: def __init__(self): self.attention_probs = None def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) self.attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(self.attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' _optional_components = ['safety_checker', 'feature_extractor', 'image_encoder'] _exclude_from_cpu_offload = ['safety_checker'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection]=None, requires_safety_checker: bool=True): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
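# `_encode_prompt` survives only as a deprecated shim around `encode_prompt`:
# it forwards the call and re-concatenates the returned (positive, negative)
# tuple into the single tensor layout older callers expect. A hedged sketch of
# the equivalent downstream call (`pipe` is an assumed pipeline instance, not
# defined here):
#     prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
#         prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
#     )
#     prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])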
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') image_embeds = [] for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) else: image_embeds = ip_adapter_image_embeds return image_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, sag_scale: float=0.75, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 do_self_attention_guidance = sag_scale > 0.0 if ip_adapter_image is not None or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, do_classifier_free_guidance) if do_classifier_free_guidance: image_embeds = [] negative_image_embeds = [] for tmp_image_embeds in ip_adapter_image_embeds: (single_negative_image_embeds, single_image_embeds) = tmp_image_embeds.chunk(2) image_embeds.append(single_image_embeds) 
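# For SAG the negative (unconditional) image embeds are collected in their own
# list rather than concatenated as in the usual CFG batch, because the
# degraded-latents pass further below runs with unconditional inputs only
# (see `added_uncond_kwargs`).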
negative_image_embeds.append(single_negative_image_embeds) else: image_embeds = ip_adapter_image_embeds (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps if timesteps.dtype not in [torch.int16, torch.int32, torch.int64]: raise ValueError(f"{self.__class__.__name__} does not support using a scheduler of type {self.scheduler.__class__.__name__}. Please make sure to use one of 'DDIMScheduler, PNDMScheduler, DDPMScheduler, DEISMultistepScheduler, UniPCMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler'.") num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) added_cond_kwargs = {'image_embeds': image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None if do_classifier_free_guidance: added_uncond_kwargs = {'image_embeds': negative_image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None original_attn_proc = self.unet.attn_processors store_processor = CrossAttnStoreProcessor() self.unet.mid_block.attentions[0].transformer_blocks[0].attn1.processor = store_processor num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order map_size = None def get_map_size(module, input, output): nonlocal map_size map_size = output[0].shape[-2:] with self.unet.mid_block.attentions[0].register_forward_hook(get_map_size): with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_self_attention_guidance: if do_classifier_free_guidance: pred_x0 = self.pred_x0(latents, noise_pred_uncond, t) (uncond_attn, cond_attn) = store_processor.attention_probs.chunk(2) degraded_latents = self.sag_masking(pred_x0, uncond_attn, map_size, t, self.pred_epsilon(latents, noise_pred_uncond, t)) (uncond_emb, _) = prompt_embeds.chunk(2) degraded_pred = self.unet(degraded_latents, t, encoder_hidden_states=uncond_emb, added_cond_kwargs=added_uncond_kwargs).sample noise_pred += sag_scale * (noise_pred_uncond - degraded_pred) else: pred_x0 = self.pred_x0(latents, noise_pred, t) cond_attn = store_processor.attention_probs degraded_latents = self.sag_masking(pred_x0, cond_attn, map_size, t, self.pred_epsilon(latents, noise_pred, t)) degraded_pred = self.unet(degraded_latents, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs).sample noise_pred += sag_scale * (noise_pred - degraded_pred) latents = self.scheduler.step(noise_pred, t, latents, 
**extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def sag_masking(self, original_latents, attn_map, map_size, t, eps): (bh, hw1, hw2) = attn_map.shape (b, latent_channel, latent_h, latent_w) = original_latents.shape h = self.unet.config.attention_head_dim if isinstance(h, list): h = h[-1] attn_map = attn_map.reshape(b, h, hw1, hw2) attn_mask = attn_map.mean(1, keepdim=False).sum(1, keepdim=False) > 1.0 attn_mask = attn_mask.reshape(b, map_size[0], map_size[1]).unsqueeze(1).repeat(1, latent_channel, 1, 1).type(attn_map.dtype) attn_mask = F.interpolate(attn_mask, (latent_h, latent_w)) degraded_latents = gaussian_blur_2d(original_latents, kernel_size=9, sigma=1.0) degraded_latents = degraded_latents * attn_mask + original_latents * (1 - attn_mask) degraded_latents = self.scheduler.add_noise(degraded_latents, noise=eps, timesteps=t[None]) return degraded_latents def pred_x0(self, sample, model_output, timestep): alpha_prod_t = self.scheduler.alphas_cumprod[timestep].to(sample.device) beta_prod_t = 1 - alpha_prod_t if self.scheduler.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.scheduler.config.prediction_type == 'sample': pred_original_sample = model_output elif self.scheduler.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output model_output = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') return pred_original_sample def pred_epsilon(self, sample, model_output, timestep): alpha_prod_t = self.scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t if self.scheduler.config.prediction_type == 'epsilon': pred_eps = model_output elif self.scheduler.config.prediction_type == 'sample': pred_eps = (sample - alpha_prod_t ** 0.5 * model_output) / beta_prod_t ** 0.5 elif self.scheduler.config.prediction_type == 'v_prediction': pred_eps = beta_prod_t ** 0.5 * sample + alpha_prod_t ** 0.5 * model_output else: raise ValueError(f'prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') return pred_eps def gaussian_blur_2d(img, kernel_size, sigma): ksize_half = (kernel_size - 1) * 0.5 x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) pdf = torch.exp(-0.5 * (x / sigma).pow(2)) x_kernel = pdf / pdf.sum() x_kernel = x_kernel.to(device=img.device, 
dtype=img.dtype) kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :]) kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1]) padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2] img = F.pad(img, padding, mode='reflect') img = F.conv2d(img, kernel2d, groups=img.shape[-3]) return img # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_xl/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available _dummy_objects = {} _additional_imports = {} _import_structure = {'pipeline_output': ['StableDiffusionXLPipelineOutput']} if is_transformers_available() and is_flax_available(): _import_structure['pipeline_output'].extend(['FlaxStableDiffusionXLPipelineOutput']) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_xl'] = ['StableDiffusionXLPipeline'] _import_structure['pipeline_stable_diffusion_xl_img2img'] = ['StableDiffusionXLImg2ImgPipeline'] _import_structure['pipeline_stable_diffusion_xl_inpaint'] = ['StableDiffusionXLInpaintPipeline'] _import_structure['pipeline_stable_diffusion_xl_instruct_pix2pix'] = ['StableDiffusionXLInstructPix2PixPipeline'] if is_transformers_available() and is_flax_available(): from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState _additional_imports.update({'PNDMSchedulerState': PNDMSchedulerState}) _import_structure['pipeline_flax_stable_diffusion_xl'] = ['FlaxStableDiffusionXLPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_xl import StableDiffusionXLPipeline from .pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipeline from .pipeline_stable_diffusion_xl_inpaint import StableDiffusionXLInpaintPipeline from .pipeline_stable_diffusion_xl_instruct_pix2pix import StableDiffusionXLInstructPix2PixPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_objects import * else: from .pipeline_flax_stable_diffusion_xl import FlaxStableDiffusionXLPipeline from .pipeline_output import FlaxStableDiffusionXLPipelineOutput else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for (name, value) in _additional_imports.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from transformers import CLIPTokenizer, FlaxCLIPTextModel from diffusers.utils import logging from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from ...schedulers import 
FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler from ..pipeline_flax_utils import FlaxDiffusionPipeline from .pipeline_output import FlaxStableDiffusionXLPipelineOutput logger = logging.get_logger(__name__) DEBUG = False class FlaxStableDiffusionXLPipeline(FlaxDiffusionPipeline): def __init__(self, text_encoder: FlaxCLIPTextModel, text_encoder_2: FlaxCLIPTextModel, vae: FlaxAutoencoderKL, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: FlaxUNet2DConditionModel, scheduler: Union[FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler], dtype: jnp.dtype=jnp.float32): super().__init__() self.dtype = dtype self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def prepare_inputs(self, prompt: Union[str, List[str]]): if not isinstance(prompt, (str, list)): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') inputs = [] for tokenizer in [self.tokenizer, self.tokenizer_2]: text_inputs = tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='np') inputs.append(text_inputs.input_ids) inputs = jnp.stack(inputs, axis=1) return inputs def __call__(self, prompt_ids: jax.Array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int=50, guidance_scale: Union[float, jax.Array]=7.5, height: Optional[int]=None, width: Optional[int]=None, latents: jnp.array=None, neg_prompt_ids: jnp.array=None, return_dict: bool=True, output_type: str=None, jit: bool=False): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(guidance_scale, float) and jit: guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) guidance_scale = guidance_scale[:, None] return_latents = output_type == 'latent' if jit: images = _p_generate(self, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents) else: images = self._generate(prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents) if not return_dict: return (images,) return FlaxStableDiffusionXLPipelineOutput(images=images) def get_embeddings(self, prompt_ids: jnp.array, params): te_1_inputs = prompt_ids[:, 0, :] te_2_inputs = prompt_ids[:, 1, :] prompt_embeds = self.text_encoder(te_1_inputs, params=params['text_encoder'], output_hidden_states=True) prompt_embeds = prompt_embeds['hidden_states'][-2] prompt_embeds_2_out = self.text_encoder_2(te_2_inputs, params=params['text_encoder_2'], output_hidden_states=True) prompt_embeds_2 = prompt_embeds_2_out['hidden_states'][-2] text_embeds = prompt_embeds_2_out['text_embeds'] prompt_embeds = jnp.concatenate([prompt_embeds, prompt_embeds_2], axis=-1) return (prompt_embeds, text_embeds) def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, bs, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = jnp.array([add_time_ids] * bs, dtype=dtype) return add_time_ids def _generate(self, prompt_ids: jnp.array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, height: int, width: int, guidance_scale: float, latents: 
Optional[jnp.array]=None, neg_prompt_ids: Optional[jnp.array]=None, return_latents=False): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') (prompt_embeds, pooled_embeds) = self.get_embeddings(prompt_ids, params) batch_size = prompt_embeds.shape[0] if neg_prompt_ids is None: neg_prompt_embeds = jnp.zeros_like(prompt_embeds) negative_pooled_embeds = jnp.zeros_like(pooled_embeds) else: (neg_prompt_embeds, negative_pooled_embeds) = self.get_embeddings(neg_prompt_ids, params) add_time_ids = self._get_add_time_ids((height, width), (0, 0), (height, width), prompt_embeds.shape[0], dtype=prompt_embeds.dtype) prompt_embeds = jnp.concatenate([neg_prompt_embeds, prompt_embeds], axis=0) add_text_embeds = jnp.concatenate([negative_pooled_embeds, pooled_embeds], axis=0) add_time_ids = jnp.concatenate([add_time_ids, add_time_ids], axis=0) guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) latents_shape = (batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) elif latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') scheduler_state = self.scheduler.set_timesteps(params['scheduler'], num_inference_steps=num_inference_steps, shape=latents.shape) latents = latents * scheduler_state.init_noise_sigma added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} def loop_body(step, args): (latents, scheduler_state) = args latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) noise_pred = self.unet.apply({'params': params['unet']}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs).sample (noise_pred_uncond, noise_prediction_text) = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) (latents, scheduler_state) = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return (latents, scheduler_state) if DEBUG: for i in range(num_inference_steps): (latents, scheduler_state) = loop_body(i, (latents, scheduler_state)) else: (latents, _) = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) if return_latents: return latents latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({'params': params['vae']}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image @partial(jax.pmap, in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0, None), static_broadcasted_argnums=(0, 4, 5, 6, 10)) def _p_generate(pipe, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents): return pipe._generate(prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_output.py from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image from ...utils import BaseOutput, is_flax_available 
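# --- Added usage sketch (not part of the original module): the two dataclasses defined
# below are the return types of the SDXL pipelines in this package. A minimal, hedged
# example of producing and consuming them, mirroring the EXAMPLE_DOC_STRING of
# pipeline_stable_diffusion_xl.py; the checkpoint id, the CUDA device, and the helper
# function name are illustrative assumptions, not part of the library API.
def _example_sdxl_pipeline_output_usage():
    import torch
    from diffusers import StableDiffusionXLPipeline

    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    ).to("cuda")
    prompt = "a photo of an astronaut riding a horse on mars"
    # With return_dict=True (the default) the pipeline returns a StableDiffusionXLPipelineOutput.
    output = pipe(prompt)
    image = output.images[0]  # `images` is a list of PIL.Image.Image (np.ndarray for output_type="np")
    # With return_dict=False the pipeline returns a plain tuple instead of the dataclass.
    (images,) = pipe(prompt, return_dict=False)
    return image, images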
@dataclass class StableDiffusionXLPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] if is_flax_available(): import flax @flax.struct.dataclass class FlaxStableDiffusionXLPipelineOutput(BaseOutput): images: np.ndarray # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from .watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionXLPipeline\n\n >>> pipe = StableDiffusionXLPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLPipeline(DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'negative_add_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) 
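# Note on the original logic that follows: the batch size is derived from `prompt` (or from
# `prompt_embeds` when embeddings are passed directly), then the prompt is run through both
# CLIP tokenizers/text encoders. Each encoder contributes its penultimate hidden state (or
# the layer selected via `clip_skip`), the two embeddings are concatenated along the feature
# dimension, and the pooled projection of the last encoder (text_encoder_2) is kept as
# `pooled_prompt_embeds` for the added conditioning.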
prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, 
uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', 
return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, prompt_2, height, width, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, 
dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and isinstance(self.denoising_end, float) and (self.denoising_end > 0) and (self.denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) 
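# Any tensor registered in `_callback_tensor_inputs` may be replaced by the
# `callback_on_step_end` hook; the remaining values are read back from `callback_outputs`
# below, falling back to the current tensors when the callback did not modify them.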
prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) negative_add_time_ids = callback_outputs.pop('negative_add_time_ids', negative_add_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == 'latent': if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from .watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import 
torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionXLImg2ImgPipeline\n >>> from diffusers.utils import load_image\n\n >>> pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n >>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"\n\n >>> init_image = load_image(url).convert("RGB")\n >>> prompt = "a photo of an astronaut riding a horse on mars"\n >>> image = pipe(prompt, image=init_image).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'add_neg_time_ids'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, requires_aesthetics_score: bool=False, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = 
prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, strength, num_inference_steps, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if num_inference_steps is None: raise ValueError('`num_inference_steps` cannot be None.') elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: raise ValueError(f'`num_inference_steps` has to be a positive integer but is 
{num_inference_steps} of type {type(num_inference_steps)}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) else: discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_start * self.scheduler.config.num_train_timesteps)) num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: num_inference_steps = num_inference_steps + 1 t_start = len(self.scheduler.timesteps) - num_inference_steps timesteps = self.scheduler.timesteps[t_start:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start) return (timesteps, num_inference_steps) def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') latents_mean = latents_std = None if hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.text_encoder_2.to('cpu') torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} ') init_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list(negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim > passed_add_embed_dim and expected_add_embed_dim - passed_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.') elif expected_add_embed_dim < passed_add_embed_dim and passed_add_embed_dim - expected_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.') elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return (add_time_ids, add_neg_time_ids) def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, strength: float=0.3, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_start: Optional[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, aesthetic_score: float=6.0, negative_aesthetic_score: float=2.5, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) 
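# NOTE: `callback_on_step_end` is the supported per-step hook and supersedes the
# deprecated `callback`/`callback_steps` kwargs handled just below. It is called once per
# denoising step as `callback_on_step_end(pipeline, step_index, timestep, callback_kwargs)`,
# where `callback_kwargs` holds the tensors named in `callback_on_step_end_tensor_inputs`,
# and it must return a dict; returned entries (e.g. 'latents') override the pipeline's
# local tensors for the remaining steps. A minimal illustrative sketch; the names `pipe`
# and `init_image` are assumptions, not part of this module:
#
#     def log_latents(pipe, step_index, timestep, callback_kwargs):
#         print(step_index, callback_kwargs["latents"].shape)
#         return callback_kwargs
#
#     image = pipe(
#         "a photo of an astronaut", image=init_image, strength=0.5,
#         callback_on_step_end=log_latents,
#         callback_on_step_end_tensor_inputs=["latents"],
#     ).images[0]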
callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs self.check_inputs(prompt, prompt_2, strength, num_inference_steps, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) image = self.image_processor.preprocess(image) def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) add_noise = True if self.denoising_start is None else False if latents is None: latents = self.prepare_latents(image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, add_noise) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) (height, width) = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim (add_time_ids, add_neg_time_ids) = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, 
negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and (self.denoising_start >= self.denoising_end): raise ValueError(f'`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: ' + f' {self.denoising_end} when using type float.') elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = 
locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) add_neg_time_ids = callback_outputs.pop('add_neg_time_ids', add_neg_time_ids) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import StableDiffusionXLPipelineOutput if 
is_invisible_watermark_available(): from .watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionXLInpaintPipeline\n >>> from diffusers.utils import load_image\n\n >>> pipe = StableDiffusionXLInpaintPipeline.from_pretrained(\n ... "stabilityai/stable-diffusion-xl-base-1.0",\n ... torch_dtype=torch.float16,\n ... variant="fp16",\n ... use_safetensors=True,\n ... )\n >>> pipe.to("cuda")\n\n >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"\n >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"\n\n >>> init_image = load_image(img_url).convert("RGB")\n >>> mask_image = load_image(mask_url).convert("RGB")\n\n >>> prompt = "A majestic tiger sitting on a bench"\n >>> image = pipe(\n ... prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80\n ... ).images[0]\n ```\n' def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def mask_pil_to_torch(mask, height, width): if isinstance(mask, (PIL.Image.Image, np.ndarray)): mask = [mask] if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] mask = np.concatenate([np.array(m.convert('L'))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): mask = np.concatenate([m[None, None, :] for m in mask], axis=0) mask = torch.from_numpy(mask) return mask def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLInpaintPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, IPAdapterMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] _callback_tensor_inputs = ['latents', 'prompt_embeds', 'negative_prompt_embeds', 'add_text_embeds', 'add_time_ids', 'negative_pooled_prompt_embeds', 'add_neg_time_ids', 'mask', 'masked_image_latents'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, requires_aesthetics_score: bool=False, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, 
uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None 
else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that the passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, image, mask_image, height, width, strength, callback_steps, output_type, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, padding_mask_crop=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) 
or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if padding_mask_crop is not None: if not isinstance(image, PIL.Image.Image): raise ValueError(f'The image should be a PIL image when inpainting mask crop, but is of type {type(image)}.') if not isinstance(mask_image, PIL.Image.Image): raise ValueError(f'The mask image should be a PIL image when inpainting mask crop, but is of type {type(mask_image)}.') if output_type != 'pil': raise ValueError(f'The output type should be PIL when inpainting mask crop, but is {output_type}.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, add_noise=True, return_noise=False, return_image_latents=False): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if (image is None or timestep is None) and (not is_strength_max): raise ValueError('Since strength < 1, the initial latents need to be initialized as a combination of image and noise. However, either the image or the noise timestep has not been provided.') if image.shape[1] == 4: image_latents = image.to(device=device, dtype=dtype) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) elif return_image_latents or (latents is None and (not is_strength_max)): image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [retrieve_latents(self.vae.encode(image[i:i + 1]), generator=generator[i]) for i in range(image.shape[0])] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents(self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance): mask = torch.nn.functional.interpolate(mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) mask = mask.to(device=device, dtype=dtype) if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError(f"The passed mask and the required batch size don't match. 
Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size.") mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError(f"The passed images and the required batch size don't match. Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. Make sure the number of images that you pass is divisible by the total requested batch size.") masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) masked_image_latents = torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return (mask, masked_image_latents) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) else: discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_start * self.scheduler.config.num_train_timesteps)) num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: num_inference_steps = num_inference_steps + 1 t_start = len(self.scheduler.timesteps) - num_inference_steps timesteps = self.scheduler.timesteps[t_start:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start) return (timesteps, num_inference_steps) def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list(negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim > passed_add_embed_dim and expected_add_embed_dim - passed_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a 
vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model.') elif expected_add_embed_dim < passed_add_embed_dim and passed_add_embed_dim - expected_add_embed_dim == self.unet.config.addition_time_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model.') elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return (add_time_ids, add_neg_time_ids) def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, mask_image: PipelineImageInput=None, masked_image_latents: torch.Tensor=None, height: Optional[int]=None, width: Optional[int]=None, padding_mask_crop: Optional[int]=None, strength: float=0.9999, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_start: Optional[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: 
Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, aesthetic_score: float=6.0, negative_aesthetic_score: float=2.5, clip_skip: Optional[int]=None, callback_on_step_end: Optional[Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`') if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, prompt_2, image, mask_image, height, width, strength, callback_steps, output_type, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, padding_mask_crop) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device text_encoder_lora_scale = self.cross_attention_kwargs.get('scale', None) if self.cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip) def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, 
device, timesteps, sigmas) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None) if num_inference_steps < 1: raise ValueError(f'After adjusting the num_inference_steps by the strength parameter: {strength}, the number of pipeline steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.') latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 if padding_mask_crop is not None: crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = 'fill' else: crops_coords = None resize_mode = 'default' original_image = image init_image = self.image_processor.preprocess(image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode) init_image = init_image.to(dtype=torch.float32) mask = self.mask_processor.preprocess(mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords) if masked_image_latents is not None: masked_image = masked_image_latents elif init_image.shape[1] == 4: masked_image = None else: masked_image = init_image * (mask < 0.5) num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 add_noise = True if self.denoising_start is None else False latents_outputs = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, image=init_image, timestep=latent_timestep, is_strength_max=is_strength_max, add_noise=add_noise, return_noise=True, return_image_latents=return_image_latents) if return_image_latents: (latents, noise, image_latents) = latents_outputs else: (latents, noise) = latents_outputs (mask, masked_image_latents) = self.prepare_mask_latents(mask, masked_image, batch_size * num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance) if num_channels_unet == 9: num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. 
Please verify the config of `pipeline.unet` or your `mask_image` or `image` input.') elif num_channels_unet != 4: raise ValueError(f'The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.') extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) (height, width) = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim (add_time_ids, add_neg_time_ids) = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and (self.denoising_start >= self.denoising_end): raise ValueError(f'`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: ' + f' {self.denoising_end} when using type float.') elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - self.denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if num_channels_unet == 9: latent_model_input = 
torch.cat([latent_model_input, mask, masked_image_latents], dim=1) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: (init_mask, _) = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise(init_latents_proper, noise, torch.tensor([noise_timestep])) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) prompt_embeds = callback_outputs.pop('prompt_embeds', prompt_embeds) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) add_text_embeds = callback_outputs.pop('add_text_embeds', add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop('negative_pooled_prompt_embeds', negative_pooled_prompt_embeds) add_time_ids = callback_outputs.pop('add_time_ids', add_time_ids) add_neg_time_ids = callback_outputs.pop('add_neg_time_ids', add_neg_time_ids) mask = callback_outputs.pop('mask', mask) masked_image_latents = callback_outputs.pop('masked_image_latents', masked_image_latents) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: 
latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: return StableDiffusionXLPipelineOutput(images=latents) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import PIL.Image import torch from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): from .watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionXLInstructPix2PixPipeline\n >>> from diffusers.utils import load_image\n\n >>> resolution = 768\n >>> image = load_image(\n ... "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"\n ... ).resize((resolution, resolution))\n >>> edit_instruction = "Turn sky into a cloudy one"\n\n >>> pipe = StableDiffusionXLInstructPix2PixPipeline.from_pretrained(\n ... "diffusers/sdxl-instructpix2pix-768", torch_dtype=torch.float16\n ... ).to("cuda")\n\n >>> edited_image = pipe(\n ... prompt=edit_instruction,\n ... image=image,\n ... height=resolution,\n ... width=resolution,\n ... guidance_scale=3.0,\n ... image_guidance_scale=1.5,\n ... num_inference_steps=30,\n ... 
).images[0]\n >>> edited_image\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class StableDiffusionXLInstructPix2PixPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None, is_cosxl_edit: Optional[bool]=False): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size self.is_cosxl_edit = is_cosxl_edit add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = 
[self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt, negative_prompt_2] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) prompt_embeds_dtype = self.text_encoder_2.dtype if self.text_encoder_2 is not None else self.unet.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_latents(self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: image_latents = image else: needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: image = image.float() self.upcast_vae() image_latents = retrieve_latents(self.vae.encode(image), sample_mode='argmax') if needs_upcasting: self.vae.to(dtype=torch.float16) if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' 
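# Sketch of the duplication performed below (shapes are illustrative, assuming the usual 8x VAE
# downsampling and 4 latent channels): at 768x768, a single initial image yields `image_latents` of
# shape [1, 4, 96, 96]; with four text prompts it is tiled to [4, 4, 96, 96] so every prompt gets a
# copy. Under classifier-free guidance the result is then stacked as
# [image_latents, image_latents, zeros], matching the three noise predictions
# (text+image, image-only, fully unconditional) that `__call__` combines.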
deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts.') else: image_latents = torch.cat([image_latents], dim=0) if do_classifier_free_guidance: uncond_image_latents = torch.zeros_like(image_latents) image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) if image_latents.dtype != self.vae.dtype: image_latents = image_latents.to(dtype=self.vae.dtype) if self.is_cosxl_edit: image_latents = image_latents * self.vae.config.scaling_factor return image_latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=100, denoising_end: Optional[float]=None, guidance_scale: float=5.0, image_guidance_scale: float=1.5, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Tuple[int, int]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Tuple[int, int]=None): height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, 
negative_prompt_embeds) if image is None: raise ValueError('`image` input cannot be undefined.') if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale) image = self.image_processor.preprocess(image, height=height, width=width).to(device) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps image_latents = self.prepare_image_latents(image, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, do_classifier_free_guidance) num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) num_channels_image = image_latents.shape[1] if num_channels_latents + num_channels_image != self.unet.config.in_channels: raise ValueError(f'Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_image`: {num_channels_image} = {num_channels_latents + num_channels_image}. 
Please verify the config of `pipeline.unet` or your `image` input.') extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if do_classifier_free_guidance: prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds], dim=0) add_text_embeds = torch.cat([add_text_embeds, negative_pooled_prompt_embeds, negative_pooled_prompt_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if denoising_end is not None and isinstance(denoising_end, float) and (denoising_end > 0) and (denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} noise_pred = self.unet(scaled_latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_text, noise_pred_image, noise_pred_uncond) = noise_pred.chunk(3) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_image) + image_guidance_scale * (noise_pred_image - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): latents = latents.to(latents_dtype) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) elif latents.dtype != self.vae.dtype: if torch.backends.mps.is_available(): self.vae = self.vae.to(latents.dtype) has_latents_mean = hasattr(self.vae.config, 'latents_mean') and self.vae.config.latents_mean is not None has_latents_std = 
hasattr(self.vae.config, 'latents_std') and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: return StableDiffusionXLPipelineOutput(images=latents) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/stable_diffusion_xl/watermark.py import numpy as np import torch from ...utils import is_invisible_watermark_available if is_invisible_watermark_available(): from imwatermark import WatermarkEncoder WATERMARK_MESSAGE = 197828617679262 WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class StableDiffusionXLWatermarker: def __init__(self): self.watermark = WATERMARK_BITS self.encoder = WatermarkEncoder() self.encoder.set_watermark('bits', self.watermark) def apply_watermark(self, images: torch.Tensor): if images.shape[-1] < 256: return images images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy() images = images[:, :, :, ::-1] images = [self.encoder.encode(image, 'dwtDct')[:, :, ::-1] for image in images] images = np.array(images) images = torch.from_numpy(images).permute(0, 3, 1, 2) images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0) return images # File: diffusers-main/src/diffusers/pipelines/stable_video_diffusion/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, BaseOutput, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure.update({'pipeline_stable_video_diffusion': ['StableVideoDiffusionPipeline', 'StableVideoDiffusionPipelineOutput']}) if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_video_diffusion import StableVideoDiffusionPipeline, StableVideoDiffusionPipelineOutput else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py import inspect from dataclasses import dataclass from typing import Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection 
from ...image_processor import PipelineImageInput from ...models import AutoencoderKLTemporalDecoder, UNetSpatioTemporalConditionModel from ...schedulers import EulerDiscreteScheduler from ...utils import BaseOutput, logging, replace_example_docstring from ...utils.torch_utils import is_compiled_module, randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import StableVideoDiffusionPipeline\n >>> from diffusers.utils import load_image, export_to_video\n\n >>> pipe = StableVideoDiffusionPipeline.from_pretrained(\n ... "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"\n ... )\n >>> pipe.to("cuda")\n\n >>> image = load_image(\n ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd-docstring-example.jpeg"\n ... )\n >>> image = image.resize((1024, 576))\n\n >>> frames = pipe(image, num_frames=25, decode_chunk_size=8).frames[0]\n >>> export_to_video(frames, "generated.mp4", fps=7)\n ```\n' def _append_dims(x, target_dims): dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less') return x[(...,) + (None,) * dims_to_append] def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) @dataclass class StableVideoDiffusionPipelineOutput(BaseOutput): frames: Union[List[List[PIL.Image.Image]], np.ndarray, torch.Tensor] class StableVideoDiffusionPipeline(DiffusionPipeline): model_cpu_offload_seq = 'image_encoder->unet->vae' _callback_tensor_inputs = ['latents'] def __init__(self, vae: AutoencoderKLTemporalDecoder, image_encoder: CLIPVisionModelWithProjection, unet: UNetSpatioTemporalConditionModel, scheduler: EulerDiscreteScheduler, feature_extractor: CLIPImageProcessor): super().__init__() self.register_modules(vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(do_resize=True, vae_scale_factor=self.vae_scale_factor) def _encode_image(self, image: PipelineImageInput, device: Union[str, torch.device], num_videos_per_prompt: int, do_classifier_free_guidance: bool) -> torch.Tensor: dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.video_processor.pil_to_numpy(image) image = self.video_processor.numpy_to_pt(image) image = image * 2.0 - 1.0 image = _resize_with_antialiasing(image, (224, 224)) image = (image + 1.0) / 2.0 image = self.feature_extractor(images=image, do_normalize=True, do_center_crop=False, do_resize=False, do_rescale=False, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.unsqueeze(1) (bs_embed, seq_len, _) = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_image_embeddings = torch.zeros_like(image_embeddings) image_embeddings = torch.cat([negative_image_embeddings, image_embeddings]) return image_embeddings def _encode_vae_image(self, image: torch.Tensor, device: Union[str, torch.device], num_videos_per_prompt: int, do_classifier_free_guidance: bool): image = image.to(device=device) image_latents = self.vae.encode(image).latent_dist.mode() image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1) if do_classifier_free_guidance: negative_image_latents = torch.zeros_like(image_latents) image_latents = torch.cat([negative_image_latents, image_latents]) return image_latents def _get_add_time_ids(self, fps: int, motion_bucket_id: int, noise_aug_strength: float, dtype: torch.dtype, batch_size: int, num_videos_per_prompt: int, do_classifier_free_guidance: bool): add_time_ids = [fps, motion_bucket_id, noise_aug_strength] passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_time_ids = add_time_ids.repeat(batch_size * num_videos_per_prompt, 1) if do_classifier_free_guidance: add_time_ids = torch.cat([add_time_ids, add_time_ids]) return add_time_ids def decode_latents(self, latents: torch.Tensor, num_frames: int, decode_chunk_size: int=14): latents = latents.flatten(0, 1) latents = 1 / self.vae.config.scaling_factor * latents forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward accepts_num_frames = 'num_frames' in set(inspect.signature(forward_vae_fn).parameters.keys()) frames = [] for i in range(0, latents.shape[0], decode_chunk_size): num_frames_in = latents[i:i + decode_chunk_size].shape[0] decode_kwargs = {} if accepts_num_frames: decode_kwargs['num_frames'] = num_frames_in frame = self.vae.decode(latents[i:i + decode_chunk_size], **decode_kwargs).sample frames.append(frame) frames = torch.cat(frames, dim=0) frames = frames.reshape(-1, num_frames, *frames.shape[1:]).permute(0, 2, 1, 3, 4) frames = frames.float() return frames def check_inputs(self, image, height, width): if not isinstance(image, torch.Tensor) and (not isinstance(image, PIL.Image.Image)) and (not isinstance(image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is {type(image)}') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') def prepare_latents(self, batch_size: int, num_frames: int, num_channels_latents: int, height: int, width: int, dtype: torch.dtype, device: Union[str, torch.device], generator: torch.Generator, latents: Optional[torch.Tensor]=None): shape = (batch_size, num_frames, num_channels_latents // 2, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): if isinstance(self.guidance_scale, (int, float)): return self.guidance_scale > 1 return self.guidance_scale.max() > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor], height: int=576, width: int=1024, num_frames: Optional[int]=None, num_inference_steps: int=25, sigmas: Optional[List[float]]=None, min_guidance_scale: float=1.0, max_guidance_scale: float=3.0, fps: int=7, motion_bucket_id: int=127, noise_aug_strength: float=0.02, decode_chunk_size: Optional[int]=None, num_videos_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], return_dict: bool=True): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_frames = num_frames if num_frames is not None else self.unet.config.num_frames decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else num_frames self.check_inputs(image, height, width) if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) else: batch_size = image.shape[0] device = self._execution_device self._guidance_scale = max_guidance_scale image_embeddings = self._encode_image(image, device, num_videos_per_prompt, self.do_classifier_free_guidance) fps = fps - 1 image = self.video_processor.preprocess(image, height=height, width=width).to(device) noise = randn_tensor(image.shape, generator=generator, device=device, dtype=image.dtype) image = image + noise_aug_strength * noise needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.vae.to(dtype=torch.float32) image_latents = self._encode_vae_image(image, device=device, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance) image_latents = image_latents.to(image_embeddings.dtype) if needs_upcasting: self.vae.to(dtype=torch.float16) image_latents = image_latents.unsqueeze(1).repeat(1, num_frames, 1, 1, 1) added_time_ids = self._get_add_time_ids(fps, motion_bucket_id, noise_aug_strength, image_embeddings.dtype, batch_size, num_videos_per_prompt, self.do_classifier_free_guidance) added_time_ids = added_time_ids.to(device) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, None, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, num_frames, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents) guidance_scale = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames).unsqueeze(0) guidance_scale = guidance_scale.to(device, latents.dtype) guidance_scale = guidance_scale.repeat(batch_size * num_videos_per_prompt, 1) guidance_scale = 
_append_dims(guidance_scale, latents.ndim) self._guidance_scale = guidance_scale num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, image_latents], dim=2) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings, added_time_ids=added_time_ids, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_cond) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == 'latent': if needs_upcasting: self.vae.to(dtype=torch.float16) frames = self.decode_latents(latents, num_frames, decode_chunk_size) frames = self.video_processor.postprocess_video(video=frames, output_type=output_type) else: frames = latents self.maybe_free_model_hooks() if not return_dict: return frames return StableVideoDiffusionPipelineOutput(frames=frames) def _resize_with_antialiasing(input, size, interpolation='bicubic', align_corners=True): (h, w) = input.shape[-2:] factors = (h / size[0], w / size[1]) sigmas = (max((factors[0] - 1.0) / 2.0, 0.001), max((factors[1] - 1.0) / 2.0, 0.001)) ks = (int(max(2.0 * 2 * sigmas[0], 3)), int(max(2.0 * 2 * sigmas[1], 3))) if ks[0] % 2 == 0: ks = (ks[0] + 1, ks[1]) if ks[1] % 2 == 0: ks = (ks[0], ks[1] + 1) input = _gaussian_blur2d(input, ks, sigmas) output = torch.nn.functional.interpolate(input, size=size, mode=interpolation, align_corners=align_corners) return output def _compute_padding(kernel_size): if len(kernel_size) < 2: raise AssertionError(kernel_size) computed = [k - 1 for k in kernel_size] out_padding = 2 * len(kernel_size) * [0] for i in range(len(kernel_size)): computed_tmp = computed[-(i + 1)] pad_front = computed_tmp // 2 pad_rear = computed_tmp - pad_front out_padding[2 * i + 0] = pad_front out_padding[2 * i + 1] = pad_rear return out_padding def _filter2d(input, kernel): (b, c, h, w) = input.shape tmp_kernel = kernel[:, None, ...].to(device=input.device, dtype=input.dtype) tmp_kernel = tmp_kernel.expand(-1, c, -1, -1) (height, width) = tmp_kernel.shape[-2:] padding_shape: List[int] = _compute_padding([height, width]) input = torch.nn.functional.pad(input, padding_shape, mode='reflect') tmp_kernel = tmp_kernel.reshape(-1, 1, height, width) input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1)) output = torch.nn.functional.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1) out = output.view(b, c, h, w) return out def _gaussian(window_size: int, sigma): if isinstance(sigma, float): sigma = torch.tensor([[sigma]]) batch_size = sigma.shape[0] x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - window_size // 2).expand(batch_size, -1) if window_size % 2 == 0: 
x = x + 0.5 gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0))) return gauss / gauss.sum(-1, keepdim=True) def _gaussian_blur2d(input, kernel_size, sigma): if isinstance(sigma, tuple): sigma = torch.tensor([sigma], dtype=input.dtype) else: sigma = sigma.to(dtype=input.dtype) (ky, kx) = (int(kernel_size[0]), int(kernel_size[1])) bs = sigma.shape[0] kernel_x = _gaussian(kx, sigma[:, 1].view(bs, 1)) kernel_y = _gaussian(ky, sigma[:, 0].view(bs, 1)) out_x = _filter2d(input, kernel_x[..., None, :]) out = _filter2d(out_x, kernel_y[..., None]) return out # File: diffusers-main/src/diffusers/pipelines/t2i_adapter/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_stable_diffusion_adapter'] = ['StableDiffusionAdapterPipeline'] _import_structure['pipeline_stable_diffusion_xl_adapter'] = ['StableDiffusionXLAdapterPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_adapter import StableDiffusionAdapterPipeline from .pipeline_stable_diffusion_xl_adapter import StableDiffusionXLAdapterPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py import inspect from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, MultiAdapter, T2IAdapter, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, BaseOutput, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker @dataclass class StableDiffusionAdapterPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> from diffusers.utils import load_image\n >>> import torch\n >>> from diffusers import StableDiffusionAdapterPipeline, T2IAdapter\n\n >>> image = load_image(\n ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_ref.png"\n ... 
)\n\n >>> color_palette = image.resize((8, 8))\n >>> color_palette = color_palette.resize((512, 512), resample=Image.Resampling.NEAREST)\n\n >>> adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_color_sd14v1", torch_dtype=torch.float16)\n >>> pipe = StableDiffusionAdapterPipeline.from_pretrained(\n ... "CompVis/stable-diffusion-v1-4",\n ... adapter=adapter,\n ... torch_dtype=torch.float16,\n ... )\n\n >>> pipe.to("cuda")\n\n >>> out_image = pipe(\n ... "At night, glowing cubes in front of the beach",\n ... image=color_palette,\n ... ).images[0]\n ```\n' def _preprocess_adapter_image(image, height, width): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION['lanczos'])) for i in image] image = [i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): if image[0].ndim == 3: image = torch.stack(image, dim=0) elif image[0].ndim == 4: image = torch.cat(image, dim=0) else: raise ValueError(f'Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}') return image def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. 
Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionAdapterPipeline(DiffusionPipeline, StableDiffusionMixin): model_cpu_offload_seq = 'text_encoder->adapter->unet->vae' _optional_components = ['safety_checker', 'feature_extractor'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') if safety_checker is not None and feature_extractor is None: raise ValueError(f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.") if isinstance(adapter, (list, tuple)): adapter = MultiAdapter(adapter) self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, adapter=adapter, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.'
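# The deprecated `_encode_prompt` keeps the old single-tensor layout: `encode_prompt` returns the
# tuple (prompt_embeds, negative_prompt_embeds), and the wrapper below re-concatenates it as
# torch.cat([negative_prompt_embeds, prompt_embeds]) so callers that still slice one combined
# tensor keep working.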
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def decode_latents(self, latents): deprecation_message = 'The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead' deprecate('decode_latents', '1.0.0', deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, image, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if isinstance(self.adapter, MultiAdapter): if not isinstance(image, list): raise ValueError('MultiAdapter is enabled, but `image` is not a list. Please pass a list of images to `image`.') if len(image) != len(self.adapter.adapters): raise ValueError(f'MultiAdapter requires passing the same number of images as adapters. Given {len(image)} images and {len(self.adapter.adapters)} adapters.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _default_height_width(self, height, width, image): while isinstance(image, list): image = image[0] if height is None: if isinstance(image, PIL.Image.Image): height = image.height elif isinstance(image, torch.Tensor): height = image.shape[-2] height = height // self.adapter.downscale_factor * self.adapter.downscale_factor if width is None: if isinstance(image, PIL.Image.Image): width = image.width elif isinstance(image, torch.Tensor): width = image.shape[-1] width = width // self.adapter.downscale_factor * self.adapter.downscale_factor return (height, width) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, image: Union[torch.Tensor, PIL.Image.Image, List[PIL.Image.Image]]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, adapter_conditioning_scale: Union[float, List[float]]=1.0, clip_skip: Optional[int]=None): (height, width) = self._default_height_width(height, width, image) device = self._execution_device self.check_inputs(prompt, height, width, callback_steps, image, negative_prompt, prompt_embeds, negative_prompt_embeds) self._guidance_scale = guidance_scale if isinstance(self.adapter, MultiAdapter): adapter_input = [] for one_image in image: one_image = _preprocess_adapter_image(one_image, height, width) one_image = one_image.to(device=device, dtype=self.adapter.dtype) adapter_input.append(one_image) else: adapter_input = _preprocess_adapter_image(image, height, width) adapter_input = adapter_input.to(device=device, dtype=self.adapter.dtype) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, 
prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) if isinstance(self.adapter, MultiAdapter): adapter_state = self.adapter(adapter_input, adapter_conditioning_scale) for (k, v) in enumerate(adapter_state): adapter_state[k] = v else: adapter_state = self.adapter(adapter_input) for (k, v) in enumerate(adapter_state): adapter_state[k] = v * adapter_conditioning_scale if num_images_per_prompt > 1: for (k, v) in enumerate(adapter_state): adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) if self.do_classifier_free_guidance: for (k, v) in enumerate(adapter_state): adapter_state[k] = torch.cat([v] * 2, dim=0) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=cross_attention_kwargs, down_intrablock_additional_residuals=[state.clone() for state in adapter_state], return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if output_type == 'latent': image = latents has_nsfw_concept = None elif output_type == 'pil': image = self.decode_latents(latents) (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) image = self.numpy_to_pil(image) else: image = self.decode_latents(latents) (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionAdapterPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # File: diffusers-main/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, 
CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, MultiAdapter, T2IAdapter, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler\n >>> from diffusers.utils import load_image\n\n >>> sketch_image = load_image("https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png").convert("L")\n\n >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0"\n\n >>> adapter = T2IAdapter.from_pretrained(\n ... "Adapter/t2iadapter",\n ... subfolder="sketch_sdxl_1.0",\n ... torch_dtype=torch.float16,\n ... adapter_type="full_adapter_xl",\n ... )\n >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")\n\n >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained(\n ... model_id, adapter=adapter, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler\n ... ).to("cuda")\n\n >>> generator = torch.manual_seed(42)\n >>> sketch_image_out = pipe(\n ... prompt="a photo of a dog in real world, high quality",\n ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",\n ... image=sketch_image,\n ... generator=generator,\n ... guidance_scale=7.5,\n ... ).images[0]\n ```\n' def _preprocess_adapter_image(image, height, width): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION['lanczos'])) for i in image] image = [i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): if image[0].ndim == 3: image = torch.stack(image, dim=0) elif image[0].ndim == 4: image = torch.cat(image, dim=0) else: raise ValueError(f'Invalid image tensor! 
Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}') return image def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def retrieve_timesteps(scheduler, num_inference_steps: Optional[int]=None, device: Optional[Union[str, torch.device]]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None, **kwargs): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values') if timesteps is not None: accepts_timesteps = 'timesteps' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = 'sigmas' in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError(f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.") scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return (timesteps, num_inference_steps) class StableDiffusionXLAdapterPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->image_encoder->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'feature_extractor', 'image_encoder'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool=True, feature_extractor: CLIPImageProcessor=None, image_encoder: CLIPVisionModelWithProjection=None): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, adapter=adapter, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: 
Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size 
{len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder(torch.zeros_like(image), output_hidden_states=True).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) return (image_enc_hidden_states, uncond_image_enc_hidden_states) else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) 
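# Added note: this branch handles plain `ImageProjection` layers (see
# `prepare_ip_adapter_image_embeds` below), which only need pooled CLIP image
# embeddings; the unconditional input is represented by an all-zeros tensor of
# the same shape, whereas the hidden-states branch above encodes a zero-filled
# image tensor instead.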
uncond_image_embeds = torch.zeros_like(image_embeds) return (image_embeds, uncond_image_embeds) def prepare_ip_adapter_image_embeds(self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError(f'`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters.') for (single_ip_adapter_image, image_proj_layer) in zip(ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) (single_image_embeds, single_negative_image_embeds) = self.encode_image(single_ip_adapter_image, device, 1, output_hidden_state) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: (single_negative_image_embeds, single_image_embeds) = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for (i, single_image_embeds) in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and 
`prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError('Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.') if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError(f'`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}') elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError(f'`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def _default_height_width(self, height, width, image): while isinstance(image, list): image = image[0] if height is None: if isinstance(image, PIL.Image.Image): height = image.height elif isinstance(image, torch.Tensor): height = image.shape[-2] height = height // self.adapter.downscale_factor * self.adapter.downscale_factor if width is None: if isinstance(image, PIL.Image.Image): width = image.width elif isinstance(image, torch.Tensor): width = image.shape[-1] width = width // self.adapter.downscale_factor * self.adapter.downscale_factor return (height, width) def get_guidance_scale_embedding(self, w: torch.Tensor, embedding_dim: int=512, dtype: torch.dtype=torch.float32) -> torch.Tensor: assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, prompt_2: Optional[Union[str, List[str]]]=None, image: PipelineImageInput=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, timesteps: List[int]=None, sigmas: List[float]=None, denoising_end: Optional[float]=None, guidance_scale: float=5.0, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, 
negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, ip_adapter_image: Optional[PipelineImageInput]=None, ip_adapter_image_embeds: Optional[List[torch.Tensor]]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, negative_original_size: Optional[Tuple[int, int]]=None, negative_crops_coords_top_left: Tuple[int, int]=(0, 0), negative_target_size: Optional[Tuple[int, int]]=None, adapter_conditioning_scale: Union[float, List[float]]=1.0, adapter_conditioning_factor: float=1.0, clip_skip: Optional[int]=None): (height, width) = self._default_height_width(height, width, image) device = self._execution_device if isinstance(self.adapter, MultiAdapter): adapter_input = [] for one_image in image: one_image = _preprocess_adapter_image(one_image, height, width) one_image = one_image.to(device=device, dtype=self.adapter.dtype) adapter_input.append(one_image) else: adapter_input = _preprocess_adapter_image(image, height, width) adapter_input = adapter_input.to(device=device, dtype=self.adapter.dtype) original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, prompt_2, height, width, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds) self._guidance_scale = guidance_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, clip_skip=clip_skip) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds(ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance) (timesteps, num_inference_steps) = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas) num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding(guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim).to(device=device, dtype=latents.dtype) if isinstance(self.adapter, MultiAdapter): adapter_state = self.adapter(adapter_input, adapter_conditioning_scale) 
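# Added note: for MultiAdapter, per-adapter conditioning scales are applied
# inside `self.adapter(...)`, so the copy loop below appears to be a no-op kept
# for symmetry with the single-adapter branch, which scales every residual by
# `adapter_conditioning_scale` explicitly; the subsequent loops then repeat the
# residuals for `num_images_per_prompt` and duplicate them for classifier-free
# guidance so they match the batched latent input.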
for (k, v) in enumerate(adapter_state): adapter_state[k] = v else: adapter_state = self.adapter(adapter_input) for (k, v) in enumerate(adapter_state): adapter_state[k] = v * adapter_conditioning_scale if num_images_per_prompt > 1: for (k, v) in enumerate(adapter_state): adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) if self.do_classifier_free_guidance: for (k, v) in enumerate(adapter_state): adapter_state[k] = torch.cat([v] * 2, dim=0) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids(negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if denoising_end is not None and isinstance(denoising_end, float) and (denoising_end > 0) and (denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs['image_embeds'] = image_embeds if i < int(num_inference_steps * adapter_conditioning_factor): down_intrablock_additional_residuals = [state.clone() for state in adapter_state] else: down_intrablock_additional_residuals = None noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=cross_attention_kwargs, down_intrablock_additional_residuals=down_intrablock_additional_residuals, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if self.do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i 
+ 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/text_to_video_synthesis/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['pipeline_output'] = ['TextToVideoSDPipelineOutput'] _import_structure['pipeline_text_to_video_synth'] = ['TextToVideoSDPipeline'] _import_structure['pipeline_text_to_video_synth_img2img'] = ['VideoToVideoSDPipeline'] _import_structure['pipeline_text_to_video_zero'] = ['TextToVideoZeroPipeline'] _import_structure['pipeline_text_to_video_zero_sdxl'] = ['TextToVideoZeroSDXLPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_output import TextToVideoSDPipelineOutput from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline from .pipeline_text_to_video_zero import TextToVideoZeroPipeline from .pipeline_text_to_video_zero_sdxl import TextToVideoZeroSDXLPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py from dataclasses import dataclass from typing import List, Union import numpy as np import PIL import torch from ...utils import BaseOutput @dataclass class TextToVideoSDPipelineOutput(BaseOutput): frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] # File: diffusers-main/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet3DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, 
deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . import TextToVideoSDPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import TextToVideoSDPipeline\n >>> from diffusers.utils import export_to_video\n\n >>> pipe = TextToVideoSDPipeline.from_pretrained(\n ... "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"\n ... )\n >>> pipe.enable_model_cpu_offload()\n\n >>> prompt = "Spiderman is surfing"\n >>> video_frames = pipe(prompt).frames[0]\n >>> video_path = export_to_video(video_frames)\n >>> video_path\n ```\n' class TextToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: KarrasDiffusionSchedulers): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
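# Added note: as in the adapter pipelines above, this `_encode_prompt` wrapper
# exists only for backward compatibility; it delegates to `encode_prompt()` and
# re-concatenates the returned (positive, negative) tuple into the legacy
# [negative, positive] tensor layout used for classifier-free guidance. New
# callers should use `encode_prompt()` directly.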
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, height: Optional[int]=None, width: Optional[int]=None, num_frames: int=16, num_inference_steps: int=50, guidance_scale: float=9.0, negative_prompt: Optional[Union[str, List[str]]]=None, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='np', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None): height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_images_per_prompt = 1 self.check_inputs(prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = 
self.prepare_latents(batch_size * num_images_per_prompt, num_channels_latents, num_frames, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) (bsz, channel, frames, width, height) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return TextToVideoSDPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet3DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from . 
import TextToVideoSDPipelineOutput logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler\n >>> from diffusers.utils import export_to_video\n >>> from PIL import Image\n\n >>> pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)\n >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)\n >>> pipe.to("cuda")\n\n >>> prompt = "spiderman running in the desert"\n >>> video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0]\n >>> # save the low-res video\n >>> video_path = export_to_video(video_frames, output_video_path="./video_576_spiderman.mp4")\n\n >>> # let\'s offload the text-to-video model\n >>> pipe.to("cpu")\n\n >>> # and load the video-to-video model\n >>> pipe = DiffusionPipeline.from_pretrained(\n ... "cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/15"\n ... )\n >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)\n >>> pipe.enable_model_cpu_offload()\n\n >>> # The VAE consumes A LOT of memory, let\'s make sure we run it in sliced mode\n >>> pipe.vae.enable_slicing()\n\n >>> # now let\'s upscale it\n >>> video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames]\n\n >>> # and denoise it\n >>> video_frames = pipe(prompt, video=video, strength=0.6).frames[0]\n >>> video_path = export_to_video(video_frames, output_video_path="./video_1024_spiderman.mp4")\n >>> video_path\n ```\n' def retrieve_latents(encoder_output: torch.Tensor, generator: Optional[torch.Generator]=None, sample_mode: str='sample'): if hasattr(encoder_output, 'latent_dist') and sample_mode == 'sample': return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, 'latent_dist') and sample_mode == 'argmax': return encoder_output.latent_dist.mode() elif hasattr(encoder_output, 'latents'): return encoder_output.latents else: raise AttributeError('Could not access latents of provided encoder_output') class VideoToVideoSDPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): model_cpu_offload_seq = 'text_encoder->unet->vae' def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: KarrasDiffusionSchedulers): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.'
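# Rough sketch of the non-deprecated path (illustrative only; `pipe` stands for an already-loaded VideoToVideoSDPipeline and the prompt text is arbitrary): `encode_prompt` returns a `(prompt_embeds, negative_prompt_embeds)` tuple, and `_encode_prompt` below merely re-packs that tuple into the legacy concatenated layout.
#   prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt='a sunset over the sea', device=pipe._execution_device, num_images_per_prompt=1, do_classifier_free_guidance=True)
#   legacy_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])  # the single tensor that `_encode_prompt` returns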
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise 
ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents (batch_size, channels, num_frames, height, width) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) image = self.vae.decode(latents).sample video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) video = video.float() return video def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def check_inputs(self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def get_timesteps(self, num_inference_steps, strength, device): init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:] if hasattr(self.scheduler, 'set_begin_index'): self.scheduler.set_begin_index(t_start * self.scheduler.order) return (timesteps, num_inference_steps - t_start) def prepare_latents(self, video, timestep, batch_size, dtype, device, generator=None): video = video.to(device=device, dtype=dtype) (bsz, channel, frames, width, height) = video.shape video = video.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) if video.shape[1] == 4: init_latents = video else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') elif isinstance(generator, list): init_latents = [retrieve_latents(self.vae.encode(video[i:i + 1]), generator=generator[i]) for i in range(batch_size)] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(video), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `video` of batch size {init_latents.shape[0]} to {batch_size} text prompts.') else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents latents = latents[None, :].reshape((bsz, frames, latents.shape[1]) + latents.shape[2:]).permute(0, 2, 1, 3, 4) return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Union[str, List[str]]=None, video: Union[List[np.ndarray], torch.Tensor]=None, strength: float=0.6, num_inference_steps: int=50, guidance_scale: float=15.0, negative_prompt: Optional[Union[str, List[str]]]=None, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='np', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, clip_skip: Optional[int]=None): num_images_per_prompt = 1 self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) video = self.video_processor.preprocess_video(video) self.scheduler.set_timesteps(num_inference_steps, device=device) (timesteps, num_inference_steps) = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) latents = self.prepare_latents(video, latent_timestep, batch_size, prompt_embeds.dtype, device, generator) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, 
cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) (bsz, channel, frames, width, height) = latents.shape latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4) if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') if output_type == 'latent': video = latents else: video_tensor = self.decode_latents(latents) video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (video,) return TextToVideoSDPipelineOutput(frames=video) # File: diffusers-main/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py import copy import inspect from dataclasses import dataclass from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) def rearrange_0(tensor, f): (F, C, H, W) = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): (B, C, F, H, W) = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): (F, D, C) = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): (B, F, D, C) = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): (batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if not is_cross_attention: video_length = 
key.size()[0] // self.batch_size first_frame_index = [0] * video_length key = rearrange_3(key, video_length) key = key[:, first_frame_index] value = rearrange_3(value, video_length) value = value[:, first_frame_index] key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: def __init__(self, batch_size=2): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length key = rearrange_3(key, video_length) key = key[:, first_frame_index] value = rearrange_3(value, video_length) value = value[:, first_frame_index] key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): (_, _, H, W) = reference_flow.size() (_, _, h, w) = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode='bilinear') coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode='nearest', padding_mode='reflection') return warped def 
create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * frame_ids[fr_idx] reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * frame_ids[fr_idx] return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): motion_field = create_motion_field(motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool=True): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor) if safety_checker is None and requires_safety_checker: logger.warning(f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. 
For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def forward_loop(self, x_t0, t0, t1, generator): eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps return x_t1 def backward_loop(self, latents, timesteps, prompt_embeds, guidance_scale, callback, callback_steps, num_warmup_steps, extra_step_kwargs, cross_attention_kwargs=None): do_classifier_free_guidance = guidance_scale > 1.0 num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order with self.progress_bar(total=num_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) return latents.clone().detach() def check_inputs(self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], video_length: Optional[int]=8, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, motion_field_strength_x: float=12, motion_field_strength_y: float=12, output_type: Optional[str]='tensor', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: Optional[int]=1, t0: int=44, t1: int=47, frame_ids: Optional[List[int]]=None): assert video_length > 0 if frame_ids is None: frame_ids = list(range(video_length)) assert len(frame_ids) == video_length assert num_videos_per_prompt == 1 original_attn_proc = self.unet.attn_processors processor = CrossFrameAttnProcessor2_0(batch_size=2) if hasattr(F, 'scaled_dot_product_attention') else CrossFrameAttnProcessor(batch_size=2) self.unet.set_attn_processor(processor) if isinstance(prompt, str): prompt = [prompt] if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor self.check_inputs(prompt, height, width, callback_steps) batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds_tuple = self.encode_prompt(prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order x_1_t1 = self.backward_loop(timesteps=timesteps[:-t1 - 1], prompt_embeds=prompt_embeds, latents=latents, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, 
num_warmup_steps=num_warmup_steps) scheduler_copy = copy.deepcopy(self.scheduler) x_1_t0 = self.backward_loop(timesteps=timesteps[-t1 - 1:-t0 - 1], prompt_embeds=prompt_embeds, latents=x_1_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0) x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) x_2k_t0 = create_motion_field_and_warp_latents(motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, latents=x_2k_t0, frame_ids=frame_ids[1:]) x_2k_t1 = self.forward_loop(x_t0=x_2k_t0, t0=timesteps[-t0 - 1].item(), t1=timesteps[-t1 - 1].item(), generator=generator) x_1k_t1 = torch.cat([x_1_t1, x_2k_t1]) (b, l, d) = prompt_embeds.size() prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) self.scheduler = scheduler_copy x_1k_0 = self.backward_loop(timesteps=timesteps[-t1 - 1:], prompt_embeds=prompt_embeds, latents=x_1k_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0) latents = x_1k_0 if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.unet.to('cpu') torch.cuda.empty_cache() if output_type == 'latent': image = latents has_nsfw_concept = None else: image = self.decode_latents(latents) (image, has_nsfw_concept) = self.run_safety_checker(image, device, prompt_embeds.dtype) self.maybe_free_model_hooks() self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image, has_nsfw_concept) return TextToVideoPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type='pil') else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors='pt').to(device) (image, has_nsfw_concept) = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values.to(dtype)) return (image, has_nsfw_concept) def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer(prompt, 
padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # File: diffusers-main/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py import copy import inspect from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.nn.functional as F from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, BaseOutput, is_invisible_watermark_available, logging, scale_lora_layers, unscale_lora_layers from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker logger = logging.get_logger(__name__) def rearrange_0(tensor, f): (F, C, H, W) = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): (B, C, F, H, W) = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): (F, D, C) = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): (B, F, D, C) = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): 
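# Cross-frame attention, fallback path for PyTorch < 2.0 (the scaled-dot-product variant follows in CrossFrameAttnProcessor2_0 below): for self-attention layers the keys and values of every frame are replaced by those of the first frame, so all frames of the generated video are tied to the appearance of frame 0. With classifier-free guidance the batch holds an unconditional and a conditional stream, hence the default `batch_size=2`, and `video_length = key.size()[0] // self.batch_size` recovers the number of frames per stream.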
(batch_size, sequence_length, _) = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length key = rearrange_3(key, video_length) key = key[:, first_frame_index] value = rearrange_3(value, video_length) value = value[:, first_frame_index] key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: def __init__(self, batch_size=2): if not hasattr(F, 'scaled_dot_product_attention'): raise ImportError('AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.') self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): (batch_size, sequence_length, _) = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length key = rearrange_3(key, video_length) key = key[:, first_frame_index] value = rearrange_3(value, video_length) value = value[:, first_frame_index] key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoSDXLPipelineOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] def coords_grid(batch, ht, wd, device): coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, 
reference_flow): (_, _, H, W) = reference_flow.size() (_, _, h, w) = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode='bilinear') coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode='nearest', padding_mode='reflection') return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * frame_ids[fr_idx] reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * frame_ids[fr_idx] return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): motion_field = create_motion_field(motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class TextToVideoZeroSDXLPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin): model_cpu_offload_seq = 'text_encoder->text_encoder_2->unet->vae' _optional_components = ['tokenizer', 'tokenizer_2', 'text_encoder', 'text_encoder_2', 'image_encoder', 'feature_extractor'] def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection=None, feature_extractor: CLIPImageProcessor=None, force_zeros_for_empty_prompt: bool=True, add_watermarker: Optional[bool]=None): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in 
set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance(self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0)) if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError(f'Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`.') add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) latents = latents * self.scheduler.init_noise_sigma return latents def check_inputs(self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None): if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt_2 is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') elif prompt_2 is not None and (not isinstance(prompt_2, str) and (not isinstance(prompt_2, list))): raise ValueError(f'`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError('If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`.') if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError('If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`.') def encode_prompt(self, prompt: str, prompt_2: Optional[str]=None, device: Optional[torch.device]=None, num_images_per_prompt: int=1, do_classifier_free_guidance: bool=True, negative_prompt: Optional[str]=None, negative_prompt_2: Optional[str]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): device = device or self._execution_device if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_embeds_list = [] prompts = [prompt, prompt_2] for (prompt, tokenizer, text_encoder) in zip(prompts, tokenizers, 
text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer(prompt, padding='max_length', max_length=tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}') prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or '' negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for (negative_prompt, tokenizer, text_encoder) in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer(negative_prompt, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True) negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(bs_embed * num_images_per_prompt, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder_2, lora_scale) return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) def forward_loop(self, x_t0, t0, t1, generator): eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps return x_t1 def backward_loop(self, latents, timesteps, prompt_embeds, guidance_scale, callback, callback_steps, num_warmup_steps, extra_step_kwargs, add_text_embeds, add_time_ids, cross_attention_kwargs=None, guidance_rescale: float=0.0): do_classifier_free_guidance = guidance_scale > 1.0 num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order with self.progress_bar(total=num_steps) as progress_bar: for (i, t) in enumerate(timesteps): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {'text_embeds': add_text_embeds, 'time_ids': add_time_ids} noise_pred = self.unet(latent_model_input, t, 
encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0] if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) return latents.clone().detach() @torch.no_grad() def __call__(self, prompt: Union[str, List[str]], prompt_2: Optional[Union[str, List[str]]]=None, video_length: Optional[int]=8, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: int=50, denoising_end: Optional[float]=None, guidance_scale: float=7.5, negative_prompt: Optional[Union[str, List[str]]]=None, negative_prompt_2: Optional[Union[str, List[str]]]=None, num_videos_per_prompt: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, frame_ids: Optional[List[int]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, pooled_prompt_embeds: Optional[torch.Tensor]=None, negative_pooled_prompt_embeds: Optional[torch.Tensor]=None, latents: Optional[torch.Tensor]=None, motion_field_strength_x: float=12, motion_field_strength_y: float=12, output_type: Optional[str]='tensor', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1, cross_attention_kwargs: Optional[Dict[str, Any]]=None, guidance_rescale: float=0.0, original_size: Optional[Tuple[int, int]]=None, crops_coords_top_left: Tuple[int, int]=(0, 0), target_size: Optional[Tuple[int, int]]=None, t0: int=44, t1: int=47): assert video_length > 0 if frame_ids is None: frame_ids = list(range(video_length)) assert len(frame_ids) == video_length assert num_videos_per_prompt == 1 original_attn_proc = self.unet.attn_processors processor = CrossFrameAttnProcessor2_0(batch_size=2) if hasattr(F, 'scaled_dot_product_attention') else CrossFrameAttnProcessor(batch_size=2) self.unet.set_attn_processor(processor) if isinstance(prompt, str): prompt = [prompt] if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) self.check_inputs(prompt, prompt_2, height, width, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) batch_size = 1 if isinstance(prompt, str) else len(prompt) if isinstance(prompt, list) else prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1.0 text_encoder_lora_scale = cross_attention_kwargs.get('scale', None) if cross_attention_kwargs is not None else None (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.encode_prompt(prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_videos_per_prompt, 
do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents(batch_size * num_videos_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids(original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_videos_per_prompt, 1) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order x_1_t1 = self.backward_loop(timesteps=timesteps[:-t1 - 1], prompt_embeds=prompt_embeds, latents=latents, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=num_warmup_steps, add_text_embeds=add_text_embeds, add_time_ids=add_time_ids) scheduler_copy = copy.deepcopy(self.scheduler) x_1_t0 = self.backward_loop(timesteps=timesteps[-t1 - 1:-t0 - 1], prompt_embeds=prompt_embeds, latents=x_1_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, add_text_embeds=add_text_embeds, add_time_ids=add_time_ids) x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) x_2k_t0 = create_motion_field_and_warp_latents(motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, latents=x_2k_t0, frame_ids=frame_ids[1:]) x_2k_t1 = self.forward_loop(x_t0=x_2k_t0, t0=timesteps[-t0 - 1].to(torch.long), t1=timesteps[-t1 - 1].to(torch.long), generator=generator) latents = torch.cat([x_1_t1, x_2k_t1]) self.scheduler = scheduler_copy timesteps = timesteps[-t1 - 1:] (b, l, d) = prompt_embeds.size() prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) (b, k) = add_text_embeds.size() add_text_embeds = add_text_embeds[:, None].repeat(1, video_length, 1).reshape(b * video_length, k) (b, k) = add_time_ids.size() add_time_ids = add_time_ids[:, None].repeat(1, video_length, 1).reshape(b * video_length, k) if denoising_end is not None and isinstance(denoising_end, float) and (denoising_end > 0) and (denoising_end < 1): discrete_timestep_cutoff = int(round(self.scheduler.config.num_train_timesteps - denoising_end * self.scheduler.config.num_train_timesteps)) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] x_1k_0 = 
self.backward_loop(timesteps=timesteps, prompt_embeds=prompt_embeds, latents=latents, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, add_text_embeds=add_text_embeds, add_time_ids=add_time_ids) latents = x_1k_0 if not output_type == 'latent': needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return TextToVideoSDXLPipelineOutput(images=image) if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return TextToVideoSDXLPipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/unclip/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline _dummy_objects.update({'UnCLIPImageVariationPipeline': UnCLIPImageVariationPipeline, 'UnCLIPPipeline': UnCLIPPipeline}) else: _import_structure['pipeline_unclip'] = ['UnCLIPPipeline'] _import_structure['pipeline_unclip_image_variation'] = ['UnCLIPImageVariationPipeline'] _import_structure['text_proj'] = ['UnCLIPTextProjModel'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/unclip/pipeline_unclip.py import inspect from typing import List, Optional, Tuple, Union import torch from torch.nn import functional as F from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel from ...schedulers import UnCLIPScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_proj import UnCLIPTextProjModel logger = logging.get_logger(__name__) class UnCLIPPipeline(DiffusionPipeline): _exclude_from_cpu_offload = ['prior'] prior: PriorTransformer decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: 
CLIPTokenizer super_res_first: UNet2DModel super_res_last: UNet2DModel prior_scheduler: UnCLIPScheduler decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler model_cpu_offload_seq = 'text_encoder->text_proj->decoder->super_res_first->super_res_last' def __init__(self, prior: PriorTransformer, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, super_res_first: UNet2DModel, super_res_last: UNet2DModel, prior_scheduler: UnCLIPScheduler, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler): super().__init__() self.register_modules(prior=prior, decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, super_res_first=super_res_first, super_res_last=super_res_last, prior_scheduler=prior_scheduler, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]]=None, text_attention_mask: Optional[torch.Tensor]=None): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_enc_hid_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] (prompt_embeds, text_enc_hid_states) = (text_model_output[0], text_model_output[1]) text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [''] * batch_size uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_enc_hid_states = negative_prompt_embeds_text_encoder_output.last_hidden_state seq_len = 
negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_enc_hid_states.shape[1] uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) uncond_text_enc_hid_states = uncond_text_enc_hid_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_enc_hid_states, text_mask) @torch.no_grad() def __call__(self, prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, prior_num_inference_steps: int=25, decoder_num_inference_steps: int=25, super_res_num_inference_steps: int=7, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, prior_latents: Optional[torch.Tensor]=None, decoder_latents: Optional[torch.Tensor]=None, super_res_latents: Optional[torch.Tensor]=None, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]]=None, text_attention_mask: Optional[torch.Tensor]=None, prior_guidance_scale: float=4.0, decoder_guidance_scale: float=8.0, output_type: Optional[str]='pil', return_dict: bool=True): if prompt is not None: if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') else: batch_size = text_model_output[0].shape[0] device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 (prompt_embeds, text_enc_hid_states, text_mask) = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask) self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents((batch_size, embedding_dim), prompt_embeds.dtype, device, generator, prior_latents, self.prior_scheduler) for (i, t) in enumerate(self.progress_bar(prior_timesteps_tensor)): latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents predicted_image_embedding = self.prior(latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_enc_hid_states, attention_mask=text_mask).predicted_image_embedding if do_classifier_free_guidance: (predicted_image_embedding_uncond, predicted_image_embedding_text) = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (predicted_image_embedding_text - predicted_image_embedding_uncond) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] prior_latents = self.prior_scheduler.step(predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep).prev_sample prior_latents = self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents (text_enc_hid_states, additive_clip_time_embeddings) = self.text_proj(image_embeddings=image_embeddings, 
prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_enc_hid_states, do_classifier_free_guidance=do_classifier_free_guidance) if device.type == 'mps': text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size decoder_latents = self.prepare_latents((batch_size, num_channels_latents, height, width), text_enc_hid_states.dtype, device, generator, decoder_latents, self.decoder_scheduler) for (i, t) in enumerate(self.progress_bar(decoder_timesteps_tensor)): latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents noise_pred = self.decoder(sample=latent_model_input, timestep=t, encoder_hidden_states=text_enc_hid_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (noise_pred_uncond, _) = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) (noise_pred_text, predicted_variance) = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1] decoder_latents = self.decoder_scheduler.step(noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator).prev_sample decoder_latents = decoder_latents.clamp(-1, 1) image_small = decoder_latents self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size super_res_latents = self.prepare_latents((batch_size, channels, height, width), image_small.dtype, device, generator, super_res_latents, self.super_res_scheduler) if device.type == 'mps': image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if 'antialias' in inspect.signature(F.interpolate).parameters: interpolate_antialias['antialias'] = True image_upscaled = F.interpolate(image_small, size=[height, width], mode='bicubic', align_corners=False, **interpolate_antialias) for (i, t) in enumerate(self.progress_bar(super_res_timesteps_tensor)): if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet(sample=latent_model_input, timestep=t).sample if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] super_res_latents = self.super_res_scheduler.step(noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator).prev_sample image = super_res_latents self.maybe_free_model_hooks() image = image * 0.5 + 0.5 image = 
image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py import inspect from typing import List, Optional, Union import PIL.Image import torch from torch.nn import functional as F from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection from ...models import UNet2DConditionModel, UNet2DModel from ...schedulers import UnCLIPScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_proj import UnCLIPTextProjModel logger = logging.get_logger(__name__) class UnCLIPImageVariationPipeline(DiffusionPipeline): decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: CLIPTokenizer feature_extractor: CLIPImageProcessor image_encoder: CLIPVisionModelWithProjection super_res_first: UNet2DModel super_res_last: UNet2DModel decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler model_cpu_offload_seq = 'text_encoder->image_encoder->text_proj->decoder->super_res_first->super_res_last' def __init__(self, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection, super_res_first: UNet2DModel, super_res_last: UNet2DModel, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler): super().__init__() self.register_modules(decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=super_res_first, super_res_last=super_res_last, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt') text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [''] * batch_size max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') uncond_text_mask = 
uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(batch_size * num_images_per_prompt, seq_len, -1) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return (prompt_embeds, text_encoder_hidden_states, text_mask) def _encode_image(self, image, device, num_images_per_prompt, image_embeddings: Optional[torch.Tensor]=None): dtype = next(self.image_encoder.parameters()).dtype if image_embeddings is None: if not isinstance(image, torch.Tensor): image = self.feature_extractor(images=image, return_tensors='pt').pixel_values image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.repeat_interleave(num_images_per_prompt, dim=0) return image_embeddings @torch.no_grad() def __call__(self, image: Optional[Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor]]=None, num_images_per_prompt: int=1, decoder_num_inference_steps: int=25, super_res_num_inference_steps: int=7, generator: Optional[torch.Generator]=None, decoder_latents: Optional[torch.Tensor]=None, super_res_latents: Optional[torch.Tensor]=None, image_embeddings: Optional[torch.Tensor]=None, decoder_guidance_scale: float=8.0, output_type: Optional[str]='pil', return_dict: bool=True): if image is not None: if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) else: batch_size = image.shape[0] else: batch_size = image_embeddings.shape[0] prompt = [''] * batch_size device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = decoder_guidance_scale > 1.0 (prompt_embeds, text_encoder_hidden_states, text_mask) = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance) image_embeddings = self._encode_image(image, device, num_images_per_prompt, image_embeddings) (text_encoder_hidden_states, additive_clip_time_embeddings) = self.text_proj(image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_encoder_hidden_states, do_classifier_free_guidance=do_classifier_free_guidance) if device.type == 'mps': text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps 
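# --- Editor's sketch (illustrative only, not part of the library source) ----------------------
# The decoder loop below applies classifier-free guidance to a UNet that predicts both the
# noise (epsilon) and a learned variance, so its output carries 2 * num_channels_latents
# channels. Guidance is applied to the epsilon half only, and the variance predicted by the
# conditional branch is re-attached before the scheduler step. Minimal, self-contained sketch
# of that split on dummy tensors (shapes and the guidance scale of 8.0 are assumptions):
import torch
_demo_b, _demo_c, _demo_h, _demo_w = 1, 3, 8, 8                       # dummy batch/channel/spatial sizes
_demo_pred = torch.randn(2 * _demo_b, 2 * _demo_c, _demo_h, _demo_w)  # [uncond | cond] stacked on dim 0
_demo_uncond, _demo_cond = _demo_pred.chunk(2)
_demo_uncond_eps, _ = _demo_uncond.split(_demo_c, dim=1)              # unconditional variance is discarded
_demo_cond_eps, _demo_var = _demo_cond.split(_demo_c, dim=1)
_demo_eps = _demo_uncond_eps + 8.0 * (_demo_cond_eps - _demo_uncond_eps)
_demo_guided = torch.cat([_demo_eps, _demo_var], dim=1)               # epsilon plus re-attached variance
# -----------------------------------------------------------------------------------------------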
num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size if decoder_latents is None: decoder_latents = self.prepare_latents((batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, decoder_latents, self.decoder_scheduler) for (i, t) in enumerate(self.progress_bar(decoder_timesteps_tensor)): latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents noise_pred = self.decoder(sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask).sample if do_classifier_free_guidance: (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2) (noise_pred_uncond, _) = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) (noise_pred_text, predicted_variance) = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1] decoder_latents = self.decoder_scheduler.step(noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator).prev_sample decoder_latents = decoder_latents.clamp(-1, 1) image_small = decoder_latents self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size if super_res_latents is None: super_res_latents = self.prepare_latents((batch_size, channels, height, width), image_small.dtype, device, generator, super_res_latents, self.super_res_scheduler) if device.type == 'mps': image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if 'antialias' in inspect.signature(F.interpolate).parameters: interpolate_antialias['antialias'] = True image_upscaled = F.interpolate(image_small, size=[height, width], mode='bicubic', align_corners=False, **interpolate_antialias) for (i, t) in enumerate(self.progress_bar(super_res_timesteps_tensor)): if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet(sample=latent_model_input, timestep=t).sample if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] super_res_latents = self.super_res_scheduler.step(noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator).prev_sample image = super_res_latents self.maybe_free_model_hooks() image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == 'pil': image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image) # File: diffusers-main/src/diffusers/pipelines/unclip/text_proj.py import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UnCLIPTextProjModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, *, 
clip_extra_context_tokens: int=4, clip_embeddings_dim: int=768, time_embed_dim: int, cross_attention_dim): super().__init__() self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim)) self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim) self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim) self.clip_extra_context_tokens = clip_extra_context_tokens self.clip_extra_context_tokens_proj = nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim) self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim) self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim) def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance): if do_classifier_free_guidance: image_embeddings_batch_size = image_embeddings.shape[0] classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0) classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(image_embeddings_batch_size, -1) image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0) assert image_embeddings.shape[0] == prompt_embeds.shape[0] batch_size = prompt_embeds.shape[0] time_projected_prompt_embeds = self.embedding_proj(prompt_embeds) time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings) additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings) clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens) clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1) text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states) text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states) text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1) return (text_encoder_hidden_states, additive_clip_time_embeddings) # File: diffusers-main/src/diffusers/pipelines/unidiffuser/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ImageTextPipelineOutput, UniDiffuserPipeline _dummy_objects.update({'ImageTextPipelineOutput': ImageTextPipelineOutput, 'UniDiffuserPipeline': UniDiffuserPipeline}) else: _import_structure['modeling_text_decoder'] = ['UniDiffuserTextDecoder'] _import_structure['modeling_uvit'] = ['UniDiffuserModel', 'UTransformer2DModel'] _import_structure['pipeline_unidiffuser'] = ['ImageTextPipelineOutput', 'UniDiffuserPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ImageTextPipelineOutput, UniDiffuserPipeline else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformer2DModel from 
.pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py from typing import Optional import numpy as np import torch from torch import nn from transformers import GPT2Config, GPT2LMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): _keys_to_ignore_on_load_unexpected = ['h\\.\\d+\\.attn\\.bias', 'h\\.\\d+\\.attn\\.masked_bias'] @register_to_config def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int]=None, vocab_size: int=50257, n_positions: int=1024, n_embd: int=768, n_layer: int=12, n_head: int=12, n_inner: Optional[int]=None, activation_function: str='gelu_new', resid_pdrop: float=0.1, embd_pdrop: float=0.1, attn_pdrop: float=0.1, layer_norm_epsilon: float=1e-05, initializer_range: float=0.02, scale_attn_weights: bool=True, use_cache: bool=True, scale_attn_by_inverse_layer_idx: bool=False, reorder_and_upcast_attn: bool=False): super().__init__() self.prefix_length = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError(f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and `n_embd`: {n_embd} are not equal.') self.prefix_inner_dim = prefix_inner_dim self.prefix_hidden_dim = prefix_hidden_dim self.encode_prefix = nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim) if self.prefix_hidden_dim is not None else nn.Identity() self.decode_prefix = nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity() gpt_config = GPT2Config(vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn) self.transformer = GPT2LMHeadModel(gpt_config) def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None): embedding_text = self.transformer.transformer.wte(input_ids) hidden = self.encode_prefix(prefix_embeds) prefix_embeds = self.decode_prefix(hidden) embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1) if labels is not None: dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device) labels = torch.cat((dummy_token, input_ids), dim=1) out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask) if self.prefix_hidden_dim is not None: return (out, hidden) else: return out def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor: return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device) def encode(self, prefix): return self.encode_prefix(prefix) @torch.no_grad() def generate_captions(self, features, eos_token_id, device): features = 
torch.split(features, 1, dim=0) generated_tokens = [] generated_seq_lengths = [] for feature in features: feature = self.decode_prefix(feature.to(device)) (output_tokens, seq_lengths) = self.generate_beam(input_embeds=feature, device=device, eos_token_id=eos_token_id) generated_tokens.append(output_tokens[0]) generated_seq_lengths.append(seq_lengths[0]) generated_tokens = torch.stack(generated_tokens) generated_seq_lengths = torch.stack(generated_seq_lengths) return (generated_tokens, generated_seq_lengths) @torch.no_grad() def generate_beam(self, input_ids=None, input_embeds=None, device=None, beam_size: int=5, entry_length: int=67, temperature: float=1.0, eos_token_id: Optional[int]=None): stop_token_index = eos_token_id tokens = None scores = None seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int) is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool) if input_embeds is not None: generated = input_embeds else: generated = self.transformer.transformer.wte(input_ids) for i in range(entry_length): outputs = self.transformer(inputs_embeds=generated) logits = outputs.logits logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) logits = logits.softmax(-1).log() if scores is None: (scores, next_tokens) = logits.topk(beam_size, -1) generated = generated.expand(beam_size, *generated.shape[1:]) (next_tokens, scores) = (next_tokens.permute(1, 0), scores.squeeze(0)) if tokens is None: tokens = next_tokens else: tokens = tokens.expand(beam_size, *tokens.shape[1:]) tokens = torch.cat((tokens, next_tokens), dim=1) else: logits[is_stopped] = -float(np.inf) logits[is_stopped, 0] = 0 scores_sum = scores[:, None] + logits seq_lengths[~is_stopped] += 1 scores_sum_average = scores_sum / seq_lengths[:, None] (scores_sum_average, next_tokens) = scores_sum_average.view(-1).topk(beam_size, -1) next_tokens_source = next_tokens // scores_sum.shape[1] seq_lengths = seq_lengths[next_tokens_source] next_tokens = next_tokens % scores_sum.shape[1] next_tokens = next_tokens.unsqueeze(1) tokens = tokens[next_tokens_source] tokens = torch.cat((tokens, next_tokens), dim=1) generated = generated[next_tokens_source] scores = scores_sum_average * seq_lengths is_stopped = is_stopped[next_tokens_source] next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1) generated = torch.cat((generated, next_token_embed), dim=1) is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze() if is_stopped.all(): break scores = scores / seq_lengths order = scores.argsort(descending=True) output_texts = [tokens[i] for i in order] output_texts = torch.stack(output_texts, dim=0) seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype) return (output_texts, seq_lengths) # File: diffusers-main/src/diffusers/pipelines/unidiffuser/modeling_uvit.py import math from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin from ...models.attention import FeedForward from ...models.attention_processor import Attention from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed from ...models.modeling_outputs import Transformer2DModelOutput from ...models.normalization import AdaLayerNorm from ...utils import logging logger = logging.get_logger(__name__) def _no_grad_trunc_normal_(tensor, mean, std, a, b): def norm_cdf(x): return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 if mean < a - 2 * 
std or mean > b + 2 * std: logger.warning('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.') with torch.no_grad(): l = norm_cdf((a - mean) / std) u = norm_cdf((b - mean) / std) tensor.uniform_(2 * l - 1, 2 * u - 1) tensor.erfinv_() tensor.mul_(std * math.sqrt(2.0)) tensor.add_(mean) tensor.clamp_(min=a, max=b) return tensor def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): return _no_grad_trunc_normal_(tensor, mean, std, a, b) class PatchEmbed(nn.Module): def __init__(self, height=224, width=224, patch_size=16, in_channels=3, embed_dim=768, layer_norm=False, flatten=True, bias=True, use_pos_embed=True): super().__init__() num_patches = height // patch_size * (width // patch_size) self.flatten = flatten self.layer_norm = layer_norm self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias) if layer_norm: self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-06) else: self.norm = None self.use_pos_embed = use_pos_embed if self.use_pos_embed: pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches ** 0.5)) self.register_buffer('pos_embed', torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False) def forward(self, latent): latent = self.proj(latent) if self.flatten: latent = latent.flatten(2).transpose(1, 2) if self.layer_norm: latent = self.norm(latent) if self.use_pos_embed: return latent + self.pos_embed else: return latent class SkipBlock(nn.Module): def __init__(self, dim: int): super().__init__() self.skip_linear = nn.Linear(2 * dim, dim) self.norm = nn.LayerNorm(dim) def forward(self, x, skip): x = self.skip_linear(torch.cat([x, skip], dim=-1)) x = self.norm(x) return x class UTransformerBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int]=None, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None, attention_bias: bool=False, only_cross_attention: bool=False, double_self_attention: bool=False, upcast_attention: bool=False, norm_elementwise_affine: bool=True, norm_type: str='layer_norm', pre_layer_norm: bool=True, final_dropout: bool=False): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm = num_embeds_ada_norm is not None and norm_type == 'ada_norm' self.pre_layer_norm = pre_layer_norm if norm_type in ('ada_norm', 'ada_norm_zero') and num_embeds_ada_norm is None: raise ValueError(f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. 
Please make sure to define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.') self.attn1 = Attention(query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention) if cross_attention_dim is not None or double_self_attention: self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention) else: self.attn2 = None if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) if cross_attention_dim is not None or double_self_attention: self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) else: self.norm2 = None self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None): if self.pre_layer_norm: if self.use_ada_layer_norm: norm_hidden_states = self.norm1(hidden_states, timestep) else: norm_hidden_states = self.norm1(hidden_states) else: norm_hidden_states = hidden_states cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs) if not self.pre_layer_norm: if self.use_ada_layer_norm: attn_output = self.norm1(attn_output, timestep) else: attn_output = self.norm1(attn_output) hidden_states = attn_output + hidden_states if self.attn2 is not None: if self.pre_layer_norm: norm_hidden_states = self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) else: norm_hidden_states = hidden_states attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs) if not self.pre_layer_norm: attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output) hidden_states = attn_output + hidden_states if self.pre_layer_norm: norm_hidden_states = self.norm3(hidden_states) else: norm_hidden_states = hidden_states ff_output = self.ff(norm_hidden_states) if not self.pre_layer_norm: ff_output = self.norm3(ff_output) hidden_states = ff_output + hidden_states return hidden_states class UniDiffuserBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int]=None, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None, attention_bias: bool=False, only_cross_attention: bool=False, double_self_attention: bool=False, upcast_attention: bool=False, norm_elementwise_affine: bool=True, norm_type: str='layer_norm', pre_layer_norm: bool=False, final_dropout: bool=True): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm = num_embeds_ada_norm is not None and norm_type == 'ada_norm' 
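# --- Editor's note with illustrative sketch (not part of the library source) -------------------
# Unlike UTransformerBlock above, which defaults to pre-layer-norm (pre_layer_norm=True),
# UniDiffuserBlock defaults to post-layer-norm (pre_layer_norm=False): each residual sum is
# normalized *after* the attention / feed-forward sub-block rather than before it. Minimal
# comparison of the two orderings on a dummy tensor (dimensions are arbitrary assumptions):
import torch
from torch import nn
_demo_dim = 16
_demo_x = torch.randn(2, 4, _demo_dim)
_demo_norm, _demo_ff = nn.LayerNorm(_demo_dim), nn.Linear(_demo_dim, _demo_dim)
_demo_pre_ln = _demo_x + _demo_ff(_demo_norm(_demo_x))   # pre-LN: sub-block sees normalized input
_demo_post_ln = _demo_norm(_demo_x + _demo_ff(_demo_x))  # post-LN: residual sum is normalized
# -----------------------------------------------------------------------------------------------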
self.pre_layer_norm = pre_layer_norm if norm_type in ('ada_norm', 'ada_norm_zero') and num_embeds_ada_norm is None: raise ValueError(f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.') self.attn1 = Attention(query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention) if cross_attention_dim is not None or double_self_attention: self.attn2 = Attention(query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention) else: self.attn2 = None if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) if cross_attention_dim is not None or double_self_attention: self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) else: self.norm2 = None self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None): if self.pre_layer_norm: if self.use_ada_layer_norm: hidden_states = self.norm1(hidden_states, timestep) else: hidden_states = self.norm1(hidden_states) cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} attn_output = self.attn1(hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs) hidden_states = attn_output + hidden_states if not self.pre_layer_norm: if self.use_ada_layer_norm: hidden_states = self.norm1(hidden_states, timestep) else: hidden_states = self.norm1(hidden_states) if self.attn2 is not None: if self.pre_layer_norm: hidden_states = self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) attn_output = self.attn2(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs) hidden_states = attn_output + hidden_states if not self.pre_layer_norm: hidden_states = self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) if self.pre_layer_norm: hidden_states = self.norm3(hidden_states) ff_output = self.ff(hidden_states) hidden_states = ff_output + hidden_states if not self.pre_layer_norm: hidden_states = self.norm3(hidden_states) return hidden_states class UTransformer2DModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, out_channels: Optional[int]=None, num_layers: int=1, dropout: float=0.0, norm_num_groups: int=32, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, sample_size: Optional[int]=None, num_vector_embeds: Optional[int]=None, patch_size: Optional[int]=2, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None, use_linear_projection: bool=False, only_cross_attention: 
bool=False, upcast_attention: bool=False, norm_type: str='layer_norm', block_type: str='unidiffuser', pre_layer_norm: bool=False, norm_elementwise_affine: bool=True, use_patch_pos_embed=False, ff_final_dropout: bool=False): super().__init__() self.use_linear_projection = use_linear_projection self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim assert in_channels is not None and patch_size is not None, 'Patch input requires in_channels and patch_size.' assert sample_size is not None, 'UTransformer2DModel over patched input must provide sample_size' self.height = sample_size self.width = sample_size self.patch_size = patch_size self.pos_embed = PatchEmbed(height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, use_pos_embed=use_patch_pos_embed) if block_type == 'unidiffuser': block_cls = UniDiffuserBlock else: block_cls = UTransformerBlock self.transformer_in_blocks = nn.ModuleList([block_cls(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout) for d in range(num_layers // 2)]) self.transformer_mid_block = block_cls(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout) self.transformer_out_blocks = nn.ModuleList([nn.ModuleDict({'skip': SkipBlock(inner_dim), 'block': block_cls(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout)}) for d in range(num_layers // 2)]) self.out_channels = in_channels if out_channels is None else out_channels self.norm_out = nn.LayerNorm(inner_dim) def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, cross_attention_kwargs=None, return_dict: bool=True, hidden_states_is_embedding: bool=False, unpatchify: bool=True): if not unpatchify and return_dict: raise ValueError(f'Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim) rather than (batch_size, num_channels, height, width).') if not hidden_states_is_embedding: hidden_states = self.pos_embed(hidden_states) skips = [] for in_block in self.transformer_in_blocks: hidden_states = in_block(hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels) skips.append(hidden_states) hidden_states = self.transformer_mid_block(hidden_states) 
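# U-ViT skip connections: each entry in transformer_out_blocks first fuses the current
# hidden states with the matching transformer_in_blocks activation (popped in reverse order)
# through a SkipBlock (concatenate -> Linear -> LayerNorm) before running its transformer block.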
for out_block in self.transformer_out_blocks: hidden_states = out_block['skip'](hidden_states, skips.pop()) hidden_states = out_block['block'](hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels) hidden_states = self.norm_out(hidden_states) if unpatchify: height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape(shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)) hidden_states = torch.einsum('nhwpqc->nchpwq', hidden_states) output = hidden_states.reshape(shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)) else: output = hidden_states if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) class UniDiffuserModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, text_dim: int=768, clip_img_dim: int=512, num_text_tokens: int=77, num_attention_heads: int=16, attention_head_dim: int=88, in_channels: Optional[int]=None, out_channels: Optional[int]=None, num_layers: int=1, dropout: float=0.0, norm_num_groups: int=32, cross_attention_dim: Optional[int]=None, attention_bias: bool=False, sample_size: Optional[int]=None, num_vector_embeds: Optional[int]=None, patch_size: Optional[int]=None, activation_fn: str='geglu', num_embeds_ada_norm: Optional[int]=None, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, norm_type: str='layer_norm', block_type: str='unidiffuser', pre_layer_norm: bool=False, use_timestep_embedding=False, norm_elementwise_affine: bool=True, use_patch_pos_embed=False, ff_final_dropout: bool=True, use_data_type_embedding: bool=False): super().__init__() self.inner_dim = num_attention_heads * attention_head_dim assert sample_size is not None, 'UniDiffuserModel over patched input must provide sample_size' self.sample_size = sample_size self.in_channels = in_channels self.out_channels = in_channels if out_channels is None else out_channels self.patch_size = patch_size self.num_patches = self.sample_size // patch_size * (self.sample_size // patch_size) self.vae_img_in = PatchEmbed(height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=self.inner_dim, use_pos_embed=use_patch_pos_embed) self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim) self.text_in = nn.Linear(text_dim, self.inner_dim) self.timestep_img_proj = Timesteps(self.inner_dim, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_img_embed = TimestepEmbedding(self.inner_dim, 4 * self.inner_dim, out_dim=self.inner_dim) if use_timestep_embedding else nn.Identity() self.timestep_text_proj = Timesteps(self.inner_dim, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_text_embed = TimestepEmbedding(self.inner_dim, 4 * self.inner_dim, out_dim=self.inner_dim) if use_timestep_embedding else nn.Identity() self.num_text_tokens = num_text_tokens self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim)) self.pos_embed_drop = nn.Dropout(p=dropout) trunc_normal_(self.pos_embed, std=0.02) self.use_data_type_embedding = use_data_type_embedding if self.use_data_type_embedding: self.data_type_token_embedding = nn.Embedding(2, self.inner_dim) self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim)) self.transformer = UTransformer2DModel(num_attention_heads=num_attention_heads, 
attention_head_dim=attention_head_dim, in_channels=in_channels, out_channels=out_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, patch_size=patch_size, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, block_type=block_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, use_patch_pos_embed=use_patch_pos_embed, ff_final_dropout=ff_final_dropout) patch_dim = patch_size ** 2 * out_channels self.vae_img_out = nn.Linear(self.inner_dim, patch_dim) self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim) self.text_out = nn.Linear(self.inner_dim, text_dim) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed'} def forward(self, latent_image_embeds: torch.Tensor, image_embeds: torch.Tensor, prompt_embeds: torch.Tensor, timestep_img: Union[torch.Tensor, float, int], timestep_text: Union[torch.Tensor, float, int], data_type: Optional[Union[torch.Tensor, float, int]]=1, encoder_hidden_states=None, cross_attention_kwargs=None): batch_size = latent_image_embeds.shape[0] vae_hidden_states = self.vae_img_in(latent_image_embeds) clip_hidden_states = self.clip_img_in(image_embeds) text_hidden_states = self.text_in(prompt_embeds) (num_text_tokens, num_img_tokens) = (text_hidden_states.size(1), vae_hidden_states.size(1)) if not torch.is_tensor(timestep_img): timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device) timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device) timestep_img_token = self.timestep_img_proj(timestep_img) timestep_img_token = timestep_img_token.to(dtype=self.dtype) timestep_img_token = self.timestep_img_embed(timestep_img_token) timestep_img_token = timestep_img_token.unsqueeze(dim=1) if not torch.is_tensor(timestep_text): timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device) timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device) timestep_text_token = self.timestep_text_proj(timestep_text) timestep_text_token = timestep_text_token.to(dtype=self.dtype) timestep_text_token = self.timestep_text_embed(timestep_text_token) timestep_text_token = timestep_text_token.unsqueeze(dim=1) if self.use_data_type_embedding: assert data_type is not None, 'data_type must be supplied if the model uses a data type embedding' if not torch.is_tensor(data_type): data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device) data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device) data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1) hidden_states = torch.cat([timestep_img_token, timestep_text_token, data_type_token, text_hidden_states, clip_hidden_states, vae_hidden_states], dim=1) else: hidden_states = torch.cat([timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states], dim=1) if self.use_data_type_embedding: pos_embed = torch.cat([self.pos_embed[:, :1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1:, :]], dim=1) else: pos_embed = self.pos_embed hidden_states = hidden_states + pos_embed hidden_states = 
self.pos_embed_drop(hidden_states) hidden_states = self.transformer(hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=None, class_labels=None, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, hidden_states_is_embedding=True, unpatchify=False)[0] if self.use_data_type_embedding: (t_img_token_out, t_text_token_out, data_type_token_out, text_out, img_clip_out, img_vae_out) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1) else: (t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out) = hidden_states.split((1, 1, num_text_tokens, 1, num_img_tokens), dim=1) img_vae_out = self.vae_img_out(img_vae_out) height = width = int(img_vae_out.shape[1] ** 0.5) img_vae_out = img_vae_out.reshape(shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)) img_vae_out = torch.einsum('nhwpqc->nchpwq', img_vae_out) img_vae_out = img_vae_out.reshape(shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)) img_clip_out = self.clip_img_out(img_clip_out) text_out = self.text_out(text_out) return (img_vae_out, img_clip_out, text_out) # File: diffusers-main/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py import inspect from dataclasses import dataclass from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, GPT2Tokenizer from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ...utils.outputs import BaseOutput from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel logger = logging.get_logger(__name__) @dataclass class ImageTextPipelineOutput(BaseOutput): images: Optional[Union[List[PIL.Image.Image], np.ndarray]] text: Optional[Union[List[str], List[List[str]]]] class UniDiffuserPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->image_encoder->unet->vae->text_decoder' def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, image_encoder: CLIPVisionModelWithProjection, clip_image_processor: CLIPImageProcessor, clip_tokenizer: CLIPTokenizer, text_decoder: UniDiffuserTextDecoder, text_tokenizer: GPT2Tokenizer, unet: UniDiffuserModel, scheduler: KarrasDiffusionSchedulers): super().__init__() if text_encoder.config.hidden_size != text_decoder.prefix_inner_dim: raise ValueError(f'The text encoder hidden size and text decoder prefix inner dim must be the same, but `text_encoder.config.hidden_size`: {text_encoder.config.hidden_size} and `text_decoder.prefix_inner_dim`: {text_decoder.prefix_inner_dim}') self.register_modules(vae=vae, text_encoder=text_encoder, image_encoder=image_encoder, clip_image_processor=clip_image_processor, clip_tokenizer=clip_tokenizer, text_decoder=text_decoder, text_tokenizer=text_tokenizer, unet=unet, scheduler=scheduler) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.num_channels_latents = vae.config.latent_channels 
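# The pipeline resolves one of five modes from its inputs or an explicit set_*_mode() call:
# 'text2img', 'img2text', 'joint', 'text', or 'img'. The encoder/UNet dimensions cached in
# __init__ are reused for latent shape checks, the default height/width, and for splitting
# and combining the joint latent vector.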
self.text_encoder_seq_len = text_encoder.config.max_position_embeddings self.text_encoder_hidden_size = text_encoder.config.hidden_size self.image_encoder_projection_dim = image_encoder.config.projection_dim self.unet_resolution = unet.config.sample_size self.text_intermediate_dim = self.text_encoder_hidden_size if self.text_decoder.prefix_hidden_dim is not None: self.text_intermediate_dim = self.text_decoder.prefix_hidden_dim self.mode = None self.safety_checker = None def prepare_extra_step_kwargs(self, generator, eta): accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs['eta'] = eta accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs['generator'] = generator return extra_step_kwargs def _infer_mode(self, prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents): prompt_available = prompt is not None or prompt_embeds is not None image_available = image is not None input_available = prompt_available or image_available prompt_latents_available = prompt_latents is not None vae_latents_available = vae_latents is not None clip_latents_available = clip_latents is not None full_latents_available = latents is not None image_latents_available = vae_latents_available and clip_latents_available all_indv_latents_available = prompt_latents_available and image_latents_available if self.mode is not None: mode = self.mode elif prompt_available: mode = 'text2img' elif image_available: mode = 'img2text' elif full_latents_available or all_indv_latents_available: mode = 'joint' elif prompt_latents_available: mode = 'text' elif image_latents_available: mode = 'img' else: mode = 'joint' if self.mode is None and prompt_available and image_available: logger.warning(f"You have supplied both a text prompt and image to the pipeline and mode has not been set manually, defaulting to mode '{mode}'.") if self.mode is None and (not input_available): if vae_latents_available != clip_latents_available: logger.warning(f"You have supplied exactly one of `vae_latents` and `clip_latents`, whereas either both or none are expected to be supplied. 
Defaulting to mode '{mode}'.") elif not prompt_latents_available and (not vae_latents_available) and (not clip_latents_available): logger.warning(f"No inputs or latents have been supplied, and mode has not been manually set, defaulting to mode '{mode}'.") return mode def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_vae_tiling(self): self.vae.enable_tiling() def disable_vae_tiling(self): self.vae.disable_tiling() def set_text_mode(self): self.mode = 'text' def set_image_mode(self): self.mode = 'img' def set_text_to_image_mode(self): self.mode = 'text2img' def set_image_to_text_mode(self): self.mode = 'img2text' def set_joint_mode(self): self.mode = 'joint' def reset_mode(self): self.mode = None def _infer_batch_size(self, mode, prompt, prompt_embeds, image, num_images_per_prompt, num_prompts_per_image, latents, prompt_latents, vae_latents, clip_latents): if num_images_per_prompt is None: num_images_per_prompt = 1 if num_prompts_per_image is None: num_prompts_per_image = 1 assert num_images_per_prompt > 0, 'num_images_per_prompt must be a positive integer' assert num_prompts_per_image > 0, 'num_prompts_per_image must be a positive integer' if mode in ['text2img']: if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] multiplier = num_images_per_prompt elif mode in ['img2text']: if isinstance(image, PIL.Image.Image): batch_size = 1 else: batch_size = image.shape[0] multiplier = num_prompts_per_image elif mode in ['img']: if vae_latents is not None: batch_size = vae_latents.shape[0] elif clip_latents is not None: batch_size = clip_latents.shape[0] else: batch_size = 1 multiplier = num_images_per_prompt elif mode in ['text']: if prompt_latents is not None: batch_size = prompt_latents.shape[0] else: batch_size = 1 multiplier = num_prompts_per_image elif mode in ['joint']: if latents is not None: batch_size = latents.shape[0] elif prompt_latents is not None: batch_size = prompt_latents.shape[0] elif vae_latents is not None: batch_size = vae_latents.shape[0] elif clip_latents is not None: batch_size = clip_latents.shape[0] else: batch_size = 1 if num_images_per_prompt == num_prompts_per_image: multiplier = num_images_per_prompt else: multiplier = min(num_images_per_prompt, num_prompts_per_image) logger.warning(f'You are using mode `{mode}` and `num_images_per_prompt`: {num_images_per_prompt} and num_prompts_per_image: {num_prompts_per_image} are not equal. Using batch size equal to `min(num_images_per_prompt, num_prompts_per_image) = {batch_size}.') return (batch_size, multiplier) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, **kwargs): deprecation_message = '`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple.' 
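# _encode_prompt is retained for backward compatibility only: it delegates to encode_prompt
# and stacks the returned negative and positive embeddings into a single tensor (negative first).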
deprecate('_encode_prompt()', '1.0.0', deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, lora_scale: Optional[float]=None, clip_skip: Optional[int]=None): if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.clip_tokenizer) text_inputs = self.clip_tokenizer(prompt, padding='max_length', max_length=self.clip_tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids untruncated_ids = self.clip_tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.clip_tokenizer.batch_decode(untruncated_ids[:, self.clip_tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.clip_tokenizer.model_max_length} tokens: {removed_text}') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True) prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) (bs_embed, seq_len, _) = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): 
raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.clip_tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.clip_tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt') if hasattr(self.text_encoder.config, 'use_attention_mask') and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=attention_mask) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: unscale_lora_layers(self.text_encoder, lora_scale) return (prompt_embeds, negative_prompt_embeds) def encode_image_vae_latents(self, image, batch_size, num_prompts_per_image, dtype, device, do_classifier_free_guidance, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_prompts_per_image if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if isinstance(generator, list): image_latents = [self.vae.encode(image[i:i + 1]).latent_dist.sample(generator=generator[i]) * self.vae.config.scaling_factor for i in range(batch_size)] image_latents = torch.cat(image_latents, dim=0) else: image_latents = self.vae.encode(image).latent_dist.sample(generator=generator) image_latents = image_latents * self.vae.config.scaling_factor if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' 
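# Deprecated duplication path: when more text prompts than initial images are supplied and the
# prompt count is an exact multiple of the image count, the VAE latents are tiled along the
# batch dimension to match; otherwise a ValueError is raised.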
deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts.') else: image_latents = torch.cat([image_latents], dim=0) if do_classifier_free_guidance: uncond_image_latents = torch.zeros_like(image_latents) image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) return image_latents def encode_image_clip_latents(self, image, batch_size, num_prompts_per_image, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError(f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}') preprocessed_image = self.clip_image_processor.preprocess(image, return_tensors='pt') preprocessed_image = preprocessed_image.to(device=device, dtype=dtype) batch_size = batch_size * num_prompts_per_image if isinstance(generator, list): image_latents = [self.image_encoder(**preprocessed_image[i:i + 1]).image_embeds for i in range(batch_size)] image_latents = torch.cat(image_latents, dim=0) else: image_latents = self.image_encoder(**preprocessed_image).image_embeds if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: deprecation_message = f'You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial images (`image`). Initial images are now duplicating to match the number of text prompts. Note that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update your script to pass as many initial images as text prompts to suppress this warning.' deprecate('len(prompt) != len(image)', '1.0.0', deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError(f'Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts.') else: image_latents = torch.cat([image_latents], dim=0) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') return image_latents def prepare_text_latents(self, batch_size, num_images_per_prompt, seq_len, hidden_size, dtype, device, generator, latents=None): shape = (batch_size * num_images_per_prompt, seq_len, hidden_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. 
Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.repeat(num_images_per_prompt, 1, 1) latents = latents.to(device=device, dtype=dtype) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_vae_latents(self, batch_size, num_prompts_per_image, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size * num_prompts_per_image, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.repeat(num_prompts_per_image, 1, 1, 1) latents = latents.to(device=device, dtype=dtype) latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_clip_latents(self, batch_size, num_prompts_per_image, clip_img_dim, dtype, device, generator, latents=None): shape = (batch_size * num_prompts_per_image, 1, clip_img_dim) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError(f'You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators.') if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.repeat(num_prompts_per_image, 1, 1) latents = latents.to(device=device, dtype=dtype) latents = latents * self.scheduler.init_noise_sigma return latents def decode_text_latents(self, text_latents, device): (output_token_list, seq_lengths) = self.text_decoder.generate_captions(text_latents, self.text_tokenizer.eos_token_id, device=device) output_list = output_token_list.cpu().numpy() generated_text = [self.text_tokenizer.decode(output[:int(length)], skip_special_tokens=True) for (output, length) in zip(output_list, seq_lengths)] return generated_text def _split(self, x, height, width): batch_size = x.shape[0] latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor img_vae_dim = self.num_channels_latents * latent_height * latent_width (img_vae, img_clip) = x.split([img_vae_dim, self.image_encoder_projection_dim], dim=1) img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) return (img_vae, img_clip) def _combine(self, img_vae, img_clip): img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) return torch.concat([img_vae, img_clip], dim=-1) def _split_joint(self, x, height, width): batch_size = x.shape[0] latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor img_vae_dim = self.num_channels_latents * latent_height * latent_width text_dim = self.text_encoder_seq_len * self.text_intermediate_dim (img_vae, img_clip, text) = x.split([img_vae_dim, self.image_encoder_projection_dim, text_dim], dim=1) img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) img_clip = 
torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) text = torch.reshape(text, (batch_size, self.text_encoder_seq_len, self.text_intermediate_dim)) return (img_vae, img_clip, text) def _combine_joint(self, img_vae, img_clip, text): img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) text = torch.reshape(text, (text.shape[0], -1)) return torch.concat([img_vae, img_clip, text], dim=-1) def _get_noise_pred(self, mode, latents, t, prompt_embeds, img_vae, img_clip, max_timestep, data_type, guidance_scale, generator, device, height, width): if mode == 'joint': (img_vae_latents, img_clip_latents, text_latents) = self._split_joint(latents, height, width) (img_vae_out, img_clip_out, text_out) = self.unet(img_vae_latents, img_clip_latents, text_latents, timestep_img=t, timestep_text=t, data_type=data_type) x_out = self._combine_joint(img_vae_out, img_clip_out, text_out) if guidance_scale <= 1.0: return x_out img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) (_, _, text_out_uncond) = self.unet(img_vae_T, img_clip_T, text_latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type) (img_vae_out_uncond, img_clip_out_uncond, _) = self.unet(img_vae_latents, img_clip_latents, text_T, timestep_img=t, timestep_text=max_timestep, data_type=data_type) x_out_uncond = self._combine_joint(img_vae_out_uncond, img_clip_out_uncond, text_out_uncond) return guidance_scale * x_out + (1.0 - guidance_scale) * x_out_uncond elif mode == 'text2img': (img_vae_latents, img_clip_latents) = self._split(latents, height, width) (img_vae_out, img_clip_out, text_out) = self.unet(img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=0, data_type=data_type) img_out = self._combine(img_vae_out, img_clip_out) if guidance_scale <= 1.0: return img_out text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) (img_vae_out_uncond, img_clip_out_uncond, text_out_uncond) = self.unet(img_vae_latents, img_clip_latents, text_T, timestep_img=t, timestep_text=max_timestep, data_type=data_type) img_out_uncond = self._combine(img_vae_out_uncond, img_clip_out_uncond) return guidance_scale * img_out + (1.0 - guidance_scale) * img_out_uncond elif mode == 'img2text': (img_vae_out, img_clip_out, text_out) = self.unet(img_vae, img_clip, latents, timestep_img=0, timestep_text=t, data_type=data_type) if guidance_scale <= 1.0: return text_out img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) (img_vae_out_uncond, img_clip_out_uncond, text_out_uncond) = self.unet(img_vae_T, img_clip_T, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type) return guidance_scale * text_out + (1.0 - guidance_scale) * text_out_uncond elif mode == 'text': (img_vae_out, img_clip_out, text_out) = self.unet(img_vae, img_clip, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type) return text_out elif mode == 'img': (img_vae_latents, img_clip_latents) = self._split(latents, height, width) (img_vae_out, img_clip_out, text_out) = self.unet(img_vae_latents, img_clip_latents, 
prompt_embeds, timestep_img=t, timestep_text=max_timestep, data_type=data_type) img_out = self._combine(img_vae_out, img_clip_out) return img_out def check_latents_shape(self, latents_name, latents, expected_shape): latents_shape = latents.shape expected_num_dims = len(expected_shape) + 1 expected_shape_str = ', '.join((str(dim) for dim in expected_shape)) if len(latents_shape) != expected_num_dims: raise ValueError(f'`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape {latents_shape} has {len(latents_shape)} dimensions.') for i in range(1, expected_num_dims): if latents_shape[i] != expected_shape[i - 1]: raise ValueError(f'`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape {latents_shape} has {latents_shape[i]} != {expected_shape[i - 1]} at dimension {i}.') def check_inputs(self, mode, prompt, image, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, latents=None, prompt_latents=None, vae_latents=None, clip_latents=None): if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0: raise ValueError(f'`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}.') if callback_steps is None or (callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)): raise ValueError(f'`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}.') if mode == 'text2img': if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if mode == 'img2text': if image is None: raise ValueError('`img2text` mode requires an image to be provided.') latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor full_latents_available = latents is not None prompt_latents_available = prompt_latents is not None vae_latents_available = vae_latents is not None clip_latents_available = clip_latents is not None if full_latents_available: individual_latents_available = prompt_latents is not None or vae_latents is not None or clip_latents is not None if individual_latents_available: logger.warning('You have supplied both `latents` and at least one of `prompt_latents`, `vae_latents`, and `clip_latents`. 
The value of `latents` will override the value of any individually supplied latents.') img_vae_dim = self.num_channels_latents * latent_height * latent_width text_dim = self.text_encoder_seq_len * self.text_encoder_hidden_size latents_dim = img_vae_dim + self.image_encoder_projection_dim + text_dim latents_expected_shape = (latents_dim,) self.check_latents_shape('latents', latents, latents_expected_shape) if prompt_latents_available: prompt_latents_expected_shape = (self.text_encoder_seq_len, self.text_encoder_hidden_size) self.check_latents_shape('prompt_latents', prompt_latents, prompt_latents_expected_shape) if vae_latents_available: vae_latents_expected_shape = (self.num_channels_latents, latent_height, latent_width) self.check_latents_shape('vae_latents', vae_latents, vae_latents_expected_shape) if clip_latents_available: clip_latents_expected_shape = (1, self.image_encoder_projection_dim) self.check_latents_shape('clip_latents', clip_latents, clip_latents_expected_shape) if mode in ['text2img', 'img'] and vae_latents_available and clip_latents_available: if vae_latents.shape[0] != clip_latents.shape[0]: raise ValueError(f'Both `vae_latents` and `clip_latents` are supplied, but their batch dimensions are not equal: {vae_latents.shape[0]} != {clip_latents.shape[0]}.') if mode == 'joint' and prompt_latents_available and vae_latents_available and clip_latents_available: if prompt_latents.shape[0] != vae_latents.shape[0] or prompt_latents.shape[0] != clip_latents.shape[0]: raise ValueError(f'All of `prompt_latents`, `vae_latents`, and `clip_latents` are supplied, but their batch dimensions are not equal: {prompt_latents.shape[0]} != {vae_latents.shape[0]} != {clip_latents.shape[0]}.') @torch.no_grad() def __call__(self, prompt: Optional[Union[str, List[str]]]=None, image: Optional[Union[torch.Tensor, PIL.Image.Image]]=None, height: Optional[int]=None, width: Optional[int]=None, data_type: Optional[int]=1, num_inference_steps: int=50, guidance_scale: float=8.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: Optional[int]=1, num_prompts_per_image: Optional[int]=1, eta: float=0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, prompt_latents: Optional[torch.Tensor]=None, vae_latents: Optional[torch.Tensor]=None, clip_latents: Optional[torch.Tensor]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback: Optional[Callable[[int, int, torch.Tensor], None]]=None, callback_steps: int=1): height = height or self.unet_resolution * self.vae_scale_factor width = width or self.unet_resolution * self.vae_scale_factor mode = self._infer_mode(prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents) self.check_inputs(mode, prompt, image, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, latents, prompt_latents, vae_latents, clip_latents) (batch_size, multiplier) = self._infer_batch_size(mode, prompt, prompt_embeds, image, num_images_per_prompt, num_prompts_per_image, latents, prompt_latents, vae_latents, clip_latents) device = self._execution_device reduce_text_emb_dim = self.text_intermediate_dim < self.text_encoder_hidden_size or self.mode != 'text2img' do_classifier_free_guidance = guidance_scale > 1.0 if latents is not None: (vae_latents, clip_latents, prompt_latents) = self._split_joint(latents, height, width) if mode in 
['text2img']: assert prompt is not None or prompt_embeds is not None (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=multiplier, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) else: prompt_embeds = self.prepare_text_latents(batch_size=batch_size, num_images_per_prompt=multiplier, seq_len=self.text_encoder_seq_len, hidden_size=self.text_encoder_hidden_size, dtype=self.text_encoder.dtype, device=device, generator=generator, latents=prompt_latents) if reduce_text_emb_dim: prompt_embeds = self.text_decoder.encode(prompt_embeds) if mode in ['img2text']: assert image is not None, '`img2text` requires a conditioning image' image_vae = self.image_processor.preprocess(image) (height, width) = image_vae.shape[-2:] image_vae_latents = self.encode_image_vae_latents(image=image_vae, batch_size=batch_size, num_prompts_per_image=multiplier, dtype=prompt_embeds.dtype, device=device, do_classifier_free_guidance=False, generator=generator) image_clip_latents = self.encode_image_clip_latents(image=image, batch_size=batch_size, num_prompts_per_image=multiplier, dtype=prompt_embeds.dtype, device=device, generator=generator) image_clip_latents = image_clip_latents.unsqueeze(1) else: image_vae_latents = self.prepare_image_vae_latents(batch_size=batch_size, num_prompts_per_image=multiplier, num_channels_latents=self.num_channels_latents, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=vae_latents) image_clip_latents = self.prepare_image_clip_latents(batch_size=batch_size, num_prompts_per_image=multiplier, clip_img_dim=self.image_encoder_projection_dim, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=clip_latents) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps max_timestep = self.scheduler.config.num_train_timesteps if mode == 'joint': latents = self._combine_joint(image_vae_latents, image_clip_latents, prompt_embeds) elif mode in ['text2img', 'img']: latents = self._combine(image_vae_latents, image_clip_latents) elif mode in ['img2text', 'text']: latents = prompt_embeds extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) logger.debug(f'Scheduler extra step kwargs: {extra_step_kwargs}') num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for (i, t) in enumerate(timesteps): noise_pred = self._get_noise_pred(mode, latents, t, prompt_embeds, image_vae_latents, image_clip_latents, max_timestep, data_type, guidance_scale, generator, device, height, width) latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if i == len(timesteps) - 1 or (i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) image = None text = None if mode == 'joint': (image_vae_latents, image_clip_latents, text_latents) = self._split_joint(latents, height, width) if not output_type == 'latent': image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = image_vae_latents text = self.decode_text_latents(text_latents, device) elif mode in ['text2img', 'img']: (image_vae_latents, 
image_clip_latents) = self._split(latents, height, width) if not output_type == 'latent': image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = image_vae_latents elif mode in ['img2text', 'text']: text_latents = latents text = self.decode_text_latents(text_latents, device) self.maybe_free_model_hooks() if image is not None: do_denormalize = [True] * image.shape[0] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, text) return ImageTextPipelineOutput(images=image, text=text) # File: diffusers-main/src/diffusers/pipelines/wuerstchen/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure['modeling_paella_vq_model'] = ['PaellaVQModel'] _import_structure['modeling_wuerstchen_diffnext'] = ['WuerstchenDiffNeXt'] _import_structure['modeling_wuerstchen_prior'] = ['WuerstchenPrior'] _import_structure['pipeline_wuerstchen'] = ['WuerstchenDecoderPipeline'] _import_structure['pipeline_wuerstchen_combined'] = ['WuerstchenCombinedPipeline'] _import_structure['pipeline_wuerstchen_prior'] = ['DEFAULT_STAGE_C_TIMESTEPS', 'WuerstchenPriorPipeline'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt from .modeling_wuerstchen_prior import WuerstchenPrior from .pipeline_wuerstchen import WuerstchenDecoderPipeline from .pipeline_wuerstchen_combined import WuerstchenCombinedPipeline from .pipeline_wuerstchen_prior import DEFAULT_STAGE_C_TIMESTEPS, WuerstchenPriorPipeline else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py from typing import Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.autoencoders.vae import DecoderOutput, VectorQuantizer from ...models.modeling_utils import ModelMixin from ...models.vq_model import VQEncoderOutput from ...utils.accelerate_utils import apply_forward_hook class MixingResidualBlock(nn.Module): def __init__(self, inp_channels, embed_dim): super().__init__() self.norm1 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-06) self.depthwise = nn.Sequential(nn.ReplicationPad2d(1), nn.Conv2d(inp_channels, inp_channels, kernel_size=3, groups=inp_channels)) self.norm2 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-06) self.channelwise = nn.Sequential(nn.Linear(inp_channels, 
embed_dim), nn.GELU(), nn.Linear(embed_dim, inp_channels)) self.gammas = nn.Parameter(torch.zeros(6), requires_grad=True) def forward(self, x): mods = self.gammas x_temp = self.norm1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[0]) + mods[1] x = x + self.depthwise(x_temp) * mods[2] x_temp = self.norm2(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[3]) + mods[4] x = x + self.channelwise(x_temp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * mods[5] return x class PaellaVQModel(ModelMixin, ConfigMixin): @register_to_config def __init__(self, in_channels: int=3, out_channels: int=3, up_down_scale_factor: int=2, levels: int=2, bottleneck_blocks: int=12, embed_dim: int=384, latent_channels: int=4, num_vq_embeddings: int=8192, scale_factor: float=0.3764): super().__init__() c_levels = [embed_dim // 2 ** i for i in reversed(range(levels))] self.in_block = nn.Sequential(nn.PixelUnshuffle(up_down_scale_factor), nn.Conv2d(in_channels * up_down_scale_factor ** 2, c_levels[0], kernel_size=1)) down_blocks = [] for i in range(levels): if i > 0: down_blocks.append(nn.Conv2d(c_levels[i - 1], c_levels[i], kernel_size=4, stride=2, padding=1)) block = MixingResidualBlock(c_levels[i], c_levels[i] * 4) down_blocks.append(block) down_blocks.append(nn.Sequential(nn.Conv2d(c_levels[-1], latent_channels, kernel_size=1, bias=False), nn.BatchNorm2d(latent_channels))) self.down_blocks = nn.Sequential(*down_blocks) self.vquantizer = VectorQuantizer(num_vq_embeddings, vq_embed_dim=latent_channels, legacy=False, beta=0.25) up_blocks = [nn.Sequential(nn.Conv2d(latent_channels, c_levels[-1], kernel_size=1))] for i in range(levels): for j in range(bottleneck_blocks if i == 0 else 1): block = MixingResidualBlock(c_levels[levels - 1 - i], c_levels[levels - 1 - i] * 4) up_blocks.append(block) if i < levels - 1: up_blocks.append(nn.ConvTranspose2d(c_levels[levels - 1 - i], c_levels[levels - 2 - i], kernel_size=4, stride=2, padding=1)) self.up_blocks = nn.Sequential(*up_blocks) self.out_block = nn.Sequential(nn.Conv2d(c_levels[0], out_channels * up_down_scale_factor ** 2, kernel_size=1), nn.PixelShuffle(up_down_scale_factor)) @apply_forward_hook def encode(self, x: torch.Tensor, return_dict: bool=True) -> VQEncoderOutput: h = self.in_block(x) h = self.down_blocks(h) if not return_dict: return (h,) return VQEncoderOutput(latents=h) @apply_forward_hook def decode(self, h: torch.Tensor, force_not_quantize: bool=True, return_dict: bool=True) -> Union[DecoderOutput, torch.Tensor]: if not force_not_quantize: (quant, _, _) = self.vquantizer(h) else: quant = h x = self.up_blocks(quant) dec = self.out_block(x) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward(self, sample: torch.Tensor, return_dict: bool=True) -> Union[DecoderOutput, torch.Tensor]: x = sample h = self.encode(x).latents dec = self.decode(h).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # File: diffusers-main/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py import torch import torch.nn as nn from ...models.attention_processor import Attention class WuerstchenLayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, x): x = x.permute(0, 2, 3, 1) x = super().forward(x) return x.permute(0, 3, 1, 2) class TimestepBlock(nn.Module): def __init__(self, c, c_timestep): super().__init__() self.mapper = nn.Linear(c_timestep, c * 2) def forward(self, x, t): (a, b) = self.mapper(t)[:, :, None, None].chunk(2, dim=1) return x * (1 + a) + b class 
ResBlock(nn.Module): def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): super().__init__() self.depthwise = nn.Conv2d(c + c_skip, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-06) self.channelwise = nn.Sequential(nn.Linear(c, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), nn.Linear(c * 4, c)) def forward(self, x, x_skip=None): x_res = x if x_skip is not None: x = torch.cat([x, x_skip], dim=1) x = self.norm(self.depthwise(x)).permute(0, 2, 3, 1) x = self.channelwise(x).permute(0, 3, 1, 2) return x + x_res class GlobalResponseNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True) stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-06) return self.gamma * (x * stand_div_norm) + self.beta + x class AttnBlock(nn.Module): def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0): super().__init__() self.self_attn = self_attn self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-06) self.attention = Attention(query_dim=c, heads=nhead, dim_head=c // nhead, dropout=dropout, bias=True) self.kv_mapper = nn.Sequential(nn.SiLU(), nn.Linear(c_cond, c)) def forward(self, x, kv): kv = self.kv_mapper(kv) norm_x = self.norm(x) if self.self_attn: (batch_size, channel, _, _) = x.shape kv = torch.cat([norm_x.view(batch_size, channel, -1).transpose(1, 2), kv], dim=1) x = x + self.attention(norm_x, encoder_hidden_states=kv) return x # File: diffusers-main/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py import math import numpy as np import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin from .modeling_wuerstchen_common import AttnBlock, GlobalResponseNorm, TimestepBlock, WuerstchenLayerNorm class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): @register_to_config def __init__(self, c_in=4, c_out=4, c_r=64, patch_size=2, c_cond=1024, c_hidden=[320, 640, 1280, 1280], nhead=[-1, 10, 20, 20], blocks=[4, 4, 14, 4], level_config=['CT', 'CTA', 'CTA', 'CTA'], inject_effnet=[False, True, True, True], effnet_embd=16, clip_embd=1024, kernel_size=3, dropout=0.1): super().__init__() self.c_r = c_r self.c_cond = c_cond if not isinstance(dropout, list): dropout = [dropout] * len(c_hidden) self.clip_mapper = nn.Linear(clip_embd, c_cond) self.effnet_mappers = nn.ModuleList([nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None for inject in inject_effnet + list(reversed(inject_effnet))]) self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-06) self.embedding = nn.Sequential(nn.PixelUnshuffle(patch_size), nn.Conv2d(c_in * patch_size ** 2, c_hidden[0], kernel_size=1), WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-06)) def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0): if block_type == 'C': return ResBlockStageB(c_hidden, c_skip, kernel_size=kernel_size, dropout=dropout) elif block_type == 'A': return AttnBlock(c_hidden, c_cond, nhead, self_attn=True, dropout=dropout) elif block_type == 'T': return TimestepBlock(c_hidden, c_r) else: raise ValueError(f'Block type {block_type} not supported') self.down_blocks = nn.ModuleList() for i in range(len(c_hidden)): down_block = nn.ModuleList() if i > 0: 
down_block.append(nn.Sequential(WuerstchenLayerNorm(c_hidden[i - 1], elementwise_affine=False, eps=1e-06), nn.Conv2d(c_hidden[i - 1], c_hidden[i], kernel_size=2, stride=2))) for _ in range(blocks[i]): for block_type in level_config[i]: c_skip = c_cond if inject_effnet[i] else 0 down_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) self.down_blocks.append(down_block) self.up_blocks = nn.ModuleList() for i in reversed(range(len(c_hidden))): up_block = nn.ModuleList() for j in range(blocks[i]): for (k, block_type) in enumerate(level_config[i]): c_skip = c_hidden[i] if i < len(c_hidden) - 1 and j == k == 0 else 0 c_skip += c_cond if inject_effnet[i] else 0 up_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) if i > 0: up_block.append(nn.Sequential(WuerstchenLayerNorm(c_hidden[i], elementwise_affine=False, eps=1e-06), nn.ConvTranspose2d(c_hidden[i], c_hidden[i - 1], kernel_size=2, stride=2))) self.up_blocks.append(up_block) self.clf = nn.Sequential(WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-06), nn.Conv2d(c_hidden[0], 2 * c_out * patch_size ** 2, kernel_size=1), nn.PixelShuffle(patch_size)) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, (nn.Conv2d, nn.Linear)): nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) for mapper in self.effnet_mappers: if mapper is not None: nn.init.normal_(mapper.weight, std=0.02) nn.init.normal_(self.clip_mapper.weight, std=0.02) nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) nn.init.constant_(self.clf[1].weight, 0) for level_block in self.down_blocks + self.up_blocks: for block in level_block: if isinstance(block, ResBlockStageB): block.channelwise[-1].weight.data *= np.sqrt(1 / sum(self.config.blocks)) elif isinstance(block, TimestepBlock): nn.init.constant_(block.mapper.weight, 0) def gen_r_embedding(self, r, max_positions=10000): r = r * max_positions half_dim = self.c_r // 2 emb = math.log(max_positions) / (half_dim - 1) emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() emb = r[:, None] * emb[None, :] emb = torch.cat([emb.sin(), emb.cos()], dim=1) if self.c_r % 2 == 1: emb = nn.functional.pad(emb, (0, 1), mode='constant') return emb.to(dtype=r.dtype) def gen_c_embeddings(self, clip): clip = self.clip_mapper(clip) clip = self.seq_norm(clip) return clip def _down_encode(self, x, r_embed, effnet, clip=None): level_outputs = [] for (i, down_block) in enumerate(self.down_blocks): effnet_c = None for block in down_block: if isinstance(block, ResBlockStageB): if effnet_c is None and self.effnet_mappers[i] is not None: dtype = effnet.dtype effnet_c = self.effnet_mappers[i](nn.functional.interpolate(effnet.float(), size=x.shape[-2:], mode='bicubic', antialias=True, align_corners=True).to(dtype)) skip = effnet_c if self.effnet_mappers[i] is not None else None x = block(x, skip) elif isinstance(block, AttnBlock): x = block(x, clip) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else: x = block(x) level_outputs.insert(0, x) return level_outputs def _up_decode(self, level_outputs, r_embed, effnet, clip=None): x = level_outputs[0] for (i, up_block) in enumerate(self.up_blocks): effnet_c = None for (j, block) in enumerate(up_block): if isinstance(block, ResBlockStageB): if effnet_c is None and self.effnet_mappers[len(self.down_blocks) + i] is not None: dtype = effnet.dtype effnet_c = self.effnet_mappers[len(self.down_blocks) + 
i](nn.functional.interpolate(effnet.float(), size=x.shape[-2:], mode='bicubic', antialias=True, align_corners=True).to(dtype)) skip = level_outputs[i] if j == 0 and i > 0 else None if effnet_c is not None: if skip is not None: skip = torch.cat([skip, effnet_c], dim=1) else: skip = effnet_c x = block(x, skip) elif isinstance(block, AttnBlock): x = block(x, clip) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else: x = block(x) return x def forward(self, x, r, effnet, clip=None, x_cat=None, eps=0.001, return_noise=True): if x_cat is not None: x = torch.cat([x, x_cat], dim=1) r_embed = self.gen_r_embedding(r) if clip is not None: clip = self.gen_c_embeddings(clip) x_in = x x = self.embedding(x) level_outputs = self._down_encode(x, r_embed, effnet, clip) x = self._up_decode(level_outputs, r_embed, effnet, clip) (a, b) = self.clf(x).chunk(2, dim=1) b = b.sigmoid() * (1 - eps * 2) + eps if return_noise: return (x_in - a) / b else: return (a, b) class ResBlockStageB(nn.Module): def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): super().__init__() self.depthwise = nn.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-06) self.channelwise = nn.Sequential(nn.Linear(c + c_skip, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), nn.Linear(c * 4, c)) def forward(self, x, x_skip=None): x_res = x x = self.norm(self.depthwise(x)) if x_skip is not None: x = torch.cat([x, x_skip], dim=1) x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x + x_res # File: diffusers-main/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py import math from typing import Dict, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin from ...models.attention_processor import ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor from ...models.modeling_utils import ModelMixin from ...utils import is_torch_version from .modeling_wuerstchen_common import AttnBlock, ResBlock, TimestepBlock, WuerstchenLayerNorm class WuerstchenPrior(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): unet_name = 'prior' _supports_gradient_checkpointing = True @register_to_config def __init__(self, c_in=16, c=1280, c_cond=1024, c_r=64, depth=16, nhead=16, dropout=0.1): super().__init__() self.c_r = c_r self.projection = nn.Conv2d(c_in, c, kernel_size=1) self.cond_mapper = nn.Sequential(nn.Linear(c_cond, c), nn.LeakyReLU(0.2), nn.Linear(c, c)) self.blocks = nn.ModuleList() for _ in range(depth): self.blocks.append(ResBlock(c, dropout=dropout)) self.blocks.append(TimestepBlock(c, c_r)) self.blocks.append(AttnBlock(c, c, nhead, self_attn=True, dropout=dropout)) self.out = nn.Sequential(WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-06), nn.Conv2d(c, c_in * 2, kernel_size=1)) self.gradient_checkpointing = False self.set_default_attn_processor() @property def attn_processors(self) -> Dict[str, AttentionProcessor]: processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, 'get_processor'): processors[f'{name}.processor'] = module.get_processor() for (sub_name, child) in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}', child, processors) return processors for (name, module) in 
self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError(f'A dict of processors was passed, but the number of processors {len(processor)} does not match the number of attention layers: {count}. Please make sure to pass {count} processor classes.') def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, 'set_processor'): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f'{name}.processor')) for (sub_name, child) in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor) for (name, module) in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): if all((proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnAddedKVProcessor() elif all((proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values())): processor = AttnProcessor() else: raise ValueError(f'Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}') self.set_attn_processor(processor) def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value def gen_r_embedding(self, r, max_positions=10000): r = r * max_positions half_dim = self.c_r // 2 emb = math.log(max_positions) / (half_dim - 1) emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() emb = r[:, None] * emb[None, :] emb = torch.cat([emb.sin(), emb.cos()], dim=1) if self.c_r % 2 == 1: emb = nn.functional.pad(emb, (0, 1), mode='constant') return emb.to(dtype=r.dtype) def forward(self, x, r, c): x_in = x x = self.projection(x) c_embed = self.cond_mapper(c) r_embed = self.gen_r_embedding(r) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version('>=', '1.11.0'): for block in self.blocks: if isinstance(block, AttnBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, c_embed, use_reentrant=False) elif isinstance(block, TimestepBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, r_embed, use_reentrant=False) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) else: for block in self.blocks: if isinstance(block, AttnBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, c_embed) elif isinstance(block, TimestepBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, r_embed) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x) else: for block in self.blocks: if isinstance(block, AttnBlock): x = block(x, c_embed) elif isinstance(block, TimestepBlock): x = block(x, r_embed) else: x = block(x) (a, b) = self.out(x).chunk(2, dim=1) return (x_in - a) / ((1 - b).abs() + 1e-05) # File: diffusers-main/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py from typing import Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer from ...schedulers import DDPMWuerstchenScheduler from ...utils import 
deprecate, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import WuerstchenPriorPipeline, WuerstchenDecoderPipeline\n\n >>> prior_pipe = WuerstchenPriorPipeline.from_pretrained(\n ... "warp-ai/wuerstchen-prior", torch_dtype=torch.float16\n ... ).to("cuda")\n >>> gen_pipe = WuerstchenDecoderPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=torch.float16).to(\n ... "cuda"\n ... )\n\n >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"\n >>> prior_output = prior_pipe(prompt)\n >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt)\n ```\n' class WuerstchenDecoderPipeline(DiffusionPipeline): model_cpu_offload_seq = 'text_encoder->decoder->vqgan' _callback_tensor_inputs = ['latents', 'text_encoder_hidden_states', 'negative_prompt_embeds', 'image_embeddings'] def __init__(self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, decoder: WuerstchenDiffNeXt, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, latent_dim_scale: float=10.67) -> None: super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, decoder=decoder, scheduler=scheduler, vqgan=vqgan) self.register_to_config(latent_dim_scale=latent_dim_scale) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] attention_mask = attention_mask[:, :self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device)) text_encoder_hidden_states = text_encoder_output.last_hidden_state text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_text_encoder_hidden_states = None if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif 
isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device)) uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(batch_size * num_images_per_prompt, seq_len, -1) return (text_encoder_hidden_states, uncond_text_encoder_hidden_states) @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, image_embeddings: Union[torch.Tensor, List[torch.Tensor]], prompt: Union[str, List[str]]=None, num_inference_steps: int=12, timesteps: Optional[List[float]]=None, guidance_scale: float=0.0, negative_prompt: Optional[Union[str, List[str]]]=None, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pil', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') device = self._execution_device dtype = self.decoder.dtype self._guidance_scale = guidance_scale if not isinstance(prompt, list): if isinstance(prompt, str): prompt = [prompt] else: raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.") if self.do_classifier_free_guidance: if negative_prompt is not None and (not isinstance(negative_prompt, list)): if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] else: raise TypeError(f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}.") if isinstance(image_embeddings, list): image_embeddings = torch.cat(image_embeddings, dim=0) if isinstance(image_embeddings, np.ndarray): image_embeddings = torch.Tensor(image_embeddings, 
device=device).to(dtype=dtype) if not isinstance(image_embeddings, torch.Tensor): raise TypeError(f"'image_embeddings' must be of type 'torch.Tensor' or 'np.array', but got {type(image_embeddings)}.") if not isinstance(num_inference_steps, int): raise TypeError(f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)} In Case you want to provide explicit timesteps, please use the 'timesteps' argument.") (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt, device, image_embeddings.size(0) * num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt) text_encoder_hidden_states = torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds effnet = torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) if self.do_classifier_free_guidance else image_embeddings latent_height = int(image_embeddings.size(2) * self.config.latent_dim_scale) latent_width = int(image_embeddings.size(3) * self.config.latent_dim_scale) latent_features_shape = (image_embeddings.size(0) * num_images_per_prompt, 4, latent_height, latent_width) if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps latents = self.prepare_latents(latent_features_shape, dtype, device, generator, latents, self.scheduler) self._num_timesteps = len(timesteps[:-1]) for (i, t) in enumerate(self.progress_bar(timesteps[:-1])): ratio = t.expand(latents.size(0)).to(dtype) predicted_latents = self.decoder(torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, effnet=effnet, clip=text_encoder_hidden_states) if self.do_classifier_free_guidance: (predicted_latents_text, predicted_latents_uncond) = predicted_latents.chunk(2) predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale) latents = self.scheduler.step(model_output=predicted_latents, timestep=ratio, sample=latents, generator=generator).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) image_embeddings = callback_outputs.pop('image_embeddings', image_embeddings) text_encoder_hidden_states = callback_outputs.pop('text_encoder_hidden_states', text_encoder_hidden_states) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) if output_type not in ['pt', 'np', 'pil', 'latent']: raise ValueError(f'Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}') if not output_type == 'latent': latents = self.vqgan.config.scale_factor * latents images = self.vqgan.decode(latents).sample.clamp(0, 1) if output_type == 'np': images = images.permute(0, 2, 3, 1).cpu().float().numpy() elif output_type == 'pil': images = images.permute(0, 2, 3, 1).cpu().float().numpy() images = self.numpy_to_pil(images) else: images = latents self.maybe_free_model_hooks() if not return_dict: return images return ImagePipelineOutput(images) # File: diffusers-main/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py from typing 
import Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...schedulers import DDPMWuerstchenScheduler from ...utils import deprecate, replace_example_docstring from ..pipeline_utils import DiffusionPipeline from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt from .modeling_wuerstchen_prior import WuerstchenPrior from .pipeline_wuerstchen import WuerstchenDecoderPipeline from .pipeline_wuerstchen_prior import WuerstchenPriorPipeline TEXT2IMAGE_EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import WuerstchenCombinedPipeline\n\n >>> pipe = WuerstchenCombinedPipeline.from_pretrained("warp-ai/Wuerstchen", torch_dtype=torch.float16).to(\n ... "cuda"\n ... )\n >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"\n >>> images = pipe(prompt=prompt)\n ```\n' class WuerstchenCombinedPipeline(DiffusionPipeline): _load_connected_pipes = True def __init__(self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, decoder: WuerstchenDiffNeXt, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, prior_tokenizer: CLIPTokenizer, prior_text_encoder: CLIPTextModel, prior_prior: WuerstchenPrior, prior_scheduler: DDPMWuerstchenScheduler): super().__init__() self.register_modules(text_encoder=text_encoder, tokenizer=tokenizer, decoder=decoder, scheduler=scheduler, vqgan=vqgan, prior_prior=prior_prior, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler) self.prior_pipe = WuerstchenPriorPipeline(prior=prior_prior, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler) self.decoder_pipe = WuerstchenDecoderPipeline(text_encoder=text_encoder, tokenizer=tokenizer, decoder=decoder, scheduler=scheduler, vqgan=vqgan) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_model_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) def enable_sequential_cpu_offload(self, gpu_id: Optional[int]=None, device: Union[torch.device, str]='cuda'): self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, height: int=512, width: int=512, prior_num_inference_steps: int=60, prior_timesteps: Optional[List[float]]=None, prior_guidance_scale: float=4.0, num_inference_steps: int=12, decoder_timesteps: Optional[List[float]]=None, decoder_guidance_scale: float=0.0, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, num_images_per_prompt: int=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, 
output_type: Optional[str]='pil', return_dict: bool=True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, prior_callback_on_step_end_tensor_inputs: List[str]=['latents'], callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): prior_kwargs = {} if kwargs.get('prior_callback', None) is not None: prior_kwargs['callback'] = kwargs.pop('prior_callback') deprecate('prior_callback', '1.0.0', 'Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`') if kwargs.get('prior_callback_steps', None) is not None: deprecate('prior_callback_steps', '1.0.0', 'Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`') prior_kwargs['callback_steps'] = kwargs.pop('prior_callback_steps') prior_outputs = self.prior_pipe(prompt=prompt if prompt_embeds is None else None, height=height, width=width, num_inference_steps=prior_num_inference_steps, timesteps=prior_timesteps, guidance_scale=prior_guidance_scale, negative_prompt=negative_prompt if negative_prompt_embeds is None else None, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_images_per_prompt=num_images_per_prompt, generator=generator, latents=latents, output_type='pt', return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, **prior_kwargs) image_embeddings = prior_outputs[0] outputs = self.decoder_pipe(image_embeddings=image_embeddings, prompt=prompt if prompt is not None else '', num_inference_steps=num_inference_steps, timesteps=decoder_timesteps, guidance_scale=decoder_guidance_scale, negative_prompt=negative_prompt, generator=generator, output_type=output_type, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs) return outputs # File: diffusers-main/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py from dataclasses import dataclass from math import ceil from typing import Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer from ...loaders import StableDiffusionLoraLoaderMixin from ...schedulers import DDPMWuerstchenScheduler from ...utils import BaseOutput, deprecate, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .modeling_wuerstchen_prior import WuerstchenPrior logger = logging.get_logger(__name__) DEFAULT_STAGE_C_TIMESTEPS = list(np.linspace(1.0, 2 / 3, 20)) + list(np.linspace(2 / 3, 0.0, 11))[1:] EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import WuerstchenPriorPipeline\n\n >>> prior_pipe = WuerstchenPriorPipeline.from_pretrained(\n ... "warp-ai/wuerstchen-prior", torch_dtype=torch.float16\n ... 
).to("cuda")\n\n >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet"\n >>> prior_output = prior_pipe(prompt)\n ```\n' @dataclass class WuerstchenPriorPipelineOutput(BaseOutput): image_embeddings: Union[torch.Tensor, np.ndarray] class WuerstchenPriorPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): unet_name = 'prior' text_encoder_name = 'text_encoder' model_cpu_offload_seq = 'text_encoder->prior' _callback_tensor_inputs = ['latents', 'text_encoder_hidden_states', 'negative_prompt_embeds'] _lora_loadable_modules = ['prior', 'text_encoder'] def __init__(self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, prior: WuerstchenPrior, scheduler: DDPMWuerstchenScheduler, latent_mean: float=42.0, latent_std: float=1.0, resolution_multiple: float=42.67) -> None: super().__init__() self.register_modules(tokenizer=tokenizer, text_encoder=text_encoder, prior=prior, scheduler=scheduler) self.register_to_config(latent_mean=latent_mean, latent_std=latent_std, resolution_multiple=resolution_multiple) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def encode_prompt(self, device, num_images_per_prompt, do_classifier_free_guidance, prompt=None, negative_prompt=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and (not torch.equal(text_input_ids, untruncated_ids)): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) logger.warning(f'The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}') text_input_ids = text_input_ids[:, :self.tokenizer.model_max_length] attention_mask = attention_mask[:, :self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device)) prompt_embeds = text_encoder_output.last_hidden_state prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) if negative_prompt_embeds is None and do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [''] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError(f'`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} != {type(prompt)}.') elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError(f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but 
`prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.') else: uncond_tokens = negative_prompt uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.last_hidden_state if do_classifier_free_guidance: seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return (prompt_embeds, negative_prompt_embeds) def check_inputs(self, prompt, negative_prompt, num_inference_steps, do_classifier_free_guidance, prompt_embeds=None, negative_prompt_embeds=None): if prompt is not None and prompt_embeds is not None: raise ValueError(f'Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two.') elif prompt is None and prompt_embeds is None: raise ValueError('Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.') elif prompt is not None and (not isinstance(prompt, str) and (not isinstance(prompt, list))): raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}') if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError(f'Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. 
Please make sure to only forward one of the two.') if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError(f'`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}.') if not isinstance(num_inference_steps, int): raise TypeError(f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)} In Case you want to provide explicit timesteps, please use the 'timesteps' argument.") @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__(self, prompt: Optional[Union[str, List[str]]]=None, height: int=1024, width: int=1024, num_inference_steps: int=60, timesteps: List[float]=None, guidance_scale: float=8.0, negative_prompt: Optional[Union[str, List[str]]]=None, prompt_embeds: Optional[torch.Tensor]=None, negative_prompt_embeds: Optional[torch.Tensor]=None, num_images_per_prompt: Optional[int]=1, generator: Optional[Union[torch.Generator, List[torch.Generator]]]=None, latents: Optional[torch.Tensor]=None, output_type: Optional[str]='pt', return_dict: bool=True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents'], **kwargs): callback = kwargs.pop('callback', None) callback_steps = kwargs.pop('callback_steps', None) if callback is not None: deprecate('callback', '1.0.0', 'Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_steps is not None: deprecate('callback_steps', '1.0.0', 'Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`') if callback_on_step_end_tensor_inputs is not None and (not all((k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs))): raise ValueError(f'`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}') device = self._execution_device self._guidance_scale = guidance_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt is not None and (not isinstance(prompt, list)): if isinstance(prompt, str): prompt = [prompt] else: raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.") if self.do_classifier_free_guidance: if negative_prompt is not None and (not isinstance(negative_prompt, list)): if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] else: raise TypeError(f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}.") self.check_inputs(prompt, negative_prompt, num_inference_steps, self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds) (prompt_embeds, negative_prompt_embeds) = self.encode_prompt(prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, 
negative_prompt_embeds=negative_prompt_embeds) text_encoder_hidden_states = torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds dtype = text_encoder_hidden_states.dtype latent_height = ceil(height / self.config.resolution_multiple) latent_width = ceil(width / self.config.resolution_multiple) num_channels = self.prior.config.c_in effnet_features_shape = (num_images_per_prompt * batch_size, num_channels, latent_height, latent_width) if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps latents = self.prepare_latents(effnet_features_shape, dtype, device, generator, latents, self.scheduler) self._num_timesteps = len(timesteps[:-1]) for (i, t) in enumerate(self.progress_bar(timesteps[:-1])): ratio = t.expand(latents.size(0)).to(dtype) predicted_image_embedding = self.prior(torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, c=text_encoder_hidden_states) if self.do_classifier_free_guidance: (predicted_image_embedding_text, predicted_image_embedding_uncond) = predicted_image_embedding.chunk(2) predicted_image_embedding = torch.lerp(predicted_image_embedding_uncond, predicted_image_embedding_text, self.guidance_scale) latents = self.scheduler.step(model_output=predicted_image_embedding, timestep=ratio, sample=latents, generator=generator).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop('latents', latents) text_encoder_hidden_states = callback_outputs.pop('text_encoder_hidden_states', text_encoder_hidden_states) negative_prompt_embeds = callback_outputs.pop('negative_prompt_embeds', negative_prompt_embeds) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, 'order', 1) callback(step_idx, t, latents) latents = latents * self.config.latent_mean - self.config.latent_std self.maybe_free_model_hooks() if output_type == 'np': latents = latents.cpu().float().numpy() if not return_dict: return (latents,) return WuerstchenPriorPipelineOutput(latents) # File: diffusers-main/src/diffusers/schedulers/__init__.py from typing import TYPE_CHECKING from ..utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available _dummy_modules = {} _import_structure = {} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_pt_objects _dummy_modules.update(get_objects_from_module(dummy_pt_objects)) else: _import_structure['deprecated'] = ['KarrasVeScheduler', 'ScoreSdeVpScheduler'] _import_structure['scheduling_amused'] = ['AmusedScheduler'] _import_structure['scheduling_consistency_decoder'] = ['ConsistencyDecoderScheduler'] _import_structure['scheduling_consistency_models'] = ['CMStochasticIterativeScheduler'] _import_structure['scheduling_ddim'] = ['DDIMScheduler'] _import_structure['scheduling_ddim_cogvideox'] = ['CogVideoXDDIMScheduler'] _import_structure['scheduling_ddim_inverse'] = ['DDIMInverseScheduler'] 
_import_structure['scheduling_ddim_parallel'] = ['DDIMParallelScheduler'] _import_structure['scheduling_ddpm'] = ['DDPMScheduler'] _import_structure['scheduling_ddpm_parallel'] = ['DDPMParallelScheduler'] _import_structure['scheduling_ddpm_wuerstchen'] = ['DDPMWuerstchenScheduler'] _import_structure['scheduling_deis_multistep'] = ['DEISMultistepScheduler'] _import_structure['scheduling_dpm_cogvideox'] = ['CogVideoXDPMScheduler'] _import_structure['scheduling_dpmsolver_multistep'] = ['DPMSolverMultistepScheduler'] _import_structure['scheduling_dpmsolver_multistep_inverse'] = ['DPMSolverMultistepInverseScheduler'] _import_structure['scheduling_dpmsolver_singlestep'] = ['DPMSolverSinglestepScheduler'] _import_structure['scheduling_edm_dpmsolver_multistep'] = ['EDMDPMSolverMultistepScheduler'] _import_structure['scheduling_edm_euler'] = ['EDMEulerScheduler'] _import_structure['scheduling_euler_ancestral_discrete'] = ['EulerAncestralDiscreteScheduler'] _import_structure['scheduling_euler_discrete'] = ['EulerDiscreteScheduler'] _import_structure['scheduling_flow_match_euler_discrete'] = ['FlowMatchEulerDiscreteScheduler'] _import_structure['scheduling_flow_match_heun_discrete'] = ['FlowMatchHeunDiscreteScheduler'] _import_structure['scheduling_heun_discrete'] = ['HeunDiscreteScheduler'] _import_structure['scheduling_ipndm'] = ['IPNDMScheduler'] _import_structure['scheduling_k_dpm_2_ancestral_discrete'] = ['KDPM2AncestralDiscreteScheduler'] _import_structure['scheduling_k_dpm_2_discrete'] = ['KDPM2DiscreteScheduler'] _import_structure['scheduling_lcm'] = ['LCMScheduler'] _import_structure['scheduling_pndm'] = ['PNDMScheduler'] _import_structure['scheduling_repaint'] = ['RePaintScheduler'] _import_structure['scheduling_sasolver'] = ['SASolverScheduler'] _import_structure['scheduling_sde_ve'] = ['ScoreSdeVeScheduler'] _import_structure['scheduling_tcd'] = ['TCDScheduler'] _import_structure['scheduling_unclip'] = ['UnCLIPScheduler'] _import_structure['scheduling_unipc_multistep'] = ['UniPCMultistepScheduler'] _import_structure['scheduling_utils'] = ['AysSchedules', 'KarrasDiffusionSchedulers', 'SchedulerMixin'] _import_structure['scheduling_vq_diffusion'] = ['VQDiffusionScheduler'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_flax_objects _dummy_modules.update(get_objects_from_module(dummy_flax_objects)) else: _import_structure['scheduling_ddim_flax'] = ['FlaxDDIMScheduler'] _import_structure['scheduling_ddpm_flax'] = ['FlaxDDPMScheduler'] _import_structure['scheduling_dpmsolver_multistep_flax'] = ['FlaxDPMSolverMultistepScheduler'] _import_structure['scheduling_euler_discrete_flax'] = ['FlaxEulerDiscreteScheduler'] _import_structure['scheduling_karras_ve_flax'] = ['FlaxKarrasVeScheduler'] _import_structure['scheduling_lms_discrete_flax'] = ['FlaxLMSDiscreteScheduler'] _import_structure['scheduling_pndm_flax'] = ['FlaxPNDMScheduler'] _import_structure['scheduling_sde_ve_flax'] = ['FlaxScoreSdeVeScheduler'] _import_structure['scheduling_utils_flax'] = ['FlaxKarrasDiffusionSchedulers', 'FlaxSchedulerMixin', 'FlaxSchedulerOutput', 'broadcast_to_shape_from_left'] try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_torch_and_scipy_objects _dummy_modules.update(get_objects_from_module(dummy_torch_and_scipy_objects)) else: _import_structure['scheduling_lms_discrete'] = ['LMSDiscreteScheduler'] try: if not 
(is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils import dummy_torch_and_torchsde_objects _dummy_modules.update(get_objects_from_module(dummy_torch_and_torchsde_objects)) else: _import_structure['scheduling_cosine_dpmsolver_multistep'] = ['CosineDPMSolverMultistepScheduler'] _import_structure['scheduling_dpmsolver_sde'] = ['DPMSolverSDEScheduler'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from ..utils import OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * else: from .deprecated import KarrasVeScheduler, ScoreSdeVpScheduler from .scheduling_amused import AmusedScheduler from .scheduling_consistency_decoder import ConsistencyDecoderScheduler from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_cogvideox import CogVideoXDDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_ddpm_wuerstchen import DDPMWuerstchenScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpm_cogvideox import CogVideoXDPMScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_edm_dpmsolver_multistep import EDMDPMSolverMultistepScheduler from .scheduling_edm_euler import EDMEulerScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler from .scheduling_flow_match_heun_discrete import FlowMatchHeunDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler from .scheduling_lcm import LCMScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sasolver import SASolverScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_tcd import TCDScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import AysSchedules, KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_euler_discrete_flax import FlaxEulerDiscreteScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from 
.scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * else: from .scheduling_cosine_dpmsolver_multistep import CosineDPMSolverMultistepScheduler from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_modules.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/schedulers/deprecated/__init__.py from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_pt_objects _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: _import_structure['scheduling_karras_ve'] = ['KarrasVeScheduler'] _import_structure['scheduling_sde_vp'] = ['ScoreSdeVpScheduler'] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_pt_objects import * else: from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) for (name, value) in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) # File: diffusers-main/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ...utils.torch_utils import randn_tensor from ..scheduling_utils import SchedulerMixin @dataclass class KarrasVeOutput(BaseOutput): prev_sample: torch.Tensor derivative: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None class KarrasVeScheduler(SchedulerMixin, ConfigMixin): order = 2 @register_to_config def __init__(self, sigma_min: float=0.02, sigma_max: float=100, s_noise: float=1.007, s_churn: float=80, s_min: float=0.05, s_max: float=50): self.init_noise_sigma = sigma_max self.num_inference_steps: int = None self.timesteps: np.IntTensor = None self.schedule: torch.Tensor = None def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.timesteps = torch.from_numpy(timesteps).to(device) schedule = 
[self.config.sigma_max ** 2 * (self.config.sigma_min ** 2 / self.config.sigma_max ** 2) ** (i / (num_inference_steps - 1)) for i in self.timesteps] self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device) def add_noise_to_input(self, sample: torch.Tensor, sigma: float, generator: Optional[torch.Generator]=None) -> Tuple[torch.Tensor, float]: if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / self.num_inference_steps, 2 ** 0.5 - 1) else: gamma = 0 eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device) sigma_hat = sigma + gamma * sigma sample_hat = sample + (sigma_hat ** 2 - sigma ** 2) ** 0.5 * eps return (sample_hat, sigma_hat) def step(self, model_output: torch.Tensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.Tensor, return_dict: bool=True) -> Union[KarrasVeOutput, Tuple]: pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput(prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample) def step_correct(self, model_output: torch.Tensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.Tensor, sample_prev: torch.Tensor, derivative: torch.Tensor, return_dict: bool=True) -> Union[KarrasVeOutput, Tuple]: pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput(prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample) def add_noise(self, original_samples, noise, timesteps): raise NotImplementedError() # File: diffusers-main/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py import math from typing import Union import torch from ...configuration_utils import ConfigMixin, register_to_config from ...utils.torch_utils import randn_tensor from ..scheduling_utils import SchedulerMixin class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=0.001): self.sigmas = None self.discrete_sigmas = None self.timesteps = None def set_timesteps(self, num_inference_steps, device: Union[str, torch.device]=None): self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device) def step_pred(self, score, x, t, generator=None): if self.timesteps is None: raise ValueError("`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler") log_mean_coeff = -0.25 * t ** 2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) std = std.flatten() while len(std.shape) < len(score.shape): std = std.unsqueeze(-1) score = -score / std dt = -1.0 / len(self.timesteps) beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) beta_t = beta_t.flatten() while len(beta_t.shape) < len(x.shape): beta_t = beta_t.unsqueeze(-1) drift = -0.5 * beta_t * x diffusion = torch.sqrt(beta_t) drift = drift - diffusion ** 2 * score x_mean = x + drift * dt noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, 
dtype=x.dtype) x = x_mean + diffusion * math.sqrt(-dt) * noise return (x, x_mean) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_amused.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin def gumbel_noise(t, generator=None): device = generator.device if generator is not None else t.device noise = torch.zeros_like(t, device=device).uniform_(0, 1, generator=generator).to(t.device) return -torch.log((-torch.log(noise.clamp(1e-20))).clamp(1e-20)) def mask_by_random_topk(mask_len, probs, temperature=1.0, generator=None): confidence = torch.log(probs.clamp(1e-20)) + temperature * gumbel_noise(probs, generator=generator) sorted_confidence = torch.sort(confidence, dim=-1).values cut_off = torch.gather(sorted_confidence, 1, mask_len.long()) masking = confidence < cut_off return masking @dataclass class AmusedSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: torch.Tensor = None class AmusedScheduler(SchedulerMixin, ConfigMixin): order = 1 temperatures: torch.Tensor @register_to_config def __init__(self, mask_token_id: int, masking_schedule: str='cosine'): self.temperatures = None self.timesteps = None def set_timesteps(self, num_inference_steps: int, temperature: Union[int, Tuple[int, int], List[int]]=(2, 0), device: Union[str, torch.device]=None): self.timesteps = torch.arange(num_inference_steps, device=device).flip(0) if isinstance(temperature, (tuple, list)): self.temperatures = torch.linspace(temperature[0], temperature[1], num_inference_steps, device=device) else: self.temperatures = torch.linspace(temperature, 0.01, num_inference_steps, device=device) def step(self, model_output: torch.Tensor, timestep: torch.long, sample: torch.LongTensor, starting_mask_ratio: int=1, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[AmusedSchedulerOutput, Tuple]: two_dim_input = sample.ndim == 3 and model_output.ndim == 4 if two_dim_input: (batch_size, codebook_size, height, width) = model_output.shape sample = sample.reshape(batch_size, height * width) model_output = model_output.reshape(batch_size, codebook_size, height * width).permute(0, 2, 1) unknown_map = sample == self.config.mask_token_id probs = model_output.softmax(dim=-1) device = probs.device probs_ = probs.to(generator.device) if generator is not None else probs if probs_.device.type == 'cpu' and probs_.dtype != torch.float32: probs_ = probs_.float() probs_ = probs_.reshape(-1, probs.size(-1)) pred_original_sample = torch.multinomial(probs_, 1, generator=generator).to(device=device) pred_original_sample = pred_original_sample[:, 0].view(*probs.shape[:-1]) pred_original_sample = torch.where(unknown_map, pred_original_sample, sample) if timestep == 0: prev_sample = pred_original_sample else: seq_len = sample.shape[1] step_idx = (self.timesteps == timestep).nonzero() ratio = (step_idx + 1) / len(self.timesteps) if self.config.masking_schedule == 'cosine': mask_ratio = torch.cos(ratio * math.pi / 2) elif self.config.masking_schedule == 'linear': mask_ratio = 1 - ratio else: raise ValueError(f'unknown masking schedule {self.config.masking_schedule}') mask_ratio = starting_mask_ratio * mask_ratio mask_len = (seq_len * mask_ratio).floor() mask_len = torch.min(unknown_map.sum(dim=-1, keepdim=True) - 1, mask_len) mask_len = 
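# The clamping of mask_len continues on the next line: it is kept between 1 and
# (number of still-masked tokens - 1), then mask_by_random_topk re-masks the mask_len
# lowest-confidence predictions, with Gumbel noise scaled by the per-step temperature breaking
# ties stochastically. For example, under the cosine schedule with starting_mask_ratio = 1,
# seq_len = 256 and ratio = 0.5, mask_ratio = cos(0.25 * pi) ~ 0.707, so roughly 181 tokens
# stay masked for the next step.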
torch.max(torch.tensor([1], device=model_output.device), mask_len) selected_probs = torch.gather(probs, -1, pred_original_sample[:, :, None])[:, :, 0] selected_probs = torch.where(unknown_map, selected_probs, torch.finfo(selected_probs.dtype).max) masking = mask_by_random_topk(mask_len, selected_probs, self.temperatures[step_idx], generator) prev_sample = torch.where(masking, self.config.mask_token_id, pred_original_sample) if two_dim_input: prev_sample = prev_sample.reshape(batch_size, height, width) pred_original_sample = pred_original_sample.reshape(batch_size, height, width) if not return_dict: return (prev_sample, pred_original_sample) return AmusedSchedulerOutput(prev_sample, pred_original_sample) def add_noise(self, sample, timesteps, generator=None): step_idx = (self.timesteps == timesteps).nonzero() ratio = (step_idx + 1) / len(self.timesteps) if self.config.masking_schedule == 'cosine': mask_ratio = torch.cos(ratio * math.pi / 2) elif self.config.masking_schedule == 'linear': mask_ratio = 1 - ratio else: raise ValueError(f'unknown masking schedule {self.config.masking_schedule}') mask_indices = torch.rand(sample.shape, device=generator.device if generator is not None else sample.device, generator=generator).to(sample.device) < mask_ratio masked_sample = sample.clone() masked_sample[mask_indices] = self.config.mask_token_id return masked_sample # File: diffusers-main/src/diffusers/schedulers/scheduling_consistency_decoder.py import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) @dataclass class ConsistencyDecoderSchedulerOutput(BaseOutput): prev_sample: torch.Tensor class ConsistencyDecoderScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1024, sigma_data: float=0.5): betas = betas_for_alpha_bar(num_train_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) self.sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod) self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod) sigmas = torch.sqrt(1.0 / alphas_cumprod - 1) sqrt_recip_alphas_cumprod = torch.sqrt(1.0 / alphas_cumprod) self.c_skip = sqrt_recip_alphas_cumprod * sigma_data ** 2 / (sigmas ** 2 + sigma_data ** 2) self.c_out = sigmas * sigma_data / (sigmas ** 2 + sigma_data ** 2) ** 0.5 self.c_in = sqrt_recip_alphas_cumprod / (sigmas ** 2 + sigma_data ** 2) ** 0.5 def set_timesteps(self, num_inference_steps: Optional[int]=None, device: Union[str, torch.device]=None): if num_inference_steps != 2: raise ValueError('Currently more than 2 inference steps are not supported.') self.timesteps = torch.tensor([1008, 512], dtype=torch.long, device=device) self.sqrt_alphas_cumprod = 
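# A minimal usage sketch for ConsistencyDecoderScheduler (illustrative, not part of the
# source). `decoder_unet` is a hypothetical denoiser; in practice the scheduler is driven
# internally by ConsistencyDecoderVAE rather than called directly.
import torch
from diffusers.schedulers.scheduling_consistency_decoder import ConsistencyDecoderScheduler

scheduler = ConsistencyDecoderScheduler()
scheduler.set_timesteps(2)                           # only the two-step schedule [1008, 512] is supported
sample = torch.randn(1, 3, 256, 256) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = decoder_unet(model_input, t)      # hypothetical call
    sample = scheduler.step(model_output, t, sample).prev_sample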
self.sqrt_alphas_cumprod.to(device) self.sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod.to(device) self.c_skip = self.c_skip.to(device) self.c_out = self.c_out.to(device) self.c_in = self.c_in.to(device) @property def init_noise_sigma(self): return self.sqrt_one_minus_alphas_cumprod[self.timesteps[0]] def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample * self.c_in[timestep] def step(self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[ConsistencyDecoderSchedulerOutput, Tuple]: x_0 = self.c_out[timestep] * model_output + self.c_skip[timestep] * sample timestep_idx = torch.where(self.timesteps == timestep)[0] if timestep_idx == len(self.timesteps) - 1: prev_sample = x_0 else: noise = randn_tensor(x_0.shape, generator=generator, dtype=x_0.dtype, device=x_0.device) prev_sample = self.sqrt_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * x_0 + self.sqrt_one_minus_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * noise if not return_dict: return (prev_sample,) return ConsistencyDecoderSchedulerOutput(prev_sample=prev_sample) # File: diffusers-main/src/diffusers/schedulers/scheduling_consistency_models.py from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, logging from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin logger = logging.get_logger(__name__) @dataclass class CMStochasticIterativeSchedulerOutput(BaseOutput): prev_sample: torch.Tensor class CMStochasticIterativeScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_train_timesteps: int=40, sigma_min: float=0.002, sigma_max: float=80.0, sigma_data: float=0.5, s_noise: float=1.0, rho: float=7.0, clip_denoised: bool=True): self.init_noise_sigma = sigma_max ramp = np.linspace(0, 1, num_train_timesteps) sigmas = self._convert_to_karras(ramp) timesteps = self.sigma_to_t(sigmas) self.num_inference_steps = None self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps) self.custom_timesteps = False self.is_scale_input_called = False self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sample / (sigma ** 2 + self.config.sigma_data ** 2) ** 0.5 self.is_scale_input_called = True return sample def sigma_to_t(self, sigmas: Union[float, np.ndarray]): if not isinstance(sigmas, np.ndarray): sigmas = np.array(sigmas, dtype=np.float64) timesteps = 1000 * 0.25 * np.log(sigmas + 1e-44) return timesteps def set_timesteps(self, num_inference_steps: Optional[int]=None, device: Union[str, torch.device]=None, timesteps: Optional[List[int]]=None): if num_inference_steps is None and timesteps is None: raise ValueError('Exactly one of `num_inference_steps` or `timesteps` must be supplied.') if num_inference_steps is not None and timesteps is not None: raise 
ValueError('Can only pass one of `num_inference_steps` or `timesteps`.') if timesteps is not None: for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError('`timesteps` must be in descending order.') if timesteps[0] >= self.config.num_train_timesteps: raise ValueError(f'`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}.') timesteps = np.array(timesteps, dtype=np.int64) self.custom_timesteps = True else: if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') self.num_inference_steps = num_inference_steps step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) self.custom_timesteps = False num_train_timesteps = self.config.num_train_timesteps ramp = timesteps[::-1].copy() ramp = ramp / (num_train_timesteps - 1) sigmas = self._convert_to_karras(ramp) timesteps = self.sigma_to_t(sigmas) sigmas = np.concatenate([sigmas, [self.config.sigma_min]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas).to(device=device) if str(device).startswith('mps'): self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) else: self.timesteps = torch.from_numpy(timesteps).to(device=device) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _convert_to_karras(self, ramp): sigma_min: float = self.config.sigma_min sigma_max: float = self.config.sigma_max rho = self.config.rho min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def get_scalings(self, sigma): sigma_data = self.config.sigma_data c_skip = sigma_data ** 2 / (sigma ** 2 + sigma_data ** 2) c_out = sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5 return (c_skip, c_out) def get_scalings_for_boundary_condition(self, sigma): sigma_min = self.config.sigma_min sigma_data = self.config.sigma_data c_skip = sigma_data ** 2 / ((sigma - sigma_min) ** 2 + sigma_data ** 2) c_out = (sigma - sigma_min) * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5 return (c_skip, c_out) def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[CMStochasticIterativeSchedulerOutput, Tuple]: if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): raise ValueError(f'Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to `{self.__class__}.step()` is not supported. 
Make sure to pass one of the `scheduler.timesteps` as a timestep.') if not self.is_scale_input_called: logger.warning('The `scale_model_input` function should be called before `step` to ensure correct denoising. See `StableDiffusionPipeline` for a usage example.') sigma_min = self.config.sigma_min sigma_max = self.config.sigma_max if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] if self.step_index + 1 < self.config.num_train_timesteps: sigma_next = self.sigmas[self.step_index + 1] else: sigma_next = self.sigmas[-1] (c_skip, c_out) = self.get_scalings_for_boundary_condition(sigma) denoised = c_out * model_output + c_skip * sample if self.config.clip_denoised: denoised = denoised.clamp(-1, 1) if len(self.timesteps) > 1: noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator) else: noise = torch.zeros_like(model_output) z = noise * self.config.s_noise sigma_hat = sigma_next.clamp(min=sigma_min, max=sigma_max) prev_sample = denoised + z * (sigma_hat ** 2 - sigma_min ** 2) ** 0.5 self._step_index += 1 if not return_dict: return (prev_sample,) return CMStochasticIterativeSchedulerOutput(prev_sample=prev_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_cosine_dpmsolver_multistep.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_dpmsolver_sde import BrownianTreeNoiseSampler from .scheduling_utils import SchedulerMixin, SchedulerOutput class CosineDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): _compatibles = [] order = 1 @register_to_config def __init__(self, sigma_min: float=0.3, sigma_max: float=500, sigma_data: float=1.0, sigma_schedule: str='exponential', num_train_timesteps: int=1000, solver_order: int=2, prediction_type: str='v_prediction', rho: float=7.0, solver_type: str='midpoint', lower_order_final: bool=True, euler_at_final: bool=False, final_sigmas_type: Optional[str]='zero'): if solver_type not in ['midpoint', 'heun']: if solver_type in ['logrho', 'bh1', 'bh2']: self.register_to_config(solver_type='midpoint') else: raise NotImplementedError(f'{solver_type} is not implemented for {self.__class__}') ramp = torch.linspace(0, 1, num_train_timesteps) if sigma_schedule == 'karras': sigmas = self._compute_karras_sigmas(ramp) elif sigma_schedule == 'exponential': sigmas = 
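# A minimal usage sketch for CMStochasticIterativeScheduler defined above (illustrative, not
# part of the source). `consistency_unet` is a hypothetical consistency model; see
# ConsistencyModelPipeline for the full integration.
import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler()
scheduler.set_timesteps(num_inference_steps=2)       # two-step stochastic sampling
sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    model_output = consistency_unet(scaled, t)       # hypothetical call
    sample = scheduler.step(model_output, t, sample).prev_sample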
self._compute_exponential_sigmas(ramp) self.timesteps = self.precondition_noise(sigmas) self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) self.num_inference_steps = None self.model_outputs = [None] * solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def init_noise_sigma(self): return (self.config.sigma_max ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def precondition_inputs(self, sample, sigma): c_in = 1 / (sigma ** 2 + self.config.sigma_data ** 2) ** 0.5 scaled_sample = sample * c_in return scaled_sample def precondition_noise(self, sigma): if not isinstance(sigma, torch.Tensor): sigma = torch.tensor([sigma]) return sigma.atan() / math.pi * 2 def precondition_outputs(self, sample, model_output, sigma): sigma_data = self.config.sigma_data c_skip = sigma_data ** 2 / (sigma ** 2 + sigma_data ** 2) if self.config.prediction_type == 'epsilon': c_out = sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5 elif self.config.prediction_type == 'v_prediction': c_out = -sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5 else: raise ValueError(f'Prediction type {self.config.prediction_type} is not supported.') denoised = c_skip * sample + c_out * model_output return denoised def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = self.precondition_inputs(sample, sigma) self.is_scale_input_called = True return sample def set_timesteps(self, num_inference_steps: int=None, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps ramp = torch.linspace(0, 1, self.num_inference_steps) if self.config.sigma_schedule == 'karras': sigmas = self._compute_karras_sigmas(ramp) elif self.config.sigma_schedule == 'exponential': sigmas = self._compute_exponential_sigmas(ramp) sigmas = sigmas.to(dtype=torch.float32, device=device) self.timesteps = self.precondition_noise(sigmas) if self.config.final_sigmas_type == 'sigma_min': sigma_last = self.config.sigma_min elif self.config.final_sigmas_type == 'zero': sigma_last = 0 else: raise ValueError(f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}") self.sigmas = torch.cat([sigmas, torch.tensor([sigma_last], dtype=torch.float32, device=device)]) self.model_outputs = [None] * self.config.solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') self.noise_sampler = None def _compute_karras_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: sigma_min = sigma_min or self.config.sigma_min sigma_max = sigma_max or self.config.sigma_max rho = self.config.rho min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def _compute_exponential_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: sigma_min = sigma_min or self.config.sigma_min sigma_max = sigma_max or self.config.sigma_max sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), len(ramp)).exp().flip(0) return sigmas def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists 
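# The distance computation continues on the next line: _sigma_to_t inverts the sigma schedule
# by piecewise-linear interpolation in log space. It finds the adjacent entries of log_sigmas
# that bracket log(sigma), forms the weight w = (low - log_sigma) / (low - high) clipped to
# [0, 1], and returns the fractional index t = (1 - w) * low_idx + w * high_idx reshaped to
# sigma's shape.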
= log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = torch.tensor(1) sigma_t = sigma return (alpha_t, sigma_t) def convert_model_output(self, model_output: torch.Tensor, sample: torch.Tensor=None) -> torch.Tensor: sigma = self.sigmas[self.step_index] x0_pred = self.precondition_outputs(sample, model_output, sigma) return x0_pred def dpm_solver_first_order_update(self, model_output: torch.Tensor, sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None) -> torch.Tensor: (sigma_t, sigma_s) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s, sigma_s) = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s assert noise is not None x_t = sigma_t / sigma_s * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * model_output + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise return x_t def multistep_dpm_solver_second_order_update(self, model_output_list: List[torch.Tensor], sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None) -> torch.Tensor: (sigma_t, sigma_s0, sigma_s1) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) (m0, m1) = (model_output_list[-1], model_output_list[-2]) (h, h_0) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1) r0 = h_0 / h (D0, D1) = (m0, 1.0 / r0 * (m0 - m1)) assert noise is not None if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise return x_t def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, generator=None, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise 
ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) lower_order_final = self.step_index == len(self.timesteps) - 1 and (self.config.euler_at_final or (self.config.lower_order_final and len(self.timesteps) < 15) or self.config.final_sigmas_type == 'zero') lower_order_second = self.step_index == len(self.timesteps) - 2 and self.config.lower_order_final and (len(self.timesteps) < 15) model_output = self.convert_model_output(model_output, sample=sample) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.model_outputs[-1] = model_output if self.noise_sampler is None: seed = None if generator is not None: seed = [g.initial_seed() for g in generator] if isinstance(generator, list) else generator.initial_seed() self.noise_sampler = BrownianTreeNoiseSampler(model_output, sigma_min=self.config.sigma_min, sigma_max=self.config.sigma_max, seed=seed) noise = self.noise_sampler(self.sigmas[self.step_index], self.sigmas[self.step_index + 1]).to(model_output.device) if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) if self.lower_order_nums < self.config.solver_order: self.lower_order_nums += 1 self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_ddim.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass class DDIMSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def 
alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DDIMScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, clip_sample: bool=True, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, clip_sample_range: float=1.0, sample_max_value: float=1.0, timestep_spacing: str='leading', rescale_betas_zero_snr: bool=False): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def _get_variance(self, timestep, prev_timestep): alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = 
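# The final cast continues on the next line (sample = sample.to(dtype)). _threshold_sample is
# dynamic thresholding as used in Imagen: the prediction is flattened per sample, the
# dynamic_thresholding_ratio quantile s of its absolute values is taken and clamped to
# [1, sample_max_value], and the prediction is clipped to [-s, s] and divided by s before
# being reshaped and cast back to the original dtype.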
sample.to(dtype) return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') self.num_inference_steps = num_inference_steps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round()[::-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'.") self.timesteps = torch.from_numpy(timesteps).to(device) def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, eta: float=0.0, use_clipped_model_output: bool=False, generator=None, variance_noise: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_epsilon = model_output elif self.config.prediction_type == 'sample': pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output pred_epsilon = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) variance = self._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** 0.5 if use_clipped_model_output: pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * pred_epsilon prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: if variance_noise is not None and generator is not None: raise ValueError('Cannot pass both generator and variance_noise. 
Please make sure that either `generator` or `variance_noise` stays `None`.') if variance_noise is None: variance_noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) variance = std_dev_t * variance_noise prev_sample = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_ddim_cogvideox.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass class DDIMSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(alphas_cumprod): alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= 
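# A minimal denoising loop for DDIMScheduler from the file above (illustrative, not part of
# the source). `unet` and its inputs are hypothetical; real pipelines such as
# StableDiffusionPipeline supply a text-conditioned UNet here.
import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler()
scheduler.set_timesteps(50)
latents = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    noise_pred = unet(latents, t)                    # hypothetical epsilon prediction
    latents = scheduler.step(noise_pred, t, latents, eta=0.0).prev_sample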
alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 return alphas_bar class CogVideoXDDIMScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.00085, beta_end: float=0.012, beta_schedule: str='scaled_linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, clip_sample: bool=True, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', clip_sample_range: float=1.0, sample_max_value: float=1.0, timestep_spacing: str='leading', rescale_betas_zero_snr: bool=False, snr_shift_scale: float=3.0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float64) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.alphas_cumprod = self.alphas_cumprod / (snr_shift_scale + (1 - snr_shift_scale) * self.alphas_cumprod) if rescale_betas_zero_snr: self.alphas_cumprod = rescale_zero_terminal_snr(self.alphas_cumprod) self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) def _get_variance(self, timestep, prev_timestep): alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') self.num_inference_steps = num_inference_steps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round()[::-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'leading' or 'trailing'.") self.timesteps = torch.from_numpy(timesteps).to(device) def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, eta: float=0.0, use_clipped_model_output: bool=False, generator=None, variance_noise: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == 'sample': pred_original_sample = model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') a_t = ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) ** 0.5 b_t = alpha_prod_t_prev ** 0.5 - alpha_prod_t ** 0.5 * a_t prev_sample = a_t * sample + b_t * pred_original_sample if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_ddim_flax.py from dataclasses import dataclass from typing import Optional, Tuple, Union import flax 
import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common @flax.struct.dataclass class DDIMSchedulerState: common: CommonSchedulerState final_alpha_cumprod: jnp.ndarray init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None @classmethod def create(cls, common: CommonSchedulerState, final_alpha_cumprod: jnp.ndarray, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray): return cls(common=common, final_alpha_cumprod=final_alpha_cumprod, init_noise_sigma=init_noise_sigma, timesteps=timesteps) @dataclass class FlaxDDIMSchedulerOutput(FlaxSchedulerOutput): state: DDIMSchedulerState class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin): _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[jnp.ndarray]=None, clip_sample: bool=True, clip_sample_range: float=1.0, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', dtype: jnp.dtype=jnp.float32): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState]=None) -> DDIMSchedulerState: if common is None: common = CommonSchedulerState.create(self) final_alpha_cumprod = jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] init_noise_sigma = jnp.array(1.0, dtype=self.dtype) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] return DDIMSchedulerState.create(common=common, final_alpha_cumprod=final_alpha_cumprod, init_noise_sigma=init_noise_sigma, timesteps=timesteps) def scale_model_input(self, state: DDIMSchedulerState, sample: jnp.ndarray, timestep: Optional[int]=None) -> jnp.ndarray: return sample def set_timesteps(self, state: DDIMSchedulerState, num_inference_steps: int, shape: Tuple=()) -> DDIMSchedulerState: step_ratio = self.config.num_train_timesteps // num_inference_steps timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] + self.config.steps_offset return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps) def _get_variance(self, state: DDIMSchedulerState, timestep, prev_timestep): alpha_prod_t = state.common.alphas_cumprod[timestep] alpha_prod_t_prev = jnp.where(prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def step(self, state: DDIMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, eta: float=0.0, return_dict: bool=True) -> Union[FlaxDDIMSchedulerOutput, Tuple]: if state.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps alphas_cumprod = state.common.alphas_cumprod final_alpha_cumprod = state.final_alpha_cumprod alpha_prod_t = alphas_cumprod[timestep] alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], final_alpha_cumprod) beta_prod_t = 1 - alpha_prod_t if 
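# The branch that continues on the next line selects the model-output parameterisation for the
# Flax DDIM step, mirroring the PyTorch DDIMScheduler above (alpha_bar_t = alphas_cumprod[t]):
#   epsilon:      x0_hat = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t); eps_hat = model output
#   sample:       x0_hat = model output; eps_hat = (x_t - sqrt(alpha_bar_t) * x0_hat) / sqrt(1 - alpha_bar_t)
#   v_prediction: x0_hat = sqrt(alpha_bar_t) * x_t - sqrt(1 - alpha_bar_t) * v;
#                 eps_hat = sqrt(alpha_bar_t) * v + sqrt(1 - alpha_bar_t) * x_t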
self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_epsilon = model_output elif self.config.prediction_type == 'sample': pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output pred_epsilon = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') if self.config.clip_sample: pred_original_sample = pred_original_sample.clip(-self.config.clip_sample_range, self.config.clip_sample_range) variance = self._get_variance(state, timestep, prev_timestep) std_dev_t = eta * variance ** 0.5 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * pred_epsilon prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, state) return FlaxDDIMSchedulerOutput(prev_sample=prev_sample, state=state) def add_noise(self, state: DDIMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray: return add_noise_common(state.common, original_samples, noise, timesteps) def get_velocity(self, state: DDIMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray: return get_velocity_common(state.common, sample, noise, timesteps) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_ddim_inverse.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass class DDIMSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DDIMInverseScheduler(SchedulerMixin, ConfigMixin): order = 1 ignore_for_config = ['kwargs'] _deprecated_kwargs = ['set_alpha_to_zero'] @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: 
float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, clip_sample: bool=True, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', clip_sample_range: float=1.0, timestep_spacing: str='leading', rescale_betas_zero_snr: bool=False, **kwargs): if kwargs.get('set_alpha_to_zero', None) is not None: deprecation_message = 'The `set_alpha_to_zero` argument is deprecated. Please use `set_alpha_to_one` instead.' deprecate('set_alpha_to_zero', '1.0.0', deprecation_message, standard_warn=False) set_alpha_to_one = kwargs['set_alpha_to_zero'] if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.initial_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64)) def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') self.num_inference_steps = num_inference_steps if self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)[::-1]).astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'leading' or 'trailing'.") self.timesteps = torch.from_numpy(timesteps).to(device) def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, return_dict: bool=True) -> Union[DDIMSchedulerOutput, Tuple]: prev_timestep = timestep timestep = min(timestep - self.config.num_train_timesteps // self.num_inference_steps, self.config.num_train_timesteps - 1) alpha_prod_t = self.alphas_cumprod[timestep] if timestep >= 0 else self.initial_alpha_cumprod alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] beta_prod_t = 1 - alpha_prod_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_epsilon = model_output elif self.config.prediction_type == 'sample': pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output pred_epsilon = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') if self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_ddim_parallel.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass class DDIMParallelSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DDIMParallelScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name 
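# A minimal DDIM-inversion sketch with DDIMInverseScheduler from the file above (illustrative,
# not part of the source). `unet` and `image_latents` are hypothetical; the inverse scheduler
# walks its ascending timesteps to map clean latents back toward Gaussian noise.
from diffusers import DDIMInverseScheduler

inverse_scheduler = DDIMInverseScheduler()
inverse_scheduler.set_timesteps(50)
latents = image_latents                              # hypothetical latents of a clean image
for t in inverse_scheduler.timesteps:
    noise_pred = unet(latents, t)                    # hypothetical epsilon prediction
    latents = inverse_scheduler.step(noise_pred, t, latents).prev_sample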
for e in KarrasDiffusionSchedulers] order = 1 _is_ode_scheduler = True @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, clip_sample: bool=True, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, clip_sample_range: float=1.0, sample_max_value: float=1.0, timestep_spacing: str='leading', rescale_betas_zero_snr: bool=False): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def _get_variance(self, timestep, prev_timestep=None): if prev_timestep is None: prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def _batch_get_variance(self, t, prev_t): alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') 
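# NOTE (illustrative, not from the upstream source): the `timestep_spacing` branches just below
# only differ in how the training range [0, num_train_timesteps) is subsampled. For example,
# with num_train_timesteps=1000, num_inference_steps=10 and steps_offset=0:
#   'linspace' -> [999, 888, 777, 666, 555, 444, 333, 222, 111, 0]
#   'leading'  -> [900, 800, 700, 600, 500, 400, 300, 200, 100, 0]
#   'trailing' -> [999, 899, 799, 699, 599, 499, 399, 299, 199, 99]
# 'linspace' and 'trailing' start the reverse process at the last training timestep (999),
# while 'leading' starts at 900.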
self.num_inference_steps = num_inference_steps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round()[::-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'.") self.timesteps = torch.from_numpy(timesteps).to(device) def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, eta: float=0.0, use_clipped_model_output: bool=False, generator=None, variance_noise: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[DDIMParallelSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_epsilon = model_output elif self.config.prediction_type == 'sample': pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output pred_epsilon = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) variance = self._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** 0.5 if use_clipped_model_output: pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * pred_epsilon prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: if variance_noise is not None and generator is not None: raise ValueError('Cannot pass both generator and variance_noise. 
Please make sure that either `generator` or `variance_noise` stays `None`.') if variance_noise is None: variance_noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) variance = std_dev_t * variance_noise prev_sample = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMParallelSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def batch_step_no_noise(self, model_output: torch.Tensor, timesteps: List[int], sample: torch.Tensor, eta: float=0.0, use_clipped_model_output: bool=False) -> torch.Tensor: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") assert eta == 0.0 t = timesteps prev_t = t - self.config.num_train_timesteps // self.num_inference_steps t = t.view(-1, *[1] * (model_output.ndim - 1)) prev_t = prev_t.view(-1, *[1] * (model_output.ndim - 1)) self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) self.final_alpha_cumprod = self.final_alpha_cumprod.to(model_output.device) alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 pred_epsilon = model_output elif self.config.prediction_type == 'sample': pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output pred_epsilon = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) variance = self._batch_get_variance(t, prev_t).to(model_output.device).view(*alpha_prod_t_prev.shape) std_dev_t = eta * variance ** 0.5 if use_clipped_model_output: pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * pred_epsilon prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction return prev_sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + 
sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_ddpm.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass class DDPMSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DDPMScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, variance_type: str='fixed_small', clip_sample: bool=True, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, clip_sample_range: float=1.0, sample_max_value: float=1.0, timestep_spacing: str='leading', steps_offset: int=0, rescale_betas_zero_snr: bool=False): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 
0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) elif beta_schedule == 'sigmoid': betas = torch.linspace(-6, 6, num_train_timesteps) self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.one = torch.tensor(1.0) self.init_noise_sigma = 1.0 self.custom_timesteps = False self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.variance_type = variance_type def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: Optional[int]=None, device: Union[str, torch.device]=None, timesteps: Optional[List[int]]=None): if num_inference_steps is not None and timesteps is not None: raise ValueError('Can only pass one of `num_inference_steps` or `custom_timesteps`.') if timesteps is not None: for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError('`custom_timesteps` must be in descending order.') if timesteps[0] >= self.config.num_train_timesteps: raise ValueError(f'`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}.') timesteps = np.array(timesteps, dtype=np.int64) self.custom_timesteps = True else: if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') self.num_inference_steps = num_inference_steps self.custom_timesteps = False if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round()[::-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") self.timesteps = torch.from_numpy(timesteps).to(device) def _get_variance(self, t, predicted_variance=None, variance_type=None): prev_t = self.previous_timestep(t) alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t variance = torch.clamp(variance, min=1e-20) if variance_type is None: variance_type = self.config.variance_type if variance_type == 'fixed_small': variance = variance elif variance_type == 'fixed_small_log': variance = torch.log(variance) variance = torch.exp(0.5 * variance) elif variance_type == 'fixed_large': variance = current_beta_t elif variance_type == 'fixed_large_log': variance = torch.log(current_beta_t) elif variance_type == 'learned': return predicted_variance elif variance_type == 'learned_range': min_log = torch.log(variance) max_log = torch.log(current_beta_t) frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator=None, return_dict: bool=True) -> Union[DDPMSchedulerOutput, Tuple]: t = timestep prev_t = self.previous_timestep(t) if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ['learned', 'learned_range']: (model_output, predicted_variance) = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev current_alpha_t = alpha_prod_t / alpha_prod_t_prev current_beta_t = 1 - current_alpha_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == 'sample': pred_original_sample = model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or `v_prediction` for the DDPMScheduler.') if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) pred_original_sample_coeff = alpha_prod_t_prev ** 0.5 * current_beta_t / beta_prod_t current_sample_coeff = current_alpha_t ** 0.5 * beta_prod_t_prev / beta_prod_t pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample variance = 0 if t > 0: device = model_output.device 
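# NOTE (descriptive comment, not from the upstream source): the branches just below compensate for
# what _get_variance returns for each variance_type: 'fixed_small_log' already yields a standard
# deviation (exp(0.5 * log(var))), so it is multiplied by the noise directly; 'learned_range' yields
# an interpolated log-variance, so exp(0.5 * .) turns it into a standard deviation; the default
# 'fixed_small' (and 'fixed_large') path yields a variance, hence the ** 0.5.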
variance_noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype) if self.variance_type == 'fixed_small_log': variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise elif self.variance_type == 'learned_range': variance = self._get_variance(t, predicted_variance=predicted_variance) variance = torch.exp(0.5 * variance) * variance_noise else: variance = self._get_variance(t, predicted_variance=predicted_variance) ** 0.5 * variance_noise pred_prev_sample = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps def previous_timestep(self, timestep): if self.custom_timesteps: index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] if index == self.timesteps.shape[0] - 1: prev_t = torch.tensor(-1) else: prev_t = self.timesteps[index + 1] else: num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = timestep - self.config.num_train_timesteps // num_inference_steps return prev_t # File: diffusers-main/src/diffusers/schedulers/scheduling_ddpm_flax.py from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common @flax.struct.dataclass class DDPMSchedulerState: common: CommonSchedulerState init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None 
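# NOTE (illustrative sketch, not from the upstream source): unlike the PyTorch schedulers above,
# the Flax variant keeps every mutable quantity in this immutable DDPMSchedulerState, so the
# scheduler object itself stays stateless and jit-friendly; set_timesteps returns an updated copy
# via state.replace(...). A minimal usage pattern, using only the API defined in this file (the
# dummy shape and the zero "model output" are placeholders for a real UNet):
#
#     import jax
#     import jax.numpy as jnp
#     from diffusers.schedulers.scheduling_ddpm_flax import FlaxDDPMScheduler
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50, shape=(1, 3, 64, 64))
#     sample = jax.random.normal(jax.random.PRNGKey(0), (1, 3, 64, 64))
#     for t in state.timesteps:
#         model_output = jnp.zeros_like(sample)  # stand-in for the model's epsilon prediction
#         out = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(int(t)))
#         sample, state = out.prev_sample, out.state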
@classmethod def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray): return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps) @dataclass class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput): state: DDPMSchedulerState class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin): _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[jnp.ndarray]=None, variance_type: str='fixed_small', clip_sample: bool=True, prediction_type: str='epsilon', dtype: jnp.dtype=jnp.float32): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState]=None) -> DDPMSchedulerState: if common is None: common = CommonSchedulerState.create(self) init_noise_sigma = jnp.array(1.0, dtype=self.dtype) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] return DDPMSchedulerState.create(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps) def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int]=None) -> jnp.ndarray: return sample def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple=()) -> DDPMSchedulerState: step_ratio = self.config.num_train_timesteps // num_inference_steps timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps) def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None): alpha_prod_t = state.common.alphas_cumprod[t] alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype)) variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: variance_type = self.config.variance_type if variance_type == 'fixed_small': variance = jnp.clip(variance, a_min=1e-20) elif variance_type == 'fixed_small_log': variance = jnp.log(jnp.clip(variance, a_min=1e-20)) elif variance_type == 'fixed_large': variance = state.common.betas[t] elif variance_type == 'fixed_large_log': variance = jnp.log(state.common.betas[t]) elif variance_type == 'learned': return predicted_variance elif variance_type == 'learned_range': min_log = variance max_log = state.common.betas[t] frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.Array]=None, return_dict: bool=True) -> Union[FlaxDDPMSchedulerOutput, Tuple]: t = timestep if key is None: key = jax.random.key(0) if len(model_output.shape) > 1 and model_output.shape[1] == sample.shape[1] * 2 and (self.config.variance_type in ['learned', 'learned_range']): (model_output, predicted_variance) = jnp.split(model_output, sample.shape[1], axis=1) else: predicted_variance = None alpha_prod_t = state.common.alphas_cumprod[t] alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype)) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == 
'sample': pred_original_sample = model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` for the FlaxDDPMScheduler.') if self.config.clip_sample: pred_original_sample = jnp.clip(pred_original_sample, -1, 1) pred_original_sample_coeff = alpha_prod_t_prev ** 0.5 * state.common.betas[t] / beta_prod_t current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample def random_variance(): split_key = jax.random.split(key, num=1)[0] noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype) return self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5 * noise variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype)) pred_prev_sample = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state) def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray: return add_noise_common(state.common, original_samples, noise, timesteps) def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray: return get_velocity_common(state.common, sample, noise, timesteps) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_ddpm_parallel.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass class DDPMParallelSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DDPMParallelScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 _is_ode_scheduler = False @register_to_config def __init__(self, 
num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, variance_type: str='fixed_small', clip_sample: bool=True, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, clip_sample_range: float=1.0, sample_max_value: float=1.0, timestep_spacing: str='leading', steps_offset: int=0, rescale_betas_zero_snr: bool=False): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) elif beta_schedule == 'sigmoid': betas = torch.linspace(-6, 6, num_train_timesteps) self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.one = torch.tensor(1.0) self.init_noise_sigma = 1.0 self.custom_timesteps = False self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.variance_type = variance_type def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: Optional[int]=None, device: Union[str, torch.device]=None, timesteps: Optional[List[int]]=None): if num_inference_steps is not None and timesteps is not None: raise ValueError('Can only pass one of `num_inference_steps` or `custom_timesteps`.') if timesteps is not None: for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError('`custom_timesteps` must be in descending order.') if timesteps[0] >= self.config.num_train_timesteps: raise ValueError(f'`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}.') timesteps = np.array(timesteps, dtype=np.int64) self.custom_timesteps = True else: if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') self.num_inference_steps = num_inference_steps self.custom_timesteps = False if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round()[::-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") self.timesteps = torch.from_numpy(timesteps).to(device) def _get_variance(self, t, predicted_variance=None, variance_type=None): prev_t = self.previous_timestep(t) alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t variance = torch.clamp(variance, min=1e-20) if variance_type is None: variance_type = self.config.variance_type if variance_type == 'fixed_small': variance = variance elif variance_type == 'fixed_small_log': variance = torch.log(variance) variance = torch.exp(0.5 * variance) elif variance_type == 'fixed_large': variance = current_beta_t elif variance_type == 'fixed_large_log': variance = torch.log(current_beta_t) elif variance_type == 'learned': return predicted_variance elif variance_type == 'learned_range': min_log = torch.log(variance) max_log = torch.log(current_beta_t) frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator=None, return_dict: bool=True) -> Union[DDPMParallelSchedulerOutput, Tuple]: t = timestep prev_t = self.previous_timestep(t) if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ['learned', 'learned_range']: (model_output, predicted_variance) = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev current_alpha_t = alpha_prod_t / alpha_prod_t_prev current_beta_t = 1 - current_alpha_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == 'sample': pred_original_sample = model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or `v_prediction` for the DDPMScheduler.') if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) pred_original_sample_coeff = alpha_prod_t_prev ** 0.5 * current_beta_t / beta_prod_t current_sample_coeff = current_alpha_t ** 0.5 * beta_prod_t_prev / beta_prod_t pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample variance = 0 if t > 0: device = 
model_output.device variance_noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype) if self.variance_type == 'fixed_small_log': variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise elif self.variance_type == 'learned_range': variance = self._get_variance(t, predicted_variance=predicted_variance) variance = torch.exp(0.5 * variance) * variance_noise else: variance = self._get_variance(t, predicted_variance=predicted_variance) ** 0.5 * variance_noise pred_prev_sample = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMParallelSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) def batch_step_no_noise(self, model_output: torch.Tensor, timesteps: List[int], sample: torch.Tensor) -> torch.Tensor: t = timesteps num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = t - self.config.num_train_timesteps // num_inference_steps t = t.view(-1, *[1] * (model_output.ndim - 1)) prev_t = prev_t.view(-1, *[1] * (model_output.ndim - 1)) if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ['learned', 'learned_range']: (model_output, predicted_variance) = torch.split(model_output, sample.shape[1], dim=1) else: pass self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev current_alpha_t = alpha_prod_t / alpha_prod_t_prev current_beta_t = 1 - current_alpha_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == 'sample': pred_original_sample = model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or `v_prediction` for the DDPMParallelScheduler.') if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) pred_original_sample_coeff = alpha_prod_t_prev ** 0.5 * current_beta_t / beta_prod_t current_sample_coeff = current_alpha_t ** 0.5 * beta_prod_t_prev / beta_prod_t pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample return pred_prev_sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = 
sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps def previous_timestep(self, timestep): if self.custom_timesteps: index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] if index == self.timesteps.shape[0] - 1: prev_t = torch.tensor(-1) else: prev_t = self.timesteps[index + 1] else: num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = timestep - self.config.num_train_timesteps // num_inference_steps return prev_t # File: diffusers-main/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class DDPMWuerstchenSchedulerOutput(BaseOutput): prev_sample: torch.Tensor def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DDPMWuerstchenScheduler(SchedulerMixin, ConfigMixin): @register_to_config def __init__(self, scaler: float=1.0, s: float=0.008): self.scaler = scaler self.s = torch.tensor([s]) self._init_alpha_cumprod = torch.cos(self.s / (1 + self.s) * torch.pi * 0.5) ** 2 self.init_noise_sigma = 1.0 def _alpha_cumprod(self, t, device): if self.scaler > 1: t = 1 - (1 - t) ** self.scaler elif self.scaler < 1: t = t ** self.scaler alpha_cumprod = torch.cos((t + self.s.to(device)) / (1 + self.s.to(device)) * torch.pi * 0.5) ** 2 / self._init_alpha_cumprod.to(device) return alpha_cumprod.clamp(0.0001, 0.9999) def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: int=None, timesteps: Optional[List[int]]=None, device: Union[str, torch.device]=None): if timesteps is None: timesteps = torch.linspace(1.0, 0.0, num_inference_steps + 1, device=device) if not isinstance(timesteps, torch.Tensor): 
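# NOTE (descriptive comment, not from the upstream source): in contrast to the integer-timestep
# schedulers above, this scheduler works in continuous time. set_timesteps builds
# num_inference_steps + 1 values linearly spaced from t=1.0 (pure noise) down to t=0.0, and
# _alpha_cumprod evaluates the squared-cosine schedule cos((t + s) / (1 + s) * pi / 2) ** 2,
# normalised by its value at t=0, directly at those continuous t values; step() then walks
# consecutive pairs of this sequence via previous_timestep().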
timesteps = torch.Tensor(timesteps).to(device) self.timesteps = timesteps def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator=None, return_dict: bool=True) -> Union[DDPMWuerstchenSchedulerOutput, Tuple]: dtype = model_output.dtype device = model_output.device t = timestep prev_t = self.previous_timestep(t) alpha_cumprod = self._alpha_cumprod(t, device).view(t.size(0), *[1 for _ in sample.shape[1:]]) alpha_cumprod_prev = self._alpha_cumprod(prev_t, device).view(prev_t.size(0), *[1 for _ in sample.shape[1:]]) alpha = alpha_cumprod / alpha_cumprod_prev mu = (1.0 / alpha).sqrt() * (sample - (1 - alpha) * model_output / (1 - alpha_cumprod).sqrt()) std_noise = randn_tensor(mu.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) std = ((1 - alpha) * (1.0 - alpha_cumprod_prev) / (1.0 - alpha_cumprod)).sqrt() * std_noise pred = mu + std * (prev_t != 0).float().view(prev_t.size(0), *[1 for _ in sample.shape[1:]]) if not return_dict: return (pred.to(dtype),) return DDPMWuerstchenSchedulerOutput(prev_sample=pred.to(dtype)) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: device = original_samples.device dtype = original_samples.dtype alpha_cumprod = self._alpha_cumprod(timesteps, device=device).view(timesteps.size(0), *[1 for _ in original_samples.shape[1:]]) noisy_samples = alpha_cumprod.sqrt() * original_samples + (1 - alpha_cumprod).sqrt() * noise return noisy_samples.to(dtype=dtype) def __len__(self): return self.config.num_train_timesteps def previous_timestep(self, timestep): index = (self.timesteps - timestep[0]).abs().argmin().item() prev_t = self.timesteps[index + 1][None].expand(timestep.shape[0]) return prev_t # File: diffusers-main/src/diffusers/schedulers/scheduling_deis_multistep.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DEISMultistepScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[np.ndarray]=None, solver_order: int=2, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, algorithm_type: str='deis', solver_type: str='logrho', lower_order_final: bool=True, use_karras_sigmas: Optional[bool]=False, timestep_spacing: str='linspace', steps_offset: int=0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = 
torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 self.init_noise_sigma = 1.0 if algorithm_type not in ['deis']: if algorithm_type in ['dpmsolver', 'dpmsolver++']: self.register_to_config(algorithm_type='deis') else: raise NotImplementedError(f'{algorithm_type} is not implemented for {self.__class__}') if solver_type not in ['logrho']: if solver_type in ['midpoint', 'heun', 'bh1', 'bh2']: self.register_to_config(solver_type='logrho') else: raise NotImplementedError(f'solver type {solver_type} is not implemented for {self.__class__}') self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1) timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / num_inference_steps timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) if self.config.use_karras_sigmas: log_sigmas = np.log(sigmas) sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [None] * self.config.solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / (sigma ** 2 + 1) ** 0.5 sigma_t = sigma * alpha_t return (alpha_t, sigma_t) def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError('missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) if self.config.prediction_type == 'epsilon': x0_pred = (sample - sigma_t * model_output) / alpha_t 
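# NOTE (descriptive comment, not from the upstream source): all three prediction_type branches
# invert the same forward relation x_t = alpha_t * x0 + sigma_t * eps (with alpha_t ** 2 +
# sigma_t ** 2 == 1, see _sigma_to_alpha_sigma_t). 'epsilon' solves that relation for x0,
# 'sample' takes x0 directly from the model, and 'v_prediction' uses v = alpha_t * eps - sigma_t * x0,
# which gives x0 = alpha_t * x_t - sigma_t * v. After optional thresholding, the 'deis' branch below
# converts the x0 estimate back into an implied epsilon, (x_t - alpha_t * x0) / sigma_t, which is
# what the first/second/third-order multistep updates consume.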
elif self.config.prediction_type == 'sample': x0_pred = model_output elif self.config.prediction_type == 'v_prediction': x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the DEISMultistepScheduler.') if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) if self.config.algorithm_type == 'deis': return (sample - alpha_t * x0_pred) / sigma_t else: raise NotImplementedError('only support log-rho multistep deis now') def deis_first_order_update(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s, sigma_s) = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s if self.config.algorithm_type == 'deis': x_t = alpha_t / alpha_s * sample - sigma_t * (torch.exp(h) - 1.0) * model_output else: raise NotImplementedError('only support log-rho multistep deis now') return x_t def multistep_deis_second_order_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing `sample` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s0, sigma_s1) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) (m0, m1) = (model_output_list[-1], model_output_list[-2]) (rho_t, rho_s0, rho_s1) = (sigma_t / alpha_t, sigma_s0 / alpha_s0, sigma_s1 / alpha_s1) if self.config.algorithm_type == 'deis': def ind_fn(t, b, c): return t * (-np.log(c) + np.log(t) - 1) / (np.log(b) - np.log(c)) coef1 = ind_fn(rho_t, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s0, rho_s1) coef2 = ind_fn(rho_t, rho_s1, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s0) x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + 
coef2 * m1) return x_t else: raise NotImplementedError('only support log-rho multistep deis now') def multistep_deis_third_order_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing`sample` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s0, sigma_s1, sigma_s2) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) (alpha_s2, sigma_s2) = self._sigma_to_alpha_sigma_t(sigma_s2) (m0, m1, m2) = (model_output_list[-1], model_output_list[-2], model_output_list[-3]) (rho_t, rho_s0, rho_s1, rho_s2) = (sigma_t / alpha_t, sigma_s0 / alpha_s0, sigma_s1 / alpha_s1, sigma_s2 / alpha_s2) if self.config.algorithm_type == 'deis': def ind_fn(t, b, c, d): numerator = t * (np.log(c) * (np.log(d) - np.log(t) + 1) - np.log(d) * np.log(t) + np.log(d) + np.log(t) ** 2 - 2 * np.log(t) + 2) denominator = (np.log(b) - np.log(c)) * (np.log(b) - np.log(d)) return numerator / denominator coef1 = ind_fn(rho_t, rho_s0, rho_s1, rho_s2) - ind_fn(rho_s0, rho_s0, rho_s1, rho_s2) coef2 = ind_fn(rho_t, rho_s1, rho_s2, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s2, rho_s0) coef3 = ind_fn(rho_t, rho_s2, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s2, rho_s0, rho_s1) x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1 + coef3 * m2) return x_t else: raise NotImplementedError('only support log-rho multistep deis now') def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) lower_order_final = self.step_index == len(self.timesteps) - 1 and self.config.lower_order_final and (len(self.timesteps) < 15) lower_order_second = self.step_index == len(self.timesteps) - 2 and self.config.lower_order_final and (len(self.timesteps) 
< 15) model_output = self.convert_model_output(model_output, sample=sample) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.model_outputs[-1] = model_output if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: prev_sample = self.deis_first_order_update(model_output, sample=sample) elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: prev_sample = self.multistep_deis_second_order_update(self.model_outputs, sample=sample) else: prev_sample = self.multistep_deis_third_order_update(self.model_outputs, sample=sample) if self.lower_order_nums < self.config.solver_order: self.lower_order_nums += 1 self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: return sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) noisy_samples = alpha_t * original_samples + sigma_t * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_dpm_cogvideox.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass class DDIMSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(alphas_cumprod): alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = 
alphas_bar_sqrt ** 2 return alphas_bar class CogVideoXDPMScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.00085, beta_end: float=0.012, beta_schedule: str='scaled_linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, clip_sample: bool=True, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', clip_sample_range: float=1.0, sample_max_value: float=1.0, timestep_spacing: str='leading', rescale_betas_zero_snr: bool=False, snr_shift_scale: float=3.0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float64) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.alphas_cumprod = self.alphas_cumprod / (snr_shift_scale + (1 - snr_shift_scale) * self.alphas_cumprod) if rescale_betas_zero_snr: self.alphas_cumprod = rescale_zero_terminal_snr(self.alphas_cumprod) self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) def _get_variance(self, timestep, prev_timestep): alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') self.num_inference_steps = num_inference_steps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round()[::-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'leading' or 'trailing'.") self.timesteps = torch.from_numpy(timesteps).to(device) def get_variables(self, alpha_prod_t, alpha_prod_t_prev, alpha_prod_t_back=None): lamb = ((alpha_prod_t / (1 - alpha_prod_t)) ** 0.5).log() lamb_next = ((alpha_prod_t_prev / (1 - alpha_prod_t_prev)) ** 0.5).log() h = lamb_next - lamb if alpha_prod_t_back is not None: lamb_previous = ((alpha_prod_t_back / (1 - alpha_prod_t_back)) ** 0.5).log() h_last = lamb - lamb_previous r = h_last / h return (h, r, lamb, lamb_next) else: return (h, None, lamb, lamb_next) def get_mult(self, h, r, alpha_prod_t, alpha_prod_t_prev, alpha_prod_t_back): mult1 = ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) ** 0.5 * (-h).exp() mult2 = (-2 * h).expm1() * alpha_prod_t_prev ** 0.5 if alpha_prod_t_back is not None: mult3 = 1 + 1 / (2 * r) mult4 = 1 / (2 * r) return (mult1, mult2, mult3, mult4) else: return (mult1, mult2) def step(self, model_output: torch.Tensor, old_pred_original_sample: torch.Tensor, timestep: int, timestep_back: int, sample: torch.Tensor, eta: float=0.0, use_clipped_model_output: bool=False, generator=None, variance_noise: Optional[torch.Tensor]=None, return_dict: bool=False) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod alpha_prod_t_back = self.alphas_cumprod[timestep_back] if timestep_back is not None else None beta_prod_t = 1 - alpha_prod_t if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == 'sample': pred_original_sample = model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`') (h, r, lamb, lamb_next) = self.get_variables(alpha_prod_t, alpha_prod_t_prev, alpha_prod_t_back) mult = list(self.get_mult(h, r, alpha_prod_t, alpha_prod_t_prev, alpha_prod_t_back)) mult_noise = (1 - alpha_prod_t_prev) ** 0.5 * (1 - (-2 * h).exp()) ** 0.5 noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype) prev_sample = mult[0] * sample - mult[1] * pred_original_sample + mult_noise * noise if old_pred_original_sample is None or prev_timestep < 0: return (prev_sample, pred_original_sample) else: denoised_d = mult[2] * pred_original_sample - mult[3] * old_pred_original_sample noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype) x_advanced = mult[0] * sample - mult[1] * denoised_d + mult_noise * noise prev_sample = x_advanced if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) 
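# A short sketch (added for illustration, not part of the original file): the rest of
# `add_noise` applies the forward-diffusion identity
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# unsqueezing the per-timestep scalars until they broadcast over the sample dimensions.
# The schedule, shapes, and timesteps below are toy stand-ins.
import torch

alphas_cumprod = torch.linspace(0.9999, 0.0001, 1000)          # stand-in cumulative-alpha schedule
x0 = torch.randn(2, 4, 8, 8)
noise = torch.randn_like(x0)
t = torch.tensor([10, 500])
sqrt_ab = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)           # broadcast over (C, H, W)
sqrt_one_minus_ab = (1 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)
x_t = sqrt_ab * x0 + sqrt_one_minus_ab * noise                 # matches the unsqueeze(-1) loop that follows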
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, solver_order: int=2, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, algorithm_type: str='dpmsolver++', solver_type: 
str='midpoint', lower_order_final: bool=True, euler_at_final: bool=False, use_karras_sigmas: Optional[bool]=False, use_lu_lambdas: Optional[bool]=False, final_sigmas_type: Optional[str]='zero', lambda_min_clipped: float=-float('inf'), variance_type: Optional[str]=None, timestep_spacing: str='linspace', steps_offset: int=0, rescale_betas_zero_snr: bool=False): if algorithm_type in ['dpmsolver', 'sde-dpmsolver']: deprecation_message = f'algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead' deprecate('algorithm_types dpmsolver and sde-dpmsolver', '1.0.0', deprecation_message) if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) if rescale_betas_zero_snr: self.alphas_cumprod[-1] = 2 ** (-24) self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 self.init_noise_sigma = 1.0 if algorithm_type not in ['dpmsolver', 'dpmsolver++', 'sde-dpmsolver', 'sde-dpmsolver++']: if algorithm_type == 'deis': self.register_to_config(algorithm_type='dpmsolver++') else: raise NotImplementedError(f'{algorithm_type} is not implemented for {self.__class__}') if solver_type not in ['midpoint', 'heun']: if solver_type in ['logrho', 'bh1', 'bh2']: self.register_to_config(solver_type='midpoint') else: raise NotImplementedError(f'{solver_type} is not implemented for {self.__class__}') if algorithm_type not in ['dpmsolver++', 'sde-dpmsolver++'] and final_sigmas_type == 'zero': raise ValueError(f'`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. 
Please choose `sigma_min` instead.') self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def set_timesteps(self, num_inference_steps: int=None, device: Union[str, torch.device]=None, timesteps: Optional[List[int]]=None): if num_inference_steps is None and timesteps is None: raise ValueError('Must pass exactly one of `num_inference_steps` or `timesteps`.') if num_inference_steps is not None and timesteps is not None: raise ValueError('Can only pass one of `num_inference_steps` or `custom_timesteps`.') if timesteps is not None and self.config.use_karras_sigmas: raise ValueError('Cannot use `timesteps` with `config.use_karras_sigmas = True`') if timesteps is not None and self.config.use_lu_lambdas: raise ValueError('Cannot use `timesteps` with `config.use_lu_lambdas = True`') if timesteps is not None: timesteps = np.array(timesteps).astype(np.int64) else: clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) last_timestep = (self.config.num_train_timesteps - clipped_idx).numpy().item() if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = last_timestep // (num_inference_steps + 1) timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / num_inference_steps timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) if self.config.use_karras_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() elif self.config.use_lu_lambdas: lambdas = np.flip(log_sigmas.copy()) lambdas = self._convert_to_lu(in_lambdas=lambdas, num_inference_steps=num_inference_steps) sigmas = np.exp(lambdas) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.final_sigmas_type == 'sigma_min': sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 elif self.config.final_sigmas_type == 'zero': sigma_last = 0 else: raise ValueError(f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}") sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [None] * self.config.solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / (sigma ** 2 + 1) ** 0.5 sigma_t = sigma * alpha_t return (alpha_t, sigma_t) def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def _convert_to_lu(self, in_lambdas: torch.Tensor, num_inference_steps) -> torch.Tensor: lambda_min: float = in_lambdas[-1].item() lambda_max: float = in_lambdas[0].item() rho = 1.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = lambda_min ** (1 / rho) 
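# Hedged example (added, not library code): `_convert_to_karras` above and `_convert_to_lu`
# here share the same recipe -- interpolate linearly in x**(1/rho) space between the two
# endpoints, then raise the result back to the power rho. rho = 7.0 (Karras et al.) concentrates
# steps near sigma_min, while rho = 1.0 reduces to uniform spacing. The endpoint values below
# are invented for the demonstration.
import numpy as np

sigma_min, sigma_max, rho, n = 0.03, 14.6, 7.0, 10     # toy values, not from any real config
ramp = np.linspace(0, 1, n)
sigmas = (sigma_max ** (1 / rho) + ramp * (sigma_min ** (1 / rho) - sigma_max ** (1 / rho))) ** rho
assert np.isclose(sigmas[0], sigma_max) and np.isclose(sigmas[-1], sigma_min)
# sigmas decreases monotonically, spending most of its resolution at small noise levels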
max_inv_rho = lambda_max ** (1 / rho) lambdas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return lambdas def convert_model_output(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError('missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if self.config.algorithm_type in ['dpmsolver++', 'sde-dpmsolver++']: if self.config.prediction_type == 'epsilon': if self.config.variance_type in ['learned', 'learned_range']: model_output = model_output[:, :3] sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == 'sample': x0_pred = model_output elif self.config.prediction_type == 'v_prediction': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the DPMSolverMultistepScheduler.') if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred elif self.config.algorithm_type in ['dpmsolver', 'sde-dpmsolver']: if self.config.prediction_type == 'epsilon': if self.config.variance_type in ['learned', 'learned_range']: epsilon = model_output[:, :3] else: epsilon = model_output elif self.config.prediction_type == 'sample': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) epsilon = (sample - alpha_t * model_output) / sigma_t elif self.config.prediction_type == 'v_prediction': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) epsilon = alpha_t * model_output + sigma_t * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the DPMSolverMultistepScheduler.') if self.config.thresholding: sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * epsilon) / alpha_t x0_pred = self._threshold_sample(x0_pred) epsilon = (sample - alpha_t * x0_pred) / sigma_t return epsilon def dpm_solver_first_order_update(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index]) (alpha_t, sigma_t) = 
self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s, sigma_s) = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s * sample - alpha_t * (torch.exp(-h) - 1.0) * model_output elif self.config.algorithm_type == 'dpmsolver': x_t = alpha_t / alpha_s * sample - sigma_t * (torch.exp(h) - 1.0) * model_output elif self.config.algorithm_type == 'sde-dpmsolver++': assert noise is not None x_t = sigma_t / sigma_s * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * model_output + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif self.config.algorithm_type == 'sde-dpmsolver': assert noise is not None x_t = alpha_t / alpha_s * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise return x_t def multistep_dpm_solver_second_order_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing `sample` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s0, sigma_s1) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) (m0, m1) = (model_output_list[-1], model_output_list[-2]) (h, h_0) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1) r0 = h_0 / h (D0, D1) = (m0, 1.0 / r0 * (m0 - m1)) if self.config.algorithm_type == 'dpmsolver++': if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1 elif self.config.algorithm_type == 'dpmsolver': if self.config.solver_type == 'midpoint': x_t = alpha_t / alpha_s0 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 elif self.config.solver_type == 'heun': x_t = alpha_t / alpha_s0 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - sigma_t * ((torch.exp(h) - 1.0) / h - 1.0) * D1 elif self.config.algorithm_type == 'sde-dpmsolver++': assert noise is not None if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif 
self.config.solver_type == 'heun': x_t = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif self.config.algorithm_type == 'sde-dpmsolver': assert noise is not None if self.config.solver_type == 'midpoint': x_t = alpha_t / alpha_s0 * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - sigma_t * (torch.exp(h) - 1.0) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise elif self.config.solver_type == 'heun': x_t = alpha_t / alpha_s0 * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise return x_t def multistep_dpm_solver_third_order_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing`sample` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s0, sigma_s1, sigma_s2) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) (alpha_s2, sigma_s2) = self._sigma_to_alpha_sigma_t(sigma_s2) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) (m0, m1, m2) = (model_output_list[-1], model_output_list[-2], model_output_list[-3]) (h, h_0, h_1) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2) (r0, r1) = (h_0 / h, h_1 / h) D0 = m0 (D1_0, D1_1) = (1.0 / r0 * (m0 - m1), 1.0 / r1 * (m1 - m2)) D1 = D1_0 + r0 / (r0 + r1) * (D1_0 - D1_1) D2 = 1.0 / (r0 + r1) * (D1_0 - D1_1) if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1 - alpha_t * ((torch.exp(-h) - 1.0 + h) / h ** 2 - 0.5) * D2 elif self.config.algorithm_type == 'dpmsolver': x_t = alpha_t / alpha_s0 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - sigma_t * ((torch.exp(h) - 1.0) / h - 1.0) * D1 - sigma_t * ((torch.exp(h) - 1.0 - h) / h ** 2 - 0.5) * D2 return x_t def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): if self.begin_index is 
None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, generator=None, variance_noise: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) lower_order_final = self.step_index == len(self.timesteps) - 1 and (self.config.euler_at_final or (self.config.lower_order_final and len(self.timesteps) < 15) or self.config.final_sigmas_type == 'zero') lower_order_second = self.step_index == len(self.timesteps) - 2 and self.config.lower_order_final and (len(self.timesteps) < 15) model_output = self.convert_model_output(model_output, sample=sample) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.model_outputs[-1] = model_output sample = sample.to(torch.float32) if self.config.algorithm_type in ['sde-dpmsolver', 'sde-dpmsolver++'] and variance_noise is None: noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=torch.float32) elif self.config.algorithm_type in ['sde-dpmsolver', 'sde-dpmsolver++']: noise = variance_noise.to(device=model_output.device, dtype=torch.float32) else: noise = None if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) else: prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) if self.lower_order_nums < self.config.solver_order: self.lower_order_nums += 1 prev_sample = prev_sample.to(model_output.dtype) self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: return sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) noisy_samples = alpha_t * original_samples + sigma_t * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: 
diffusers-main/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py from dataclasses import dataclass from typing import List, Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common @flax.struct.dataclass class DPMSolverMultistepSchedulerState: common: CommonSchedulerState alpha_t: jnp.ndarray sigma_t: jnp.ndarray lambda_t: jnp.ndarray init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None model_outputs: Optional[jnp.ndarray] = None lower_order_nums: Optional[jnp.int32] = None prev_timestep: Optional[jnp.int32] = None cur_sample: Optional[jnp.ndarray] = None @classmethod def create(cls, common: CommonSchedulerState, alpha_t: jnp.ndarray, sigma_t: jnp.ndarray, lambda_t: jnp.ndarray, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray): return cls(common=common, alpha_t=alpha_t, sigma_t=sigma_t, lambda_t=lambda_t, init_noise_sigma=init_noise_sigma, timesteps=timesteps) @dataclass class FlaxDPMSolverMultistepSchedulerOutput(FlaxSchedulerOutput): state: DPMSolverMultistepSchedulerState class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin): _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[jnp.ndarray]=None, solver_order: int=2, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, algorithm_type: str='dpmsolver++', solver_type: str='midpoint', lower_order_final: bool=True, timestep_spacing: str='linspace', dtype: jnp.dtype=jnp.float32): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState]=None) -> DPMSolverMultistepSchedulerState: if common is None: common = CommonSchedulerState.create(self) alpha_t = jnp.sqrt(common.alphas_cumprod) sigma_t = jnp.sqrt(1 - common.alphas_cumprod) lambda_t = jnp.log(alpha_t) - jnp.log(sigma_t) if self.config.algorithm_type not in ['dpmsolver', 'dpmsolver++']: raise NotImplementedError(f'{self.config.algorithm_type} is not implemented for {self.__class__}') if self.config.solver_type not in ['midpoint', 'heun']: raise NotImplementedError(f'{self.config.solver_type} is not implemented for {self.__class__}') init_noise_sigma = jnp.array(1.0, dtype=self.dtype) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] return DPMSolverMultistepSchedulerState.create(common=common, alpha_t=alpha_t, sigma_t=sigma_t, lambda_t=lambda_t, init_noise_sigma=init_noise_sigma, timesteps=timesteps) def set_timesteps(self, state: DPMSolverMultistepSchedulerState, num_inference_steps: int, shape: Tuple) -> DPMSolverMultistepSchedulerState: last_timestep = self.config.num_train_timesteps if self.config.timestep_spacing == 'linspace': timesteps = jnp.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].astype(jnp.int32) elif self.config.timestep_spacing == 'leading': step_ratio = last_timestep // (num_inference_steps + 1) timesteps = (jnp.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(jnp.int32) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': 
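# A small worked example (added for clarity; NumPy stands in for the jnp arithmetic of the
# branch below, and the sizes are toy values): with 1000 training timesteps and 10 inference
# steps, 'trailing' walks back from the noisiest timestep in equal strides, 'leading' counts up
# from 0 in equal strides, and 'linspace' spreads the steps evenly across the whole range.
import numpy as np

num_train_timesteps, num_inference_steps = 1000, 10
step_ratio = num_train_timesteps / num_inference_steps
trailing = np.round(np.arange(num_train_timesteps, 0, -step_ratio)).astype(np.int64) - 1
# trailing -> [999, 899, 799, 699, 599, 499, 399, 299, 199, 99]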
step_ratio = self.config.num_train_timesteps / num_inference_steps timesteps = jnp.arange(last_timestep, 0, -step_ratio).round().copy().astype(jnp.int32) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") model_outputs = jnp.zeros((self.config.solver_order,) + shape, dtype=self.dtype) lower_order_nums = jnp.int32(0) prev_timestep = jnp.int32(-1) cur_sample = jnp.zeros(shape, dtype=self.dtype) return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps, model_outputs=model_outputs, lower_order_nums=lower_order_nums, prev_timestep=prev_timestep, cur_sample=cur_sample) def convert_model_output(self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray) -> jnp.ndarray: if self.config.algorithm_type == 'dpmsolver++': if self.config.prediction_type == 'epsilon': (alpha_t, sigma_t) = (state.alpha_t[timestep], state.sigma_t[timestep]) x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == 'sample': x0_pred = model_output elif self.config.prediction_type == 'v_prediction': (alpha_t, sigma_t) = (state.alpha_t[timestep], state.sigma_t[timestep]) x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the FlaxDPMSolverMultistepScheduler.') if self.config.thresholding: dynamic_max_val = jnp.percentile(jnp.abs(x0_pred), self.config.dynamic_thresholding_ratio, axis=tuple(range(1, x0_pred.ndim))) dynamic_max_val = jnp.maximum(dynamic_max_val, self.config.sample_max_value * jnp.ones_like(dynamic_max_val)) x0_pred = jnp.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val return x0_pred elif self.config.algorithm_type == 'dpmsolver': if self.config.prediction_type == 'epsilon': return model_output elif self.config.prediction_type == 'sample': (alpha_t, sigma_t) = (state.alpha_t[timestep], state.sigma_t[timestep]) epsilon = (sample - alpha_t * model_output) / sigma_t return epsilon elif self.config.prediction_type == 'v_prediction': (alpha_t, sigma_t) = (state.alpha_t[timestep], state.sigma_t[timestep]) epsilon = alpha_t * model_output + sigma_t * sample return epsilon else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the FlaxDPMSolverMultistepScheduler.') def dpm_solver_first_order_update(self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, prev_timestep: int, sample: jnp.ndarray) -> jnp.ndarray: (t, s0) = (prev_timestep, timestep) m0 = model_output (lambda_t, lambda_s) = (state.lambda_t[t], state.lambda_t[s0]) (alpha_t, alpha_s) = (state.alpha_t[t], state.alpha_t[s0]) (sigma_t, sigma_s) = (state.sigma_t[t], state.sigma_t[s0]) h = lambda_t - lambda_s if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s * sample - alpha_t * (jnp.exp(-h) - 1.0) * m0 elif self.config.algorithm_type == 'dpmsolver': x_t = alpha_t / alpha_s * sample - sigma_t * (jnp.exp(h) - 1.0) * m0 return x_t def multistep_dpm_solver_second_order_update(self, state: DPMSolverMultistepSchedulerState, model_output_list: jnp.ndarray, timestep_list: List[int], prev_timestep: int, sample: jnp.ndarray) -> jnp.ndarray: (t, s0, s1) = (prev_timestep, timestep_list[-1], timestep_list[-2]) (m0, m1) = (model_output_list[-1], model_output_list[-2]) 
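# Sketch of the update rule (added comment/example, not original code): the second-order solver
# works in log-SNR (lambda) space with h = lambda_t - lambda_s0, h_0 = lambda_s0 - lambda_s1,
# and r0 = h_0 / h, combining D0 = m0 with the finite difference D1 = (m0 - m1) / r0. The
# scalars and array shapes below are made up purely to show the 'midpoint' dpmsolver++ formula
# in isolation.
import jax.numpy as jnp

lambda_t, lambda_s0, lambda_s1 = 1.2, 0.8, 0.5         # toy log-SNR values
alpha_t, sigma_t, sigma_s0 = 0.9, 0.44, 0.6            # toy coefficients
sample = jnp.zeros((4,))
m0, m1 = jnp.full((4,), 0.2), jnp.full((4,), 0.1)      # last two converted model outputs
h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
r0 = h_0 / h
D0, D1 = m0, (m0 - m1) / r0
x_t = sigma_t / sigma_s0 * sample - alpha_t * (jnp.exp(-h) - 1.0) * D0 - 0.5 * (alpha_t * (jnp.exp(-h) - 1.0)) * D1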
(lambda_t, lambda_s0, lambda_s1) = (state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1]) (alpha_t, alpha_s0) = (state.alpha_t[t], state.alpha_t[s0]) (sigma_t, sigma_s0) = (state.sigma_t[t], state.sigma_t[s0]) (h, h_0) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1) r0 = h_0 / h (D0, D1) = (m0, 1.0 / r0 * (m0 - m1)) if self.config.algorithm_type == 'dpmsolver++': if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s0 * sample - alpha_t * (jnp.exp(-h) - 1.0) * D0 - 0.5 * (alpha_t * (jnp.exp(-h) - 1.0)) * D1 elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s0 * sample - alpha_t * (jnp.exp(-h) - 1.0) * D0 + alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0) * D1 elif self.config.algorithm_type == 'dpmsolver': if self.config.solver_type == 'midpoint': x_t = alpha_t / alpha_s0 * sample - sigma_t * (jnp.exp(h) - 1.0) * D0 - 0.5 * (sigma_t * (jnp.exp(h) - 1.0)) * D1 elif self.config.solver_type == 'heun': x_t = alpha_t / alpha_s0 * sample - sigma_t * (jnp.exp(h) - 1.0) * D0 - sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0) * D1 return x_t def multistep_dpm_solver_third_order_update(self, state: DPMSolverMultistepSchedulerState, model_output_list: jnp.ndarray, timestep_list: List[int], prev_timestep: int, sample: jnp.ndarray) -> jnp.ndarray: (t, s0, s1, s2) = (prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]) (m0, m1, m2) = (model_output_list[-1], model_output_list[-2], model_output_list[-3]) (lambda_t, lambda_s0, lambda_s1, lambda_s2) = (state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1], state.lambda_t[s2]) (alpha_t, alpha_s0) = (state.alpha_t[t], state.alpha_t[s0]) (sigma_t, sigma_s0) = (state.sigma_t[t], state.sigma_t[s0]) (h, h_0, h_1) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2) (r0, r1) = (h_0 / h, h_1 / h) D0 = m0 (D1_0, D1_1) = (1.0 / r0 * (m0 - m1), 1.0 / r1 * (m1 - m2)) D1 = D1_0 + r0 / (r0 + r1) * (D1_0 - D1_1) D2 = 1.0 / (r0 + r1) * (D1_0 - D1_1) if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s0 * sample - alpha_t * (jnp.exp(-h) - 1.0) * D0 + alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0) * D1 - alpha_t * ((jnp.exp(-h) - 1.0 + h) / h ** 2 - 0.5) * D2 elif self.config.algorithm_type == 'dpmsolver': x_t = alpha_t / alpha_s0 * sample - sigma_t * (jnp.exp(h) - 1.0) * D0 - sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0) * D1 - sigma_t * ((jnp.exp(h) - 1.0 - h) / h ** 2 - 0.5) * D2 return x_t def step(self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, return_dict: bool=True) -> Union[FlaxDPMSolverMultistepSchedulerOutput, Tuple]: if state.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") (step_index,) = jnp.where(state.timesteps == timestep, size=1) step_index = step_index[0] prev_timestep = jax.lax.select(step_index == len(state.timesteps) - 1, 0, state.timesteps[step_index + 1]) model_output = self.convert_model_output(state, model_output, timestep, sample) model_outputs_new = jnp.roll(state.model_outputs, -1, axis=0) model_outputs_new = model_outputs_new.at[-1].set(model_output) state = state.replace(model_outputs=model_outputs_new, prev_timestep=prev_timestep, cur_sample=sample) def step_1(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: return self.dpm_solver_first_order_update(state, state.model_outputs[-1], state.timesteps[step_index], state.prev_timestep, state.cur_sample) def step_23(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: 
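# Added note with a tiny example (not from the original file): because `step` is written to be
# traceable by JAX, it cannot branch with Python `if` on traced values such as
# `lower_order_nums`. Both candidate updates are therefore computed and `jax.lax.select`
# chooses between them, as in this stand-in snippet (array contents and the flag name are
# invented):
import jax.numpy as jnp
from jax import lax

first_order = jnp.array([1.0, 2.0])                    # stand-in for a step_1-style output
higher_order = jnp.array([10.0, 20.0])                 # stand-in for a step_23-style output
use_first = jnp.array(True)                            # e.g. a traced `lower_order_nums < 1` condition
prev_sample = lax.select(use_first, first_order, higher_order)   # both operands are evaluated; select only picks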
def step_2(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: timestep_list = jnp.array([state.timesteps[step_index - 1], state.timesteps[step_index]]) return self.multistep_dpm_solver_second_order_update(state, state.model_outputs, timestep_list, state.prev_timestep, state.cur_sample) def step_3(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: timestep_list = jnp.array([state.timesteps[step_index - 2], state.timesteps[step_index - 1], state.timesteps[step_index]]) return self.multistep_dpm_solver_third_order_update(state, state.model_outputs, timestep_list, state.prev_timestep, state.cur_sample) step_2_output = step_2(state) step_3_output = step_3(state) if self.config.solver_order == 2: return step_2_output elif self.config.lower_order_final and len(state.timesteps) < 15: return jax.lax.select(state.lower_order_nums < 2, step_2_output, jax.lax.select(step_index == len(state.timesteps) - 2, step_2_output, step_3_output)) else: return jax.lax.select(state.lower_order_nums < 2, step_2_output, step_3_output) step_1_output = step_1(state) step_23_output = step_23(state) if self.config.solver_order == 1: prev_sample = step_1_output elif self.config.lower_order_final and len(state.timesteps) < 15: prev_sample = jax.lax.select(state.lower_order_nums < 1, step_1_output, jax.lax.select(step_index == len(state.timesteps) - 1, step_1_output, step_23_output)) else: prev_sample = jax.lax.select(state.lower_order_nums < 1, step_1_output, step_23_output) state = state.replace(lower_order_nums=jnp.minimum(state.lower_order_nums + 1, self.config.solver_order)) if not return_dict: return (prev_sample, state) return FlaxDPMSolverMultistepSchedulerOutput(prev_sample=prev_sample, state=state) def scale_model_input(self, state: DPMSolverMultistepSchedulerState, sample: jnp.ndarray, timestep: Optional[int]=None) -> jnp.ndarray: return sample def add_noise(self, state: DPMSolverMultistepSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray: return add_noise_common(state.common, original_samples, noise, timesteps) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_dpmsolver_multistep_inverse.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, solver_order: int=2, 
prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, algorithm_type: str='dpmsolver++', solver_type: str='midpoint', lower_order_final: bool=True, euler_at_final: bool=False, use_karras_sigmas: Optional[bool]=False, lambda_min_clipped: float=-float('inf'), variance_type: Optional[str]=None, timestep_spacing: str='linspace', steps_offset: int=0): if algorithm_type in ['dpmsolver', 'sde-dpmsolver']: deprecation_message = f'algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead' deprecate('algorithm_types dpmsolver and sde-dpmsolver', '1.0.0', deprecation_message) if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 self.init_noise_sigma = 1.0 if algorithm_type not in ['dpmsolver', 'dpmsolver++', 'sde-dpmsolver', 'sde-dpmsolver++']: if algorithm_type == 'deis': self.register_to_config(algorithm_type='dpmsolver++') else: raise NotImplementedError(f'{algorithm_type} is not implemented for {self.__class__}') if solver_type not in ['midpoint', 'heun']: if solver_type in ['logrho', 'bh1', 'bh2']: self.register_to_config(solver_type='midpoint') else: raise NotImplementedError(f'{solver_type} is not implemented for {self.__class__}') self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32).copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.lower_order_nums = 0 self._step_index = None self.sigmas = self.sigmas.to('cpu') self.use_karras_sigmas = use_karras_sigmas @property def step_index(self): return self._step_index def set_timesteps(self, num_inference_steps: int=None, device: Union[str, torch.device]=None): clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped).item() self.noisiest_timestep = self.config.num_train_timesteps - 1 - clipped_idx if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.noisiest_timestep, num_inference_steps + 1).round()[:-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = (self.noisiest_timestep + 1) // (num_inference_steps + 1) timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[:-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / num_inference_steps timesteps = np.arange(self.noisiest_timestep + 1, 0, -step_ratio).round()[::-1].copy().astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() timesteps = timesteps.copy().astype(np.int64) sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigma_max = ((1 - self.alphas_cumprod[self.noisiest_timestep]) / self.alphas_cumprod[self.noisiest_timestep]) ** 0.5 sigmas = np.concatenate([sigmas, [sigma_max]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) (_, unique_indices) = np.unique(timesteps, return_index=True) timesteps = timesteps[np.sort(unique_indices)] self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [None] * self.config.solver_order self.lower_order_nums = 0 self._step_index = None self.sigmas = self.sigmas.to('cpu') def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / (sigma ** 2 + 1) ** 0.5 sigma_t = sigma * alpha_t return (alpha_t, sigma_t) def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError('missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if self.config.algorithm_type in ['dpmsolver++', 
'sde-dpmsolver++']: if self.config.prediction_type == 'epsilon': if self.config.variance_type in ['learned', 'learned_range']: model_output = model_output[:, :3] sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == 'sample': x0_pred = model_output elif self.config.prediction_type == 'v_prediction': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the DPMSolverMultistepScheduler.') if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred elif self.config.algorithm_type in ['dpmsolver', 'sde-dpmsolver']: if self.config.prediction_type == 'epsilon': if self.config.variance_type in ['learned', 'learned_range']: epsilon = model_output[:, :3] else: epsilon = model_output elif self.config.prediction_type == 'sample': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) epsilon = (sample - alpha_t * model_output) / sigma_t elif self.config.prediction_type == 'v_prediction': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) epsilon = alpha_t * model_output + sigma_t * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the DPMSolverMultistepScheduler.') if self.config.thresholding: sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * epsilon) / alpha_t x0_pred = self._threshold_sample(x0_pred) epsilon = (sample - alpha_t * x0_pred) / sigma_t return epsilon def dpm_solver_first_order_update(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s, sigma_s) = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s * sample - alpha_t * (torch.exp(-h) - 1.0) * model_output elif self.config.algorithm_type == 'dpmsolver': x_t = alpha_t / alpha_s * sample - sigma_t * (torch.exp(h) - 1.0) * model_output elif self.config.algorithm_type == 'sde-dpmsolver++': assert noise is not None x_t = sigma_t / sigma_s * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * model_output + sigma_t * 
torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif self.config.algorithm_type == 'sde-dpmsolver': assert noise is not None x_t = alpha_t / alpha_s * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise return x_t def multistep_dpm_solver_second_order_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing `sample` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s0, sigma_s1) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) (m0, m1) = (model_output_list[-1], model_output_list[-2]) (h, h_0) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1) r0 = h_0 / h (D0, D1) = (m0, 1.0 / r0 * (m0 - m1)) if self.config.algorithm_type == 'dpmsolver++': if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1 elif self.config.algorithm_type == 'dpmsolver': if self.config.solver_type == 'midpoint': x_t = alpha_t / alpha_s0 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 elif self.config.solver_type == 'heun': x_t = alpha_t / alpha_s0 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - sigma_t * ((torch.exp(h) - 1.0) / h - 1.0) * D1 elif self.config.algorithm_type == 'sde-dpmsolver++': assert noise is not None if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif self.config.algorithm_type == 'sde-dpmsolver': assert noise is not None if self.config.solver_type == 'midpoint': x_t = alpha_t / alpha_s0 * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - sigma_t * (torch.exp(h) - 1.0) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise elif self.config.solver_type == 'heun': x_t = alpha_t / alpha_s0 * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - 2.0 * 
(sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise return x_t def multistep_dpm_solver_third_order_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing`sample` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s0, sigma_s1, sigma_s2) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) (alpha_s2, sigma_s2) = self._sigma_to_alpha_sigma_t(sigma_s2) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) (m0, m1, m2) = (model_output_list[-1], model_output_list[-2], model_output_list[-3]) (h, h_0, h_1) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2) (r0, r1) = (h_0 / h, h_1 / h) D0 = m0 (D1_0, D1_1) = (1.0 / r0 * (m0 - m1), 1.0 / r1 * (m1 - m2)) D1 = D1_0 + r0 / (r0 + r1) * (D1_0 - D1_1) D2 = 1.0 / (r0 + r1) * (D1_0 - D1_1) if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1 - alpha_t * ((torch.exp(-h) - 1.0 + h) / h ** 2 - 0.5) * D2 elif self.config.algorithm_type == 'dpmsolver': x_t = alpha_t / alpha_s0 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - sigma_t * ((torch.exp(h) - 1.0) / h - 1.0) * D1 - sigma_t * ((torch.exp(h) - 1.0 - h) / h ** 2 - 0.5) * D2 return x_t def _init_step_index(self, timestep): if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) index_candidates = (self.timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() self._step_index = step_index def step(self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, generator=None, variance_noise: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) lower_order_final = self.step_index == len(self.timesteps) - 1 and (self.config.euler_at_final or (self.config.lower_order_final and len(self.timesteps) < 15)) lower_order_second = self.step_index == len(self.timesteps) - 2 and self.config.lower_order_final and (len(self.timesteps) < 15) 
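# Editorial note (hedged): the two flags computed above deliberately drop the solver to first or
# second order for the final one or two steps when fewer than 15 inference steps are used
# (or when `euler_at_final=True`), trading a little accuracy for numerical stability at the
# end of the trajectory. A minimal usage sketch of this inverse scheduler follows; the `unet`
# callable and the initial `latents` tensor are hypothetical placeholders, not part of this file:
#
#     scheduler = DPMSolverMultistepInverseScheduler()
#     scheduler.set_timesteps(num_inference_steps=25)
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t)  # hypothetical denoiser call
#         latents = scheduler.step(noise_pred, t, latents).prev_sample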
model_output = self.convert_model_output(model_output, sample=sample) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.model_outputs[-1] = model_output if self.config.algorithm_type in ['sde-dpmsolver', 'sde-dpmsolver++'] and variance_noise is None: noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) elif self.config.algorithm_type in ['sde-dpmsolver', 'sde-dpmsolver++']: noise = variance_noise else: noise = None if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) else: prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) if self.lower_order_nums < self.config.solver_order: self.lower_order_nums += 1 self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: return sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) step_indices = [] for timestep in timesteps: index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(schedule_timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() step_indices.append(step_index) sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) noisy_samples = alpha_t * original_samples + sigma_t * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_dpmsolver_sde.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch import torchsde from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput class BatchedBrownianTree: def __init__(self, x, t0, t1, seed=None, **kwargs): (t0, t1, self.sign) = self.sort(t0, t1) w0 = kwargs.get('w0', torch.zeros_like(x)) if seed is None: seed = torch.randint(0, 2 ** 63 - 1, []).item() self.batched = True try: assert len(seed) == x.shape[0] w0 = w0[0] except TypeError: seed = [seed] self.batched = False self.trees = [torchsde.BrownianInterval(t0=t0, t1=t1, size=w0.shape, dtype=w0.dtype, device=w0.device, entropy=s, tol=1e-06, pool_size=24, halfway_tree=True) for s in seed] @staticmethod def sort(a, b): return (a, b, 1) if a < b else (b, a, -1) def __call__(self, t0, t1): (t0, t1, sign) = self.sort(t0, t1) w = torch.stack([tree(t0, t1) for tree in self.trees]) * 
(self.sign * sign) return w if self.batched else w[0] class BrownianTreeNoiseSampler: def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x): self.transform = transform (t0, t1) = (self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max))) self.tree = BatchedBrownianTree(x, t0, t1, seed) def __call__(self, sigma, sigma_next): (t0, t1) = (self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next))) return self.tree(t0, t1) / (t1 - t0).abs().sqrt() def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 2 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.00085, beta_end: float=0.012, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, prediction_type: str='epsilon', use_karras_sigmas: Optional[bool]=False, noise_sampler_seed: Optional[int]=None, timestep_spacing: str='linspace', steps_offset: int=0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.set_timesteps(num_train_timesteps, None, num_train_timesteps) self.use_karras_sigmas = use_karras_sigmas self.noise_sampler = None self.noise_sampler_seed = noise_sampler_seed self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index @property def init_noise_sigma(self): if self.config.timestep_spacing in ['linspace', 'trailing']: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: 
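# Editorial note (hedged): the step index is resolved lazily from `timestep` on the first call,
# so pipelines do not have to set it explicitly. Because this scheduler works in sigma space
# with a non-unit `init_noise_sigma`, callers are expected to scale the model input before every
# denoiser call; a sketch with a hypothetical `unet` and `sample`:
#
#     sample = scheduler.scale_model_input(sample, t)
#     noise_pred = unet(sample, t)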
self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sigma_input = sigma if self.state_in_first_order else self.mid_point_sigma sample = sample / (sigma_input ** 2 + 1) ** 0.5 return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None, num_train_timesteps: Optional[int]=None): self.num_inference_steps = num_inference_steps num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy() elif self.config.timestep_spacing == 'leading': step_ratio = num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = num_train_timesteps / self.num_inference_steps timesteps = np.arange(num_train_timesteps, 0, -step_ratio).round().copy().astype(float) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) second_order_timesteps = self._second_order_timesteps(sigmas, log_sigmas) sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) sigmas = torch.from_numpy(sigmas).to(device=device) self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) timesteps = torch.from_numpy(timesteps) second_order_timesteps = torch.from_numpy(second_order_timesteps) timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) timesteps[1::2] = second_order_timesteps if str(device).startswith('mps'): self.timesteps = timesteps.to(device, dtype=torch.float32) else: self.timesteps = timesteps.to(device=device) self.sample = None self.mid_point_sigma = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') self.noise_sampler = None def _second_order_timesteps(self, sigmas, log_sigmas): def sigma_fn(_t): return np.exp(-_t) def t_fn(_sigma): return -np.log(_sigma) midpoint_ratio = 0.5 t = t_fn(sigmas) delta_time = np.diff(t) t_proposed = t[:-1] + delta_time * midpoint_ratio sig_proposed = sigma_fn(t_proposed) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sig_proposed]) return timesteps def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _convert_to_karras(self, in_sigmas: torch.Tensor) -> torch.Tensor: sigma_min: float = in_sigmas[-1].item() sigma_max: float = in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, self.num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def 
state_in_first_order(self): return self.sample is None def step(self, model_output: Union[torch.Tensor, np.ndarray], timestep: Union[float, torch.Tensor], sample: Union[torch.Tensor, np.ndarray], return_dict: bool=True, s_noise: float=1.0) -> Union[SchedulerOutput, Tuple]: if self.step_index is None: self._init_step_index(timestep) if self.noise_sampler is None: (min_sigma, max_sigma) = (self.sigmas[self.sigmas > 0].min(), self.sigmas.max()) self.noise_sampler = BrownianTreeNoiseSampler(sample, min_sigma, max_sigma, self.noise_sampler_seed) def sigma_fn(_t: torch.Tensor) -> torch.Tensor: return _t.neg().exp() def t_fn(_sigma: torch.Tensor) -> torch.Tensor: return _sigma.log().neg() if self.state_in_first_order: sigma = self.sigmas[self.step_index] sigma_next = self.sigmas[self.step_index + 1] else: sigma = self.sigmas[self.step_index - 1] sigma_next = self.sigmas[self.step_index] midpoint_ratio = 0.5 (t, t_next) = (t_fn(sigma), t_fn(sigma_next)) delta_time = t_next - t t_proposed = t + delta_time * midpoint_ratio if self.config.prediction_type == 'epsilon': sigma_input = sigma if self.state_in_first_order else sigma_fn(t_proposed) pred_original_sample = sample - sigma_input * model_output elif self.config.prediction_type == 'v_prediction': sigma_input = sigma if self.state_in_first_order else sigma_fn(t_proposed) pred_original_sample = model_output * (-sigma_input / (sigma_input ** 2 + 1) ** 0.5) + sample / (sigma_input ** 2 + 1) elif self.config.prediction_type == 'sample': raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') if sigma_next == 0: derivative = (sample - pred_original_sample) / sigma dt = sigma_next - sigma prev_sample = sample + derivative * dt else: if self.state_in_first_order: t_next = t_proposed else: sample = self.sample sigma_from = sigma_fn(t) sigma_to = sigma_fn(t_next) sigma_up = min(sigma_to, (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5) sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5 ancestral_t = t_fn(sigma_down) prev_sample = sigma_fn(ancestral_t) / sigma_fn(t) * sample - (t - ancestral_t).expm1() * pred_original_sample prev_sample = prev_sample + self.noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * sigma_up if self.state_in_first_order: self.sample = sample self.mid_point_sigma = sigma_fn(t_next) else: self.sample = None self.mid_point_sigma = None self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = 
sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate, logging from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput logger = logging.get_logger(__name__) def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[np.ndarray]=None, solver_order: int=2, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, algorithm_type: str='dpmsolver++', solver_type: str='midpoint', lower_order_final: bool=False, use_karras_sigmas: Optional[bool]=False, final_sigmas_type: Optional[str]='zero', lambda_min_clipped: float=-float('inf'), variance_type: Optional[str]=None): if algorithm_type == 'dpmsolver': deprecation_message = 'algorithm_type `dpmsolver` is deprecated and will be removed in a future version. 
Choose from `dpmsolver++` or `sde-dpmsolver++` instead' deprecate('algorithm_types=dpmsolver', '1.0.0', deprecation_message) if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 self.init_noise_sigma = 1.0 if algorithm_type not in ['dpmsolver', 'dpmsolver++', 'sde-dpmsolver++']: if algorithm_type == 'deis': self.register_to_config(algorithm_type='dpmsolver++') else: raise NotImplementedError(f'{algorithm_type} is not implemented for {self.__class__}') if solver_type not in ['midpoint', 'heun']: if solver_type in ['logrho', 'bh1', 'bh2']: self.register_to_config(solver_type='midpoint') else: raise NotImplementedError(f'{solver_type} is not implemented for {self.__class__}') if algorithm_type not in ['dpmsolver++', 'sde-dpmsolver++'] and final_sigmas_type == 'zero': raise ValueError(f'`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please chooose `sigma_min` instead.') self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.sample = None self.order_list = self.get_order_list(num_train_timesteps) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def get_order_list(self, num_inference_steps: int) -> List[int]: steps = num_inference_steps order = self.config.solver_order if order > 3: raise ValueError('Order > 3 is not supported by this scheduler') if self.config.lower_order_final: if order == 3: if steps % 3 == 0: orders = [1, 2, 3] * (steps // 3 - 1) + [1, 2] + [1] elif steps % 3 == 1: orders = [1, 2, 3] * (steps // 3) + [1] else: orders = [1, 2, 3] * (steps // 3) + [1, 2] elif order == 2: if steps % 2 == 0: orders = [1, 2] * (steps // 2 - 1) + [1, 1] else: orders = [1, 2] * (steps // 2) + [1] elif order == 1: orders = [1] * steps elif order == 3: orders = [1, 2, 3] * (steps // 3) elif order == 2: orders = [1, 2] * (steps // 2) elif order == 1: orders = [1] * steps return orders @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def set_timesteps(self, num_inference_steps: int=None, device: Union[str, torch.device]=None, timesteps: Optional[List[int]]=None): if num_inference_steps is None and timesteps is None: raise ValueError('Must pass exactly one of `num_inference_steps` or `timesteps`.') if num_inference_steps is not None and timesteps is not None: raise ValueError('Must pass exactly one of `num_inference_steps` or `timesteps`.') if timesteps is not None and self.config.use_karras_sigmas: 
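# Editorial note (hedged): user-supplied `timesteps` and `use_karras_sigmas=True` are mutually
# exclusive, hence the error raised just below. Illustrative call styles only (the concrete
# values are placeholders):
#
#     scheduler.set_timesteps(num_inference_steps=50)             # evenly spaced schedule
#     scheduler.set_timesteps(timesteps=[999, 749, 499, 249, 0])  # explicit custom schedule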
raise ValueError('Cannot use `timesteps` when `config.use_karras_sigmas=True`.') num_inference_steps = num_inference_steps or len(timesteps) self.num_inference_steps = num_inference_steps if timesteps is not None: timesteps = np.array(timesteps).astype(np.int64) else: clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) timesteps = np.linspace(0, self.config.num_train_timesteps - 1 - clipped_idx, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) if self.config.use_karras_sigmas: log_sigmas = np.log(sigmas) sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.final_sigmas_type == 'sigma_min': sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 elif self.config.final_sigmas_type == 'zero': sigma_last = 0 else: raise ValueError(f'`final_sigmas_type` must be one of `sigma_min` or `zero`, but got {self.config.final_sigmas_type}') sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas).to(device=device) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.model_outputs = [None] * self.config.solver_order self.sample = None if not self.config.lower_order_final and num_inference_steps % self.config.solver_order != 0: logger.warning(f'Changing scheduler {self.config} to have `lower_order_final` set to True to handle an uneven number of inference steps. Please make sure to always use an even number of `num_inference_steps` when using `lower_order_final=False`.') self.register_to_config(lower_order_final=True) if not self.config.lower_order_final and self.config.final_sigmas_type == 'zero': logger.warning(f"`final_sigmas_type='zero'` is not supported for `lower_order_final=False`. 
Changing scheduler {self.config} to have `lower_order_final` set to True.") self.register_to_config(lower_order_final=True) self.order_list = self.get_order_list(num_inference_steps) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / (sigma ** 2 + 1) ** 0.5 sigma_t = sigma * alpha_t return (alpha_t, sigma_t) def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError('missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if self.config.algorithm_type in ['dpmsolver++', 'sde-dpmsolver++']: if self.config.prediction_type == 'epsilon': if self.config.variance_type in ['learned', 'learned_range']: model_output = model_output[:, :3] sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == 'sample': x0_pred = model_output elif self.config.prediction_type == 'v_prediction': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the DPMSolverSinglestepScheduler.') if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred elif self.config.algorithm_type == 'dpmsolver': if 
self.config.prediction_type == 'epsilon': if self.config.variance_type in ['learned', 'learned_range']: epsilon = model_output[:, :3] else: epsilon = model_output elif self.config.prediction_type == 'sample': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) epsilon = (sample - alpha_t * model_output) / sigma_t elif self.config.prediction_type == 'v_prediction': sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) epsilon = alpha_t * model_output + sigma_t * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the DPMSolverSinglestepScheduler.') if self.config.thresholding: (alpha_t, sigma_t) = (self.alpha_t[timestep], self.sigma_t[timestep]) x0_pred = (sample - sigma_t * epsilon) / alpha_t x0_pred = self._threshold_sample(x0_pred) epsilon = (sample - alpha_t * x0_pred) / sigma_t return epsilon def dpm_solver_first_order_update(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s, sigma_s) = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s * sample - alpha_t * (torch.exp(-h) - 1.0) * model_output elif self.config.algorithm_type == 'dpmsolver': x_t = alpha_t / alpha_s * sample - sigma_t * (torch.exp(h) - 1.0) * model_output elif self.config.algorithm_type == 'sde-dpmsolver++': assert noise is not None x_t = sigma_t / sigma_s * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * model_output + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise return x_t def singlestep_dpm_solver_second_order_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing `sample` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter 
`self.step_index`') (sigma_t, sigma_s0, sigma_s1) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) (m0, m1) = (model_output_list[-1], model_output_list[-2]) (h, h_0) = (lambda_t - lambda_s1, lambda_s0 - lambda_s1) r0 = h_0 / h (D0, D1) = (m1, 1.0 / r0 * (m0 - m1)) if self.config.algorithm_type == 'dpmsolver++': if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s1 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s1 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1 elif self.config.algorithm_type == 'dpmsolver': if self.config.solver_type == 'midpoint': x_t = alpha_t / alpha_s1 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 elif self.config.solver_type == 'heun': x_t = alpha_t / alpha_s1 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - sigma_t * ((torch.exp(h) - 1.0) / h - 1.0) * D1 elif self.config.algorithm_type == 'sde-dpmsolver++': assert noise is not None if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s1 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s1 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise return x_t def singlestep_dpm_solver_third_order_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing`sample` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') (sigma_t, sigma_s0, sigma_s1, sigma_s2) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) (alpha_s2, sigma_s2) = self._sigma_to_alpha_sigma_t(sigma_s2) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) (m0, m1, m2) = (model_output_list[-1], model_output_list[-2], 
model_output_list[-3]) (h, h_0, h_1) = (lambda_t - lambda_s2, lambda_s0 - lambda_s2, lambda_s1 - lambda_s2) (r0, r1) = (h_0 / h, h_1 / h) D0 = m2 (D1_0, D1_1) = (1.0 / r1 * (m1 - m2), 1.0 / r0 * (m0 - m2)) D1 = (r0 * D1_0 - r1 * D1_1) / (r0 - r1) D2 = 2.0 * (D1_1 - D1_0) / (r0 - r1) if self.config.algorithm_type == 'dpmsolver++': if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s2 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1_1 elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s2 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1 - alpha_t * ((torch.exp(-h) - 1.0 + h) / h ** 2 - 0.5) * D2 elif self.config.algorithm_type == 'dpmsolver': if self.config.solver_type == 'midpoint': x_t = alpha_t / alpha_s2 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - sigma_t * ((torch.exp(h) - 1.0) / h - 1.0) * D1_1 elif self.config.solver_type == 'heun': x_t = alpha_t / alpha_s2 * sample - sigma_t * (torch.exp(h) - 1.0) * D0 - sigma_t * ((torch.exp(h) - 1.0) / h - 1.0) * D1 - sigma_t * ((torch.exp(h) - 1.0 - h) / h ** 2 - 0.5) * D2 return x_t def singlestep_dpm_solver_update(self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor=None, order: int=None, noise: Optional[torch.Tensor]=None, **kwargs) -> torch.Tensor: timestep_list = args[0] if len(args) > 0 else kwargs.pop('timestep_list', None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(' missing`sample` as a required keyward argument') if order is None: if len(args) > 3: order = args[3] else: raise ValueError(' missing `order` as a required keyward argument') if timestep_list is not None: deprecate('timestep_list', '1.0.0', 'Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') if order == 1: return self.dpm_solver_first_order_update(model_output_list[-1], sample=sample, noise=noise) elif order == 2: return self.singlestep_dpm_solver_second_order_update(model_output_list, sample=sample, noise=noise) elif order == 3: return self.singlestep_dpm_solver_third_order_update(model_output_list, sample=sample) else: raise ValueError(f'Order must be 1, 2, 3, got {order}') def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, generator=None, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the 
scheduler") if self.step_index is None: self._init_step_index(timestep) model_output = self.convert_model_output(model_output, sample=sample) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.model_outputs[-1] = model_output if self.config.algorithm_type == 'sde-dpmsolver++': noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) else: noise = None order = self.order_list[self.step_index] while self.model_outputs[-order] is None: order -= 1 if order == 1: self.sample = sample prev_sample = self.singlestep_dpm_solver_update(self.model_outputs, sample=self.sample, order=order, noise=noise) self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: return sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) noisy_samples = alpha_t * original_samples + sigma_t * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_edm_dpmsolver_multistep.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput class EDMDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): _compatibles = [] order = 1 @register_to_config def __init__(self, sigma_min: float=0.002, sigma_max: float=80.0, sigma_data: float=0.5, sigma_schedule: str='karras', num_train_timesteps: int=1000, prediction_type: str='epsilon', rho: float=7.0, solver_order: int=2, thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, algorithm_type: str='dpmsolver++', solver_type: str='midpoint', lower_order_final: bool=True, euler_at_final: bool=False, final_sigmas_type: Optional[str]='zero'): if algorithm_type not in ['dpmsolver++', 'sde-dpmsolver++']: if algorithm_type == 'deis': self.register_to_config(algorithm_type='dpmsolver++') else: raise NotImplementedError(f'{algorithm_type} is not implemented for {self.__class__}') if solver_type not in ['midpoint', 'heun']: if solver_type in ['logrho', 'bh1', 'bh2']: self.register_to_config(solver_type='midpoint') else: raise NotImplementedError(f'{solver_type} is not implemented for {self.__class__}') if algorithm_type not in ['dpmsolver++', 'sde-dpmsolver++'] and final_sigmas_type == 
'zero': raise ValueError(f'`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead.') ramp = torch.linspace(0, 1, num_train_timesteps) if sigma_schedule == 'karras': sigmas = self._compute_karras_sigmas(ramp) elif sigma_schedule == 'exponential': sigmas = self._compute_exponential_sigmas(ramp) self.timesteps = self.precondition_noise(sigmas) self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) self.num_inference_steps = None self.model_outputs = [None] * solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def init_noise_sigma(self): return (self.config.sigma_max ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def precondition_inputs(self, sample, sigma): c_in = 1 / (sigma ** 2 + self.config.sigma_data ** 2) ** 0.5 scaled_sample = sample * c_in return scaled_sample def precondition_noise(self, sigma): if not isinstance(sigma, torch.Tensor): sigma = torch.tensor([sigma]) c_noise = 0.25 * torch.log(sigma) return c_noise def precondition_outputs(self, sample, model_output, sigma): sigma_data = self.config.sigma_data c_skip = sigma_data ** 2 / (sigma ** 2 + sigma_data ** 2) if self.config.prediction_type == 'epsilon': c_out = sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5 elif self.config.prediction_type == 'v_prediction': c_out = -sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5 else: raise ValueError(f'Prediction type {self.config.prediction_type} is not supported.') denoised = c_skip * sample + c_out * model_output return denoised def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = self.precondition_inputs(sample, sigma) self.is_scale_input_called = True return sample def set_timesteps(self, num_inference_steps: int=None, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps ramp = torch.linspace(0, 1, self.num_inference_steps) if self.config.sigma_schedule == 'karras': sigmas = self._compute_karras_sigmas(ramp) elif self.config.sigma_schedule == 'exponential': sigmas = self._compute_exponential_sigmas(ramp) sigmas = sigmas.to(dtype=torch.float32, device=device) self.timesteps = self.precondition_noise(sigmas) if self.config.final_sigmas_type == 'sigma_min': sigma_last = self.config.sigma_min elif self.config.final_sigmas_type == 'zero': sigma_last = 0 else: raise ValueError(f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}") self.sigmas = torch.cat([sigmas, torch.tensor([sigma_last], dtype=torch.float32, device=device)]) self.model_outputs = [None] * self.config.solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _compute_karras_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: sigma_min = sigma_min or self.config.sigma_min sigma_max = sigma_max or self.config.sigma_max rho = self.config.rho min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def _compute_exponential_sigmas(self, ramp, sigma_min=None, 
sigma_max=None) -> torch.Tensor: sigma_min = sigma_min or self.config.sigma_min sigma_max = sigma_max or self.config.sigma_max sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), len(ramp)).exp().flip(0) return sigmas def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = torch.tensor(1) sigma_t = sigma return (alpha_t, sigma_t) def convert_model_output(self, model_output: torch.Tensor, sample: torch.Tensor=None) -> torch.Tensor: sigma = self.sigmas[self.step_index] x0_pred = self.precondition_outputs(sample, model_output, sigma) if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred def dpm_solver_first_order_update(self, model_output: torch.Tensor, sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None) -> torch.Tensor: (sigma_t, sigma_s) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s, sigma_s) = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s * sample - alpha_t * (torch.exp(-h) - 1.0) * model_output elif self.config.algorithm_type == 'sde-dpmsolver++': assert noise is not None x_t = sigma_t / sigma_s * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * model_output + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise return x_t def multistep_dpm_solver_second_order_update(self, model_output_list: List[torch.Tensor], sample: torch.Tensor=None, noise: Optional[torch.Tensor]=None) -> torch.Tensor: (sigma_t, sigma_s0, sigma_s1) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) (m0, m1) = (model_output_list[-1], model_output_list[-2]) (h, h_0) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1) r0 = h_0 / h (D0, D1) = (m0, 1.0 / r0 * (m0 - m1)) if self.config.algorithm_type == 'dpmsolver++': if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 
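# --- Editor's note: hedged sketch of dpm_solver_first_order_update above under the EDM
# convention used in this file (_sigma_to_alpha_sigma_t returns alpha_t = 1, so
# lambda = -log(sigma) and exp(-h) = sigma_t / sigma_s). The function and tensor names are
# local to this sketch; sigma values are illustrative.
import torch

def dpmpp_first_order(x: torch.Tensor, x0_pred: torch.Tensor, sigma_s: float, sigma_t: float) -> torch.Tensor:
    # x_t = (sigma_t / sigma_s) * x - alpha_t * (exp(-h) - 1) * x0_pred, with alpha_t = 1 and
    # exp(-h) = sigma_t / sigma_s, which simplifies to a convex combination of x and x0_pred.
    ratio = sigma_t / sigma_s
    return ratio * x + (1.0 - ratio) * x0_pred

x = torch.randn(1, 4, 8, 8)
x0 = torch.zeros_like(x)  # pretend the model predicts an all-zero clean image
print(dpmpp_first_order(x, x0, sigma_s=10.0, sigma_t=5.0).std())  # roughly half of x.std()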
elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1 elif self.config.algorithm_type == 'sde-dpmsolver++': assert noise is not None if self.config.solver_type == 'midpoint': x_t = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise elif self.config.solver_type == 'heun': x_t = sigma_t / sigma_s0 * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * D0 + alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise return x_t def multistep_dpm_solver_third_order_update(self, model_output_list: List[torch.Tensor], sample: torch.Tensor=None) -> torch.Tensor: (sigma_t, sigma_s0, sigma_s1, sigma_s2) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) (alpha_s1, sigma_s1) = self._sigma_to_alpha_sigma_t(sigma_s1) (alpha_s2, sigma_s2) = self._sigma_to_alpha_sigma_t(sigma_s2) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) (m0, m1, m2) = (model_output_list[-1], model_output_list[-2], model_output_list[-3]) (h, h_0, h_1) = (lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2) (r0, r1) = (h_0 / h, h_1 / h) D0 = m0 (D1_0, D1_1) = (1.0 / r0 * (m0 - m1), 1.0 / r1 * (m1 - m2)) D1 = D1_0 + r0 / (r0 + r1) * (D1_0 - D1_1) D2 = 1.0 / (r0 + r1) * (D1_0 - D1_1) if self.config.algorithm_type == 'dpmsolver++': x_t = sigma_t / sigma_s0 * sample - alpha_t * (torch.exp(-h) - 1.0) * D0 + alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0) * D1 - alpha_t * ((torch.exp(-h) - 1.0 + h) / h ** 2 - 0.5) * D2 return x_t def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, generator=None, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) lower_order_final = self.step_index == len(self.timesteps) - 1 and (self.config.euler_at_final or (self.config.lower_order_final and len(self.timesteps) < 15) or self.config.final_sigmas_type == 'zero') lower_order_second = self.step_index == len(self.timesteps) - 2 and self.config.lower_order_final and (len(self.timesteps) < 15) model_output = self.convert_model_output(model_output, sample=sample) for i in 
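# --- Editor's note: hedged sketch of the solver-order selection inside
# EDMDPMSolverMultistepScheduler.step() below. It replays the same booleans outside the
# class to show which update (1st/2nd/3rd order) each step uses for an illustrative
# 10-step run with solver_order=3 and the default flags (lower_order_final=True,
# euler_at_final=False, final_sigmas_type='zero').
num_steps, solver_order = 10, 3
euler_at_final, cfg_lower_order_final, final_sigmas_zero = False, True, True
lower_order_nums = 0
for step_index in range(num_steps):
    lower_order_final = step_index == num_steps - 1 and (
        euler_at_final or (cfg_lower_order_final and num_steps < 15) or final_sigmas_zero
    )
    lower_order_second = step_index == num_steps - 2 and cfg_lower_order_final and num_steps < 15
    if solver_order == 1 or lower_order_nums < 1 or lower_order_final:
        order = 1
    elif solver_order == 2 or lower_order_nums < 2 or lower_order_second:
        order = 2
    else:
        order = 3
    print(step_index, order)  # orders: 1, 2, 3, 3, 3, 3, 3, 3, 2, 1 (warm-up, cruise, wind-down)
    if lower_order_nums < solver_order:
        lower_order_nums += 1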
range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.model_outputs[-1] = model_output if self.config.algorithm_type == 'sde-dpmsolver++': noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) else: noise = None if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) else: prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) if self.lower_order_nums < self.config.solver_order: self.lower_order_nums += 1 self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_edm_euler.py import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, logging from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin logger = logging.get_logger(__name__) @dataclass class EDMEulerSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None class EDMEulerScheduler(SchedulerMixin, ConfigMixin): _compatibles = [] order = 1 @register_to_config def __init__(self, sigma_min: float=0.002, sigma_max: float=80.0, sigma_data: float=0.5, sigma_schedule: str='karras', num_train_timesteps: int=1000, prediction_type: str='epsilon', rho: float=7.0): if sigma_schedule not in ['karras', 'exponential']: raise ValueError(f'Wrong value for provided for `sigma_schedule={sigma_schedule!r}`.`') self.num_inference_steps = None ramp = torch.linspace(0, 1, num_train_timesteps) if sigma_schedule == 'karras': sigmas = self._compute_karras_sigmas(ramp) elif sigma_schedule == 'exponential': sigmas = self._compute_exponential_sigmas(ramp) self.timesteps = self.precondition_noise(sigmas) self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) self.is_scale_input_called = False self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def init_noise_sigma(self): return 
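# --- Editor's note: hedged usage sketch for EDMDPMSolverMultistepScheduler above. A random
# tensor stands in for the raw output of an EDM-preconditioned UNet (precondition_outputs is
# applied inside convert_model_output); the latent shape and 30-step count are illustrative.
import torch
from diffusers import EDMDPMSolverMultistepScheduler

scheduler = EDMDPMSolverMultistepScheduler()
scheduler.set_timesteps(num_inference_steps=30)

sample = torch.randn(1, 4, 32, 32) * scheduler.init_noise_sigma  # start at sigma_max noise
for t in scheduler.timesteps:  # timesteps are c_noise = 0.25 * log(sigma) values
    scaled = scheduler.scale_model_input(sample, t)
    model_output = torch.randn_like(sample)  # stand-in for unet(scaled, t).sample
    sample = scheduler.step(model_output, t, sample).prev_sample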
(self.config.sigma_max ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def precondition_inputs(self, sample, sigma): c_in = 1 / (sigma ** 2 + self.config.sigma_data ** 2) ** 0.5 scaled_sample = sample * c_in return scaled_sample def precondition_noise(self, sigma): if not isinstance(sigma, torch.Tensor): sigma = torch.tensor([sigma]) c_noise = 0.25 * torch.log(sigma) return c_noise def precondition_outputs(self, sample, model_output, sigma): sigma_data = self.config.sigma_data c_skip = sigma_data ** 2 / (sigma ** 2 + sigma_data ** 2) if self.config.prediction_type == 'epsilon': c_out = sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5 elif self.config.prediction_type == 'v_prediction': c_out = -sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5 else: raise ValueError(f'Prediction type {self.config.prediction_type} is not supported.') denoised = c_skip * sample + c_out * model_output return denoised def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = self.precondition_inputs(sample, sigma) self.is_scale_input_called = True return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps ramp = torch.linspace(0, 1, self.num_inference_steps) if self.config.sigma_schedule == 'karras': sigmas = self._compute_karras_sigmas(ramp) elif self.config.sigma_schedule == 'exponential': sigmas = self._compute_exponential_sigmas(ramp) sigmas = sigmas.to(dtype=torch.float32, device=device) self.timesteps = self.precondition_noise(sigmas) self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _compute_karras_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: sigma_min = sigma_min or self.config.sigma_min sigma_max = sigma_max or self.config.sigma_max rho = self.config.rho min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def _compute_exponential_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: sigma_min = sigma_min or self.config.sigma_min sigma_max = sigma_max or self.config.sigma_max sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), len(ramp)).exp().flip(0) return sigmas def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, s_churn: float=0.0, s_tmin: float=0.0, s_tmax: float=float('inf'), s_noise: float=1.0, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[EDMEulerSchedulerOutput, Tuple]: if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): raise 
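# --- Editor's note: hedged numeric sketch of the EDM preconditioning coefficients used by
# precondition_inputs / precondition_noise / precondition_outputs above (Karras et al. 2022).
# sigma_data=0.5 matches the scheduler default; the example sigma value is arbitrary.
import torch

sigma, sigma_data = torch.tensor(2.0), 0.5
c_in = 1 / (sigma ** 2 + sigma_data ** 2) ** 0.5          # scales the network input
c_noise = 0.25 * torch.log(sigma)                         # the "timestep" fed to the model
c_skip = sigma_data ** 2 / (sigma ** 2 + sigma_data ** 2) # skip connection around the network
c_out = sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5  # output scale (epsilon prediction)
# denoised = c_skip * noisy_sample + c_out * model_output
print(c_in, c_noise, c_skip, c_out)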
ValueError('Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to `EDMEulerScheduler.step()` is not supported. Make sure to pass one of the `scheduler.timesteps` as a timestep.') if not self.is_scale_input_called: logger.warning('The `scale_model_input` function should be called before `step` to ensure correct denoising. See `StableDiffusionPipeline` for a usage example.') if self.step_index is None: self._init_step_index(timestep) sample = sample.to(torch.float32) sigma = self.sigmas[self.step_index] gamma = min(s_churn / (len(self.sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator) eps = noise * s_noise sigma_hat = sigma * (gamma + 1) if gamma > 0: sample = sample + eps * (sigma_hat ** 2 - sigma ** 2) ** 0.5 pred_original_sample = self.precondition_outputs(sample, model_output, sigma_hat) derivative = (sample - pred_original_sample) / sigma_hat dt = self.sigmas[self.step_index + 1] - sigma_hat prev_sample = sample + derivative * dt prev_sample = prev_sample.to(model_output.dtype) self._step_index += 1 if not return_dict: return (prev_sample,) return EDMEulerSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, logging from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin logger = logging.get_logger(__name__) @dataclass class EulerAncestralDiscreteSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / 
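# --- Editor's note: hedged sketch of the stochastic "churn" inside EDMEulerScheduler.step()
# above. With the default s_churn=0.0 the step is a plain deterministic Euler step; a positive
# s_churn temporarily raises sigma to sigma_hat and adds matched noise (Karras et al. 2022,
# Algorithm 2). All values below are illustrative.
import torch

sigma, n_sigmas = 5.0, 50
s_churn, s_tmin, s_tmax, s_noise = 10.0, 0.05, 50.0, 1.0

gamma = min(s_churn / (n_sigmas - 1), 2 ** 0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0
sigma_hat = sigma * (gamma + 1)

sample = torch.randn(1, 4, 8, 8) * sigma
if gamma > 0:
    eps = torch.randn_like(sample) * s_noise
    sample = sample + eps * (sigma_hat ** 2 - sigma ** 2) ** 0.5  # noise level is now sigma_hat
print(gamma, sigma_hat)  # ~0.204 and ~6.02 for these settings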
alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, prediction_type: str='epsilon', timestep_spacing: str='linspace', steps_offset: int=0, rescale_betas_zero_snr: bool=False): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) if rescale_betas_zero_snr: self.alphas_cumprod[-1] = 2 ** (-24) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.is_scale_input_called = False self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def init_noise_sigma(self): if self.config.timestep_spacing in ['linspace', 'trailing']: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sample / (sigma ** 2 + 1) ** 0.5 self.is_scale_input_called = True return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) timesteps += self.config.steps_offset elif 
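# --- Editor's note: hedged check of rescale_zero_terminal_snr above (the zero-terminal-SNR
# fix from Lin et al., "Common Diffusion Noise Schedules and Sample Steps are Flawed"). It
# builds a default linear beta schedule, rescales it, and confirms the terminal alpha_bar
# (hence the terminal SNR) is driven to zero. The module import path mirrors the file header.
import torch
from diffusers.schedulers.scheduling_euler_ancestral_discrete import rescale_zero_terminal_snr

betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
print(alphas_cumprod[-1])  # ~4e-5: small but non-zero terminal signal

rescaled = rescale_zero_terminal_snr(betas)
alphas_cumprod_rescaled = torch.cumprod(1.0 - rescaled, dim=0)
print(alphas_cumprod_rescaled[-1])  # ~0: zero terminal SNR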
self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.float32) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas).to(device=device) self.timesteps = torch.from_numpy(timesteps).to(device=device) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[EulerAncestralDiscreteSchedulerOutput, Tuple]: if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): raise ValueError('Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to `EulerDiscreteScheduler.step()` is not supported. Make sure to pass one of the `scheduler.timesteps` as a timestep.') if not self.is_scale_input_called: logger.warning('The `scale_model_input` function should be called before `step` to ensure correct denoising. 
See `StableDiffusionPipeline` for a usage example.') if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sample.to(torch.float32) if self.config.prediction_type == 'epsilon': pred_original_sample = sample - sigma * model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = model_output * (-sigma / (sigma ** 2 + 1) ** 0.5) + sample / (sigma ** 2 + 1) elif self.config.prediction_type == 'sample': raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') sigma_from = self.sigmas[self.step_index] sigma_to = self.sigmas[self.step_index + 1] sigma_up = (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5 sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5 derivative = (sample - pred_original_sample) / sigma dt = sigma_down - sigma prev_sample = sample + derivative * dt device = model_output.device noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator) prev_sample = prev_sample + noise * sigma_up prev_sample = prev_sample.to(model_output.dtype) self._step_index += 1 if not return_dict: return (prev_sample,) return EulerAncestralDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_euler_discrete.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, logging from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin logger = logging.get_logger(__name__) @dataclass class EulerDiscreteSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / 
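# --- Editor's note: hedged sketch of the ancestral noise split used in
# EulerAncestralDiscreteScheduler.step() above: the move from sigma_from to sigma_to is split
# into a deterministic step down to sigma_down plus fresh noise of size sigma_up, chosen so
# that sigma_down**2 + sigma_up**2 == sigma_to**2. Sigma values are illustrative.
sigma_from, sigma_to = 10.0, 7.0
sigma_up = (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5
sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
print(sigma_up, sigma_down, sigma_down ** 2 + sigma_up ** 2)  # last value equals sigma_to**2 == 49.0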
num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, prediction_type: str='epsilon', interpolation_type: str='linear', use_karras_sigmas: Optional[bool]=False, sigma_min: Optional[float]=None, sigma_max: Optional[float]=None, timestep_spacing: str='linspace', timestep_type: str='discrete', steps_offset: int=0, rescale_betas_zero_snr: bool=False, final_sigmas_type: str='zero'): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) if rescale_betas_zero_snr: self.alphas_cumprod[-1] = 2 ** (-24) sigmas = (((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5).flip(0) timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32) self.num_inference_steps = None if timestep_type == 'continuous' and prediction_type == 'v_prediction': self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas]) else: self.timesteps = timesteps self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) self.is_scale_input_called = False self.use_karras_sigmas = use_karras_sigmas self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def init_noise_sigma(self): max_sigma = max(self.sigmas) if isinstance(self.sigmas, list) else self.sigmas.max() if self.config.timestep_spacing in ['linspace', 'trailing']: return max_sigma return (max_sigma ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sample / (sigma ** 2 + 1) ** 0.5 self.is_scale_input_called = True return sample def 
set_timesteps(self, num_inference_steps: int=None, device: Union[str, torch.device]=None, timesteps: Optional[List[int]]=None, sigmas: Optional[List[float]]=None): if timesteps is not None and sigmas is not None: raise ValueError('Only one of `timesteps` or `sigmas` should be set.') if num_inference_steps is None and timesteps is None and (sigmas is None): raise ValueError('Must pass exactly one of `num_inference_steps` or `timesteps` or `sigmas.') if num_inference_steps is not None and (timesteps is not None or sigmas is not None): raise ValueError('Can only pass one of `num_inference_steps` or `timesteps` or `sigmas`.') if timesteps is not None and self.config.use_karras_sigmas: raise ValueError('Cannot set `timesteps` with `config.use_karras_sigmas = True`.') if timesteps is not None and self.config.timestep_type == 'continuous' and (self.config.prediction_type == 'v_prediction'): raise ValueError("Cannot set `timesteps` with `config.timestep_type = 'continuous'` and `config.prediction_type = 'v_prediction'`.") if num_inference_steps is None: num_inference_steps = len(timesteps) if timesteps is not None else len(sigmas) - 1 self.num_inference_steps = num_inference_steps if sigmas is not None: log_sigmas = np.log(np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)) sigmas = np.array(sigmas).astype(np.float32) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas[:-1]]) else: if timesteps is not None: timesteps = np.array(timesteps).astype(np.float32) elif self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.float32) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) if self.config.interpolation_type == 'linear': sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) elif self.config.interpolation_type == 'log_linear': sigmas = torch.linspace(np.log(sigmas[-1]), np.log(sigmas[0]), num_inference_steps + 1).exp().numpy() else: raise ValueError(f"{self.config.interpolation_type} is not implemented. 
Please specify interpolation_type to either 'linear' or 'log_linear'") if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) if self.config.final_sigmas_type == 'sigma_min': sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 elif self.config.final_sigmas_type == 'zero': sigma_last = 0 else: raise ValueError(f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}") sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) if self.config.timestep_type == 'continuous' and self.config.prediction_type == 'v_prediction': self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas[:-1]]).to(device=device) else: self.timesteps = torch.from_numpy(timesteps.astype(np.float32)).to(device=device) self._step_index = None self._begin_index = None self.sigmas = sigmas.to('cpu') def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, s_churn: float=0.0, s_tmin: float=0.0, s_tmax: float=float('inf'), s_noise: float=1.0, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[EulerDiscreteSchedulerOutput, Tuple]: if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): raise ValueError('Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to `EulerDiscreteScheduler.step()` is not supported. Make sure to pass one of the `scheduler.timesteps` as a timestep.') if not self.is_scale_input_called: logger.warning('The `scale_model_input` function should be called before `step` to ensure correct denoising. 
See `StableDiffusionPipeline` for a usage example.') if self.step_index is None: self._init_step_index(timestep) sample = sample.to(torch.float32) sigma = self.sigmas[self.step_index] gamma = min(s_churn / (len(self.sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator) eps = noise * s_noise sigma_hat = sigma * (gamma + 1) if gamma > 0: sample = sample + eps * (sigma_hat ** 2 - sigma ** 2) ** 0.5 if self.config.prediction_type == 'original_sample' or self.config.prediction_type == 'sample': pred_original_sample = model_output elif self.config.prediction_type == 'epsilon': pred_original_sample = sample - sigma_hat * model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = model_output * (-sigma / (sigma ** 2 + 1) ** 0.5) + sample / (sigma ** 2 + 1) else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') derivative = (sample - pred_original_sample) / sigma_hat dt = self.sigmas[self.step_index + 1] - sigma_hat prev_sample = sample + derivative * dt prev_sample = prev_sample.to(model_output.dtype) self._step_index += 1 if not return_dict: return (prev_sample,) return EulerDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: if isinstance(timesteps, int) or isinstance(timesteps, torch.IntTensor) or isinstance(timesteps, torch.LongTensor): raise ValueError('Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to `EulerDiscreteScheduler.get_velocity()` is not supported. 
Make sure to pass one of the `scheduler.timesteps` as a timestep.') if sample.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(sample.device, dtype=torch.float32) timesteps = timesteps.to(sample.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(sample.device) timesteps = timesteps.to(sample.device) step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] alphas_cumprod = self.alphas_cumprod.to(sample) sqrt_alpha_prod = alphas_cumprod[step_indices] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[step_indices]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_euler_discrete_flax.py from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left @flax.struct.dataclass class EulerDiscreteSchedulerState: common: CommonSchedulerState init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray sigmas: jnp.ndarray num_inference_steps: Optional[int] = None @classmethod def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray): return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) @dataclass class FlaxEulerDiscreteSchedulerOutput(FlaxSchedulerOutput): state: EulerDiscreteSchedulerState class FlaxEulerDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin): _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[jnp.ndarray]=None, prediction_type: str='epsilon', timestep_spacing: str='linspace', dtype: jnp.dtype=jnp.float32): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState]=None) -> EulerDiscreteSchedulerState: if common is None: common = CommonSchedulerState.create(self) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5 sigmas = jnp.interp(timesteps, jnp.arange(0, len(sigmas)), sigmas) sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) if self.config.timestep_spacing in ['linspace', 'trailing']: init_noise_sigma = sigmas.max() else: init_noise_sigma = (sigmas.max() ** 2 + 1) ** 0.5 return EulerDiscreteSchedulerState.create(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) def scale_model_input(self, state: EulerDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray: (step_index,) = jnp.where(state.timesteps == timestep, size=1) step_index = step_index[0] sigma = state.sigmas[step_index] sample = sample / 
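# --- Editor's note: hedged usage sketch for EulerDiscreteScheduler above, following the
# pattern most Stable Diffusion pipelines use: scale the initial latents by init_noise_sigma,
# scale the model input each step, then call step(). The random "model_output" stands in for
# a UNet epsilon prediction; the shape and step count are illustrative assumptions.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler()
scheduler.set_timesteps(num_inference_steps=30)

latents = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    latent_input = scheduler.scale_model_input(latents, t)
    model_output = torch.randn_like(latents)  # stand-in for unet(latent_input, t).sample
    latents = scheduler.step(model_output, t, latents).prev_sample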
(sigma ** 2 + 1) ** 0.5 return sample def set_timesteps(self, state: EulerDiscreteSchedulerState, num_inference_steps: int, shape: Tuple=()) -> EulerDiscreteSchedulerState: if self.config.timestep_spacing == 'linspace': timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // num_inference_steps timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float) timesteps += 1 else: raise ValueError(f"timestep_spacing must be one of ['linspace', 'leading'], got {self.config.timestep_spacing}") sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5 sigmas = jnp.interp(timesteps, jnp.arange(0, len(sigmas)), sigmas) sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) if self.config.timestep_spacing in ['linspace', 'trailing']: init_noise_sigma = sigmas.max() else: init_noise_sigma = (sigmas.max() ** 2 + 1) ** 0.5 return state.replace(timesteps=timesteps, sigmas=sigmas, num_inference_steps=num_inference_steps, init_noise_sigma=init_noise_sigma) def step(self, state: EulerDiscreteSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, return_dict: bool=True) -> Union[FlaxEulerDiscreteSchedulerOutput, Tuple]: if state.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") (step_index,) = jnp.where(state.timesteps == timestep, size=1) step_index = step_index[0] sigma = state.sigmas[step_index] if self.config.prediction_type == 'epsilon': pred_original_sample = sample - sigma * model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = model_output * (-sigma / (sigma ** 2 + 1) ** 0.5) + sample / (sigma ** 2 + 1) else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') derivative = (sample - pred_original_sample) / sigma dt = state.sigmas[step_index + 1] - sigma prev_sample = sample + derivative * dt if not return_dict: return (prev_sample, state) return FlaxEulerDiscreteSchedulerOutput(prev_sample=prev_sample, state=state) def add_noise(self, state: EulerDiscreteSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray: sigma = state.sigmas[timesteps].flatten() sigma = broadcast_to_shape_from_left(sigma, noise.shape) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_flow_match_euler_discrete.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, logging from .scheduling_utils import SchedulerMixin logger = logging.get_logger(__name__) @dataclass class FlowMatchEulerDiscreteSchedulerOutput(BaseOutput): prev_sample: torch.FloatTensor class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin): _compatibles = [] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, shift: float=1.0, use_dynamic_shifting=False, base_shift: Optional[float]=0.5, max_shift: Optional[float]=1.15, base_image_seq_len: Optional[int]=256, max_image_seq_len: Optional[int]=4096): timesteps = np.linspace(1, 
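# --- Editor's note: hedged usage sketch for FlaxEulerDiscreteScheduler above. Unlike the
# PyTorch schedulers, the Flax variant is stateless: every call takes and returns an explicit
# EulerDiscreteSchedulerState. Requires jax/flax, and assumes the class is exported from the
# top-level package when flax is installed; shapes, PRNG keys, and the 30-step count are
# illustrative assumptions.
import jax
import jax.numpy as jnp
from diffusers import FlaxEulerDiscreteScheduler

scheduler = FlaxEulerDiscreteScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=30, shape=(1, 4, 64, 64))

sample = jax.random.normal(jax.random.PRNGKey(0), (1, 4, 64, 64)) * state.init_noise_sigma
for t in state.timesteps:
    scaled = scheduler.scale_model_input(state, sample, t)
    model_output = jax.random.normal(jax.random.PRNGKey(1), sample.shape)  # stand-in UNet output
    sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)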
num_train_timesteps, num_train_timesteps, dtype=np.float32)[::-1].copy() timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32) sigmas = timesteps / num_train_timesteps if not use_dynamic_shifting: sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) self.timesteps = sigmas * num_train_timesteps self._step_index = None self._begin_index = None self.sigmas = sigmas.to('cpu') self.sigma_min = self.sigmas[-1].item() self.sigma_max = self.sigmas[0].item() @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_noise(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], noise: Optional[torch.FloatTensor]=None) -> torch.FloatTensor: sigmas = self.sigmas.to(device=sample.device, dtype=sample.dtype) if sample.device.type == 'mps' and torch.is_floating_point(timestep): schedule_timesteps = self.timesteps.to(sample.device, dtype=torch.float32) timestep = timestep.to(sample.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(sample.device) timestep = timestep.to(sample.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timestep] elif self.step_index is not None: step_indices = [self.step_index] * timestep.shape[0] else: step_indices = [self.begin_index] * timestep.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(sample.shape): sigma = sigma.unsqueeze(-1) sample = sigma * noise + (1.0 - sigma) * sample return sample def _sigma_to_t(self, sigma): return sigma * self.config.num_train_timesteps def time_shift(self, mu: float, sigma: float, t: torch.Tensor): return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma) def set_timesteps(self, num_inference_steps: int=None, device: Union[str, torch.device]=None, sigmas: Optional[List[float]]=None, mu: Optional[float]=None): if self.config.use_dynamic_shifting and mu is None: raise ValueError(' you have a pass a value for `mu` when `use_dynamic_shifting` is set to be `True`') if sigmas is None: self.num_inference_steps = num_inference_steps timesteps = np.linspace(self._sigma_to_t(self.sigma_max), self._sigma_to_t(self.sigma_min), num_inference_steps) sigmas = timesteps / self.config.num_train_timesteps if self.config.use_dynamic_shifting: sigmas = self.time_shift(mu, 1.0, sigmas) else: sigmas = self.config.shift * sigmas / (1 + (self.config.shift - 1) * sigmas) sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) timesteps = sigmas * self.config.num_train_timesteps self.timesteps = timesteps.to(device=device) self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) self._step_index = None self._begin_index = None def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], sample: torch.FloatTensor, s_churn: float=0.0, s_tmin: float=0.0, s_tmax: float=float('inf'), s_noise: float=1.0, generator: Optional[torch.Generator]=None, 
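# --- Editor's note: hedged sketch of the flow-matching schedule in
# FlowMatchEulerDiscreteScheduler above. sigma runs linearly from 1 down to 1/N and is
# optionally warped: by a static `shift` (config.shift) or, with use_dynamic_shifting, by the
# exponential time_shift parameterised by `mu`. scale_noise() then blends clean data and noise
# as x_sigma = sigma * noise + (1 - sigma) * x0. The shift and mu values are illustrative.
import math
import torch

num_train_timesteps, shift, mu = 1000, 3.0, 0.8
sigmas = torch.linspace(1, num_train_timesteps, num_train_timesteps) / num_train_timesteps
sigmas = sigmas.flip(0)  # 1.0 ... 0.001, high noise first

static_shifted = shift * sigmas / (1 + (shift - 1) * sigmas)                 # config.shift warp
dynamic_shifted = math.exp(mu) / (math.exp(mu) + (1 / sigmas - 1) ** 1.0)    # time_shift(mu, 1.0, sigmas)

x0 = torch.zeros(1, 4, 8, 8)
noise = torch.randn_like(x0)
sigma = static_shifted[0]
x_sigma = sigma * noise + (1.0 - sigma) * x0  # what scale_noise() computes at this sigma
print(float(static_shifted[0]), float(dynamic_shifted[0]), float(x_sigma.std()))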
return_dict: bool=True) -> Union[FlowMatchEulerDiscreteSchedulerOutput, Tuple]: if isinstance(timestep, int) or isinstance(timestep, torch.IntTensor) or isinstance(timestep, torch.LongTensor): raise ValueError('Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to `EulerDiscreteScheduler.step()` is not supported. Make sure to pass one of the `scheduler.timesteps` as a timestep.') if self.step_index is None: self._init_step_index(timestep) sample = sample.to(torch.float32) sigma = self.sigmas[self.step_index] sigma_next = self.sigmas[self.step_index + 1] prev_sample = sample + (sigma_next - sigma) * model_output prev_sample = prev_sample.to(model_output.dtype) self._step_index += 1 if not return_dict: return (prev_sample,) return FlowMatchEulerDiscreteSchedulerOutput(prev_sample=prev_sample) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_flow_match_heun_discrete.py from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, logging from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin logger = logging.get_logger(__name__) @dataclass class FlowMatchHeunDiscreteSchedulerOutput(BaseOutput): prev_sample: torch.FloatTensor class FlowMatchHeunDiscreteScheduler(SchedulerMixin, ConfigMixin): _compatibles = [] order = 2 @register_to_config def __init__(self, num_train_timesteps: int=1000, shift: float=1.0): timesteps = np.linspace(1, num_train_timesteps, num_train_timesteps, dtype=np.float32)[::-1].copy() timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32) sigmas = timesteps / num_train_timesteps sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) self.timesteps = sigmas * num_train_timesteps self._step_index = None self._begin_index = None self.sigmas = sigmas.to('cpu') self.sigma_min = self.sigmas[-1].item() self.sigma_max = self.sigmas[0].item() @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_noise(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], noise: Optional[torch.FloatTensor]=None) -> torch.FloatTensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sigma * noise + (1.0 - sigma) * sample return sample def _sigma_to_t(self, sigma): return sigma * self.config.num_train_timesteps def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps timesteps = np.linspace(self._sigma_to_t(self.sigma_max), self._sigma_to_t(self.sigma_min), num_inference_steps) sigmas = timesteps / self.config.num_train_timesteps sigmas = self.config.shift * sigmas / (1 + (self.config.shift - 1) * sigmas) sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) timesteps = sigmas * self.config.num_train_timesteps timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) self.timesteps = timesteps.to(device=device) sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) self.prev_derivative = None self.dt = None self._step_index = None self._begin_index = None def index_for_timestep(self, timestep, 
schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index @property def state_in_first_order(self): return self.dt is None def step(self, model_output: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], sample: torch.FloatTensor, s_churn: float=0.0, s_tmin: float=0.0, s_tmax: float=float('inf'), s_noise: float=1.0, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[FlowMatchHeunDiscreteSchedulerOutput, Tuple]: if isinstance(timestep, int) or isinstance(timestep, torch.IntTensor) or isinstance(timestep, torch.LongTensor): raise ValueError('Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to `HeunDiscreteScheduler.step()` is not supported. Make sure to pass one of the `scheduler.timesteps` as a timestep.') if self.step_index is None: self._init_step_index(timestep) sample = sample.to(torch.float32) if self.state_in_first_order: sigma = self.sigmas[self.step_index] sigma_next = self.sigmas[self.step_index + 1] else: sigma = self.sigmas[self.step_index - 1] sigma_next = self.sigmas[self.step_index] gamma = min(s_churn / (len(self.sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator) eps = noise * s_noise sigma_hat = sigma * (gamma + 1) if gamma > 0: sample = sample + eps * (sigma_hat ** 2 - sigma ** 2) ** 0.5 if self.state_in_first_order: denoised = sample - model_output * sigma derivative = (sample - denoised) / sigma_hat dt = sigma_next - sigma_hat self.prev_derivative = derivative self.dt = dt self.sample = sample else: denoised = sample - model_output * sigma_next derivative = (sample - denoised) / sigma_next derivative = 0.5 * (self.prev_derivative + derivative) dt = self.dt sample = self.sample self.prev_derivative = None self.dt = None self.sample = None prev_sample = sample + derivative * dt prev_sample = prev_sample.to(model_output.dtype) self._step_index += 1 if not return_dict: return (prev_sample,) return FlowMatchHeunDiscreteSchedulerOutput(prev_sample=prev_sample) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_heun_discrete.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin): 
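# --- Editor's note: hedged sketch of the Heun predictor-corrector pair performed by
# FlowMatchHeunDiscreteScheduler.step() above. Because set_timesteps() repeat-interleaves the
# sigmas, step() is called twice per actual update: the first call takes an Euler predictor
# step and caches its derivative, the second averages the two derivatives (trapezoid rule)
# before moving from the cached sample to sigma_next. `velocity_at` is a toy stand-in for the
# model; sigma values and shapes are illustrative.
import torch

def velocity_at(x: torch.Tensor, sigma: float) -> torch.Tensor:
    return -x  # toy flow field standing in for the transformer/UNet output

sigma, sigma_next = 0.8, 0.6
x = torch.randn(1, 4, 8, 8)

# predictor (first-order phase): Euler step using the derivative at sigma
denoised = x - velocity_at(x, sigma) * sigma
d1 = (x - denoised) / sigma
x_pred = x + d1 * (sigma_next - sigma)

# corrector (second-order phase): derivative at sigma_next from the predictor result, then average
denoised_next = x_pred - velocity_at(x_pred, sigma_next) * sigma_next
d2 = (x_pred - denoised_next) / sigma_next
x_heun = x + 0.5 * (d1 + d2) * (sigma_next - sigma)
print(float((x_heun - x_pred).abs().mean()))  # size of the correction applied on top of the Euler guess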
_compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 2 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.00085, beta_end: float=0.012, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, prediction_type: str='epsilon', use_karras_sigmas: Optional[bool]=False, clip_sample: Optional[bool]=False, clip_sample_range: float=1.0, timestep_spacing: str='linspace', steps_offset: int=0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='cosine') elif beta_schedule == 'exp': self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='exp') else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.set_timesteps(num_train_timesteps, None, num_train_timesteps) self.use_karras_sigmas = use_karras_sigmas self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() @property def init_noise_sigma(self): if self.config.timestep_spacing in ['linspace', 'trailing']: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sample / (sigma ** 2 + 1) ** 0.5 return sample def set_timesteps(self, num_inference_steps: Optional[int]=None, device: Union[str, torch.device]=None, num_train_timesteps: Optional[int]=None, timesteps: Optional[List[int]]=None): if num_inference_steps is None and timesteps is None: raise ValueError('Must pass exactly one of `num_inference_steps` or `custom_timesteps`.') if num_inference_steps is not None and timesteps is not None: raise ValueError('Can only pass one of `num_inference_steps` or `custom_timesteps`.') if timesteps is not None and self.config.use_karras_sigmas: raise ValueError('Cannot use `timesteps` with `config.use_karras_sigmas = True`') num_inference_steps = num_inference_steps or len(timesteps) self.num_inference_steps = num_inference_steps num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps if timesteps is not None: timesteps = np.array(timesteps, dtype=np.float32) elif self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() elif self.config.timestep_spacing == 'leading': step_ratio = num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * 
step_ratio).round()[::-1].copy().astype(np.float32) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = num_train_timesteps / self.num_inference_steps timesteps = np.arange(num_train_timesteps, 0, -step_ratio).round().copy().astype(np.float32) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) sigmas = torch.from_numpy(sigmas).to(device=device) self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) timesteps = torch.from_numpy(timesteps) timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) self.timesteps = timesteps.to(device=device) self.prev_derivative = None self.dt = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def state_in_first_order(self): return self.dt is None def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: Union[torch.Tensor, np.ndarray], timestep: Union[float, torch.Tensor], sample: Union[torch.Tensor, np.ndarray], return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.step_index is None: self._init_step_index(timestep) if self.state_in_first_order: sigma = self.sigmas[self.step_index] sigma_next = self.sigmas[self.step_index + 1] else: sigma = self.sigmas[self.step_index - 1] sigma_next = self.sigmas[self.step_index] gamma = 0 sigma_hat = sigma * (gamma + 1) if self.config.prediction_type == 'epsilon': sigma_input = sigma_hat if self.state_in_first_order else sigma_next pred_original_sample = sample - sigma_input * model_output elif self.config.prediction_type == 'v_prediction': sigma_input = sigma_hat if self.state_in_first_order else sigma_next pred_original_sample = model_output * (-sigma_input / 
(sigma_input ** 2 + 1) ** 0.5) + sample / (sigma_input ** 2 + 1) elif self.config.prediction_type == 'sample': pred_original_sample = model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') if self.config.clip_sample: pred_original_sample = pred_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) if self.state_in_first_order: derivative = (sample - pred_original_sample) / sigma_hat dt = sigma_next - sigma_hat self.prev_derivative = derivative self.dt = dt self.sample = sample else: derivative = (sample - pred_original_sample) / sigma_next derivative = (self.prev_derivative + derivative) / 2 dt = self.dt sample = self.sample self.prev_derivative = None self.dt = None self.sample = None prev_sample = sample + derivative * dt self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_ipndm.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class IPNDMScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, trained_betas: Optional[Union[np.ndarray, List[float]]]=None): self.set_timesteps(num_train_timesteps) self.init_noise_sigma = 1.0 self.pndm_order = 4 self.ets = [] self._step_index = None self._begin_index = None @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1] steps = torch.cat([steps, torch.tensor([0.0])]) if self.config.trained_betas is not None: self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32) else: self.betas = torch.sin(steps * math.pi / 2) ** 2 self.alphas = (1.0 - self.betas ** 2) ** 0.5 timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1] self.timesteps = timesteps.to(device) self.ets = [] self._step_index = None self._begin_index = None def index_for_timestep(self, timestep, schedule_timesteps=None): if 
schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) timestep_index = self.step_index prev_timestep_index = self.step_index + 1 ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(ets) if len(self.ets) == 1: ets = self.ets[-1] elif len(self.ets) == 2: ets = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets) == 3: ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: ets = 1 / 24 * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets) self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: return sample def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets): alpha = self.alphas[timestep_index] sigma = self.betas[timestep_index] next_alpha = self.alphas[prev_timestep_index] next_sigma = self.betas[prev_timestep_index] pred = (sample - sigma * ets) / max(alpha, 1e-08) prev_sample = next_alpha * pred + ets * next_sigma return prev_sample def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class KDPM2AncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 2 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.00085, beta_end: float=0.012, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, use_karras_sigmas: Optional[bool]=False, prediction_type: str='epsilon', timestep_spacing: str='linspace', steps_offset: int=0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, 
dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.set_timesteps(num_train_timesteps, None, num_train_timesteps) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def init_noise_sigma(self): if self.config.timestep_spacing in ['linspace', 'trailing']: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) if self.state_in_first_order: sigma = self.sigmas[self.step_index] else: sigma = self.sigmas_interpol[self.step_index - 1] sample = sample / (sigma ** 2 + 1) ** 0.5 return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None, num_train_timesteps: Optional[int]=None): self.num_inference_steps = num_inference_steps num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() elif self.config.timestep_spacing == 'leading': step_ratio = num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = num_train_timesteps / self.num_inference_steps timesteps = np.arange(num_train_timesteps, 0, -step_ratio).round().copy().astype(np.float32) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() self.log_sigmas = torch.from_numpy(log_sigmas).to(device) sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) sigmas = torch.from_numpy(sigmas).to(device=device) sigmas_next = sigmas.roll(-1) sigmas_next[-1] = 0.0 sigmas_up = (sigmas_next ** 2 * (sigmas ** 2 - sigmas_next ** 2) / sigmas ** 2) ** 0.5 sigmas_down = (sigmas_next ** 2 - sigmas_up ** 2) ** 0.5 sigmas_down[-1] = 0.0 sigmas_interpol = sigmas.log().lerp(sigmas_down.log(), 0.5).exp() sigmas_interpol[-2:] = 0.0 self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) self.sigmas_interpol = torch.cat([sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) self.sigmas_up = torch.cat([sigmas_up[:1], sigmas_up[1:].repeat_interleave(2), sigmas_up[-1:]]) self.sigmas_down = torch.cat([sigmas_down[:1], sigmas_down[1:].repeat_interleave(2), sigmas_down[-1:]]) if str(device).startswith('mps'): timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) else: timesteps = torch.from_numpy(timesteps).to(device) sigmas_interpol = sigmas_interpol.cpu() log_sigmas = self.log_sigmas.cpu() timesteps_interpol = np.array([self._sigma_to_t(sigma_interpol, log_sigmas) for sigma_interpol in sigmas_interpol]) timesteps_interpol = torch.from_numpy(timesteps_interpol).to(device, dtype=timesteps.dtype) interleaved_timesteps = torch.stack((timesteps_interpol[:-2, None], timesteps[1:, None]), dim=-1).flatten() self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps]) self.sample = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def state_in_first_order(self): return self.sample is None def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, 
torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: Union[torch.Tensor, np.ndarray], timestep: Union[float, torch.Tensor], sample: Union[torch.Tensor, np.ndarray], generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.step_index is None: self._init_step_index(timestep) if self.state_in_first_order: sigma = self.sigmas[self.step_index] sigma_interpol = self.sigmas_interpol[self.step_index] sigma_up = self.sigmas_up[self.step_index] sigma_down = self.sigmas_down[self.step_index - 1] else: sigma = self.sigmas[self.step_index - 1] sigma_interpol = self.sigmas_interpol[self.step_index - 1] sigma_up = self.sigmas_up[self.step_index - 1] sigma_down = self.sigmas_down[self.step_index - 1] gamma = 0 sigma_hat = sigma * (gamma + 1) device = model_output.device noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator) if self.config.prediction_type == 'epsilon': sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol pred_original_sample = sample - sigma_input * model_output elif self.config.prediction_type == 'v_prediction': sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol pred_original_sample = model_output * (-sigma_input / (sigma_input ** 2 + 1) ** 0.5) + sample / (sigma_input ** 2 + 1) elif self.config.prediction_type == 'sample': raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') if self.state_in_first_order: derivative = (sample - pred_original_sample) / sigma_hat dt = sigma_interpol - sigma_hat self.sample = sample self.dt = dt prev_sample = sample + derivative * dt else: derivative = (sample - pred_original_sample) / sigma_interpol dt = sigma_down - sigma_hat sample = self.sample self.sample = None prev_sample = sample + derivative * dt prev_sample = prev_sample + noise * sigma_up self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from 
.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 2 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.00085, beta_end: float=0.012, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, use_karras_sigmas: Optional[bool]=False, prediction_type: str='epsilon', timestep_spacing: str='linspace', steps_offset: int=0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.set_timesteps(num_train_timesteps, None, num_train_timesteps) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def init_noise_sigma(self): if self.config.timestep_spacing in ['linspace', 'trailing']: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) if self.state_in_first_order: sigma = self.sigmas[self.step_index] else: sigma = self.sigmas_interpol[self.step_index] sample = sample / (sigma ** 2 + 1) ** 0.5 return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None, num_train_timesteps: Optional[int]=None): self.num_inference_steps = num_inference_steps num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() elif self.config.timestep_spacing == 'leading': step_ratio = num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = num_train_timesteps / self.num_inference_steps timesteps = np.arange(num_train_timesteps, 0, -step_ratio).round().copy().astype(np.float32) timesteps -= 1 else: raise 
ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() self.log_sigmas = torch.from_numpy(log_sigmas).to(device=device) sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) sigmas = torch.from_numpy(sigmas).to(device=device) sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp() self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) self.sigmas_interpol = torch.cat([sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) timesteps = torch.from_numpy(timesteps).to(device) sigmas_interpol = sigmas_interpol.cpu() log_sigmas = self.log_sigmas.cpu() timesteps_interpol = np.array([self._sigma_to_t(sigma_interpol, log_sigmas) for sigma_interpol in sigmas_interpol]) timesteps_interpol = torch.from_numpy(timesteps_interpol).to(device, dtype=timesteps.dtype) interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten() self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps]) self.sample = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def state_in_first_order(self): return self.sample is None def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def step(self, model_output: Union[torch.Tensor, np.ndarray], timestep: Union[float, torch.Tensor], sample: Union[torch.Tensor, np.ndarray], return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.step_index is None: self._init_step_index(timestep) if self.state_in_first_order: sigma = self.sigmas[self.step_index] sigma_interpol 
= self.sigmas_interpol[self.step_index + 1] sigma_next = self.sigmas[self.step_index + 1] else: sigma = self.sigmas[self.step_index - 1] sigma_interpol = self.sigmas_interpol[self.step_index] sigma_next = self.sigmas[self.step_index] gamma = 0 sigma_hat = sigma * (gamma + 1) if self.config.prediction_type == 'epsilon': sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol pred_original_sample = sample - sigma_input * model_output elif self.config.prediction_type == 'v_prediction': sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol pred_original_sample = model_output * (-sigma_input / (sigma_input ** 2 + 1) ** 0.5) + sample / (sigma_input ** 2 + 1) elif self.config.prediction_type == 'sample': raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') if self.state_in_first_order: derivative = (sample - pred_original_sample) / sigma_hat dt = sigma_interpol - sigma_hat self.sample = sample else: derivative = (sample - pred_original_sample) / sigma_interpol dt = sigma_next - sigma_hat sample = self.sample self.sample = None self._step_index += 1 prev_sample = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_karras_ve_flax.py from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class KarrasVeSchedulerState: num_inference_steps: Optional[int] = None timesteps: Optional[jnp.ndarray] = None schedule: Optional[jnp.ndarray] = None @classmethod def create(cls): return cls() @dataclass class FlaxKarrasVeOutput(BaseOutput): prev_sample: jnp.ndarray derivative: jnp.ndarray state: KarrasVeSchedulerState class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin): @property def has_state(self): return True @register_to_config def __init__(self, sigma_min: float=0.02, sigma_max: float=100, s_noise: float=1.007, s_churn: float=80, s_min: float=0.05, s_max: float=50): pass def create_state(self): return KarrasVeSchedulerState.create() def set_timesteps(self, state: KarrasVeSchedulerState, 
num_inference_steps: int, shape: Tuple=()) -> KarrasVeSchedulerState: timesteps = jnp.arange(0, num_inference_steps)[::-1].copy() schedule = [self.config.sigma_max ** 2 * (self.config.sigma_min ** 2 / self.config.sigma_max ** 2) ** (i / (num_inference_steps - 1)) for i in timesteps] return state.replace(num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps) def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: jax.Array) -> Tuple[jnp.ndarray, float]: if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / state.num_inference_steps, 2 ** 0.5 - 1) else: gamma = 0 key = random.split(key, num=1) eps = self.config.s_noise * random.normal(key=key, shape=sample.shape) sigma_hat = sigma + gamma * sigma sample_hat = sample + (sigma_hat ** 2 - sigma ** 2) ** 0.5 * eps return (sample_hat, sigma_hat) def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool=True) -> Union[FlaxKarrasVeOutput, Tuple]: pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool=True) -> Union[FlaxKarrasVeOutput, Tuple]: pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps): raise NotImplementedError() # File: diffusers-main/src/diffusers/schedulers/scheduling_lcm.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, logging from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin logger = logging.get_logger(__name__) @dataclass class LCMSchedulerOutput(BaseOutput): prev_sample: torch.Tensor denoised: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor: alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() 
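# Zero-terminal-SNR rescaling (Lin et al., "Common Diffusion Noise Schedules and Sample Steps are
# Flawed"): sqrt(alpha_bar) is shifted so its final value becomes exactly 0 (terminal SNR = 0) and
# rescaled so its first value is unchanged; the per-step alphas and betas are then recovered from the
# cumulative product. Used below by LCMScheduler when `rescale_betas_zero_snr=True`.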
alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class LCMScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.00085, beta_end: float=0.012, beta_schedule: str='scaled_linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, original_inference_steps: int=50, clip_sample: bool=False, clip_sample_range: float=1.0, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, timestep_spacing: str='leading', timestep_scaling: float=10.0, rescale_betas_zero_snr: bool=False): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) self.custom_timesteps = False self._step_index = None self._begin_index = None def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def set_timesteps(self, num_inference_steps: Optional[int]=None, device: 
Union[str, torch.device]=None, original_inference_steps: Optional[int]=None, timesteps: Optional[List[int]]=None, strength: int=1.0): if num_inference_steps is None and timesteps is None: raise ValueError('Must pass exactly one of `num_inference_steps` or `custom_timesteps`.') if num_inference_steps is not None and timesteps is not None: raise ValueError('Can only pass one of `num_inference_steps` or `custom_timesteps`.') original_steps = original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps if original_steps > self.config.num_train_timesteps: raise ValueError(f'`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') k = self.config.num_train_timesteps // original_steps lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * k - 1 if timesteps is not None: train_timesteps = set(lcm_origin_timesteps) non_train_timesteps = [] for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError('`custom_timesteps` must be in descending order.') if timesteps[i] not in train_timesteps: non_train_timesteps.append(timesteps[i]) if timesteps[0] >= self.config.num_train_timesteps: raise ValueError(f'`timesteps` must start before `self.config.train_timesteps`: {self.config.num_train_timesteps}.') if strength == 1.0 and timesteps[0] != self.config.num_train_timesteps - 1: logger.warning(f'The first timestep on the custom timestep schedule is {timesteps[0]}, not `self.config.num_train_timesteps - 1`: {self.config.num_train_timesteps - 1}. You may get unexpected results when using this timestep schedule.') if non_train_timesteps: logger.warning(f'The custom timestep schedule contains the following timesteps which are not on the original training/distillation timestep schedule: {non_train_timesteps}. You may get unexpected results when using this timestep schedule.') if len(timesteps) > original_steps: logger.warning(f'The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the the length of the timestep schedule used for training: {original_steps}. You may get some unexpected results when using this timestep schedule.') timesteps = np.array(timesteps, dtype=np.int64) self.num_inference_steps = len(timesteps) self.custom_timesteps = True init_timestep = min(int(self.num_inference_steps * strength), self.num_inference_steps) t_start = max(self.num_inference_steps - init_timestep, 0) timesteps = timesteps[t_start * self.order:] else: if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') skipping_step = len(lcm_origin_timesteps) // num_inference_steps if skipping_step < 1: raise ValueError(f'The combination of `original_steps x strength`: {original_steps} x {strength} is smaller than `num_inference_steps`: {num_inference_steps}. 
Make sure to either reduce `num_inference_steps` to a value smaller than {int(original_steps * strength)} or increase `strength` to a value higher than {float(num_inference_steps / original_steps)}.') self.num_inference_steps = num_inference_steps if num_inference_steps > original_steps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`: {original_steps} because the final timestep schedule will be a subset of the `original_inference_steps`-sized initial timestep schedule.') lcm_origin_timesteps = lcm_origin_timesteps[::-1].copy() inference_indices = np.linspace(0, len(lcm_origin_timesteps), num=num_inference_steps, endpoint=False) inference_indices = np.floor(inference_indices).astype(np.int64) timesteps = lcm_origin_timesteps[inference_indices] self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.long) self._step_index = None self._begin_index = None def get_scalings_for_boundary_condition_discrete(self, timestep): self.sigma_data = 0.5 scaled_timestep = timestep * self.config.timestep_scaling c_skip = self.sigma_data ** 2 / (scaled_timestep ** 2 + self.sigma_data ** 2) c_out = scaled_timestep / (scaled_timestep ** 2 + self.sigma_data ** 2) ** 0.5 return (c_skip, c_out) def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[LCMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) prev_step_index = self.step_index + 1 if prev_step_index < len(self.timesteps): prev_timestep = self.timesteps[prev_step_index] else: prev_timestep = timestep alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev (c_skip, c_out) = self.get_scalings_for_boundary_condition_discrete(timestep) if self.config.prediction_type == 'epsilon': predicted_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt() elif self.config.prediction_type == 'sample': predicted_original_sample = model_output elif self.config.prediction_type == 'v_prediction': predicted_original_sample = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or `v_prediction` for `LCMScheduler`.') if self.config.thresholding: predicted_original_sample = self._threshold_sample(predicted_original_sample) elif self.config.clip_sample: predicted_original_sample = predicted_original_sample.clamp(-self.config.clip_sample_range, self.config.clip_sample_range) denoised = c_out * predicted_original_sample + c_skip * sample if self.step_index != self.num_inference_steps - 1: noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=denoised.dtype) prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise else: prev_sample = denoised self._step_index += 1 if not return_dict: return (prev_sample, denoised) return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = 
self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps def previous_timestep(self, timestep): if self.custom_timesteps: index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] if index == self.timesteps.shape[0] - 1: prev_t = torch.tensor(-1) else: prev_t = self.timesteps[index + 1] else: num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = timestep - self.config.num_train_timesteps // num_inference_steps return prev_t # File: diffusers-main/src/diffusers/schedulers/scheduling_lms_discrete.py import math import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from scipy import integrate from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass class LMSDiscreteSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: 
Optional[Union[np.ndarray, List[float]]]=None, use_karras_sigmas: Optional[bool]=False, prediction_type: str='epsilon', timestep_spacing: str='linspace', steps_offset: int=0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.num_inference_steps = None self.use_karras_sigmas = use_karras_sigmas self.set_timesteps(num_train_timesteps, None) self.derivatives = [] self.is_scale_input_called = False self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def init_noise_sigma(self): if self.config.timestep_spacing in ['linspace', 'trailing']: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] sample = sample / (sigma ** 2 + 1) ** 0.5 self.is_scale_input_called = True return sample def get_lms_coefficient(self, order, t, current_order): def lms_derivative(tau): prod = 1.0 for k in range(order): if current_order == k: continue prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k]) return prod integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=0.0001)[0] return integrated_coeff def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.float32) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.use_karras_sigmas: sigmas = self._convert_to_karras(in_sigmas=sigmas) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas).to(device=device) self.timesteps = torch.from_numpy(timesteps).to(device=device) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') self.derivatives = [] def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _convert_to_karras(self, in_sigmas: torch.Tensor) -> torch.Tensor: sigma_min: float = in_sigmas[-1].item() sigma_max: float = in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, self.num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def step(self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, order: int=4, return_dict: bool=True) -> Union[LMSDiscreteSchedulerOutput, Tuple]: if not self.is_scale_input_called: warnings.warn('The `scale_model_input` function should be called before `step` to ensure correct denoising. 
See `StableDiffusionPipeline` for a usage example.') if self.step_index is None: self._init_step_index(timestep) sigma = self.sigmas[self.step_index] if self.config.prediction_type == 'epsilon': pred_original_sample = sample - sigma * model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = model_output * (-sigma / (sigma ** 2 + 1) ** 0.5) + sample / (sigma ** 2 + 1) elif self.config.prediction_type == 'sample': pred_original_sample = model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') derivative = (sample - pred_original_sample) / sigma self.derivatives.append(derivative) if len(self.derivatives) > order: self.derivatives.pop(0) order = min(self.step_index + 1, order) lms_coeffs = [self.get_lms_coefficient(order, self.step_index, curr_order) for curr_order in range(order)] prev_sample = sample + sum((coeff * derivative for (coeff, derivative) in zip(lms_coeffs, reversed(self.derivatives)))) self._step_index += 1 if not return_dict: return (prev_sample,) return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_lms_discrete_flax.py from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from scipy import integrate from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left @flax.struct.dataclass class LMSDiscreteSchedulerState: common: CommonSchedulerState init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray sigmas: jnp.ndarray num_inference_steps: Optional[int] = None derivatives: Optional[jnp.ndarray] = None @classmethod def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray): return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) @dataclass class FlaxLMSSchedulerOutput(FlaxSchedulerOutput): state: LMSDiscreteSchedulerState class FlaxLMSDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin): _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, 
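# Usage sketch (illustrative, not from the library source): a minimal denoising loop for the
# LMSDiscreteScheduler defined above. The `model` callable, latent shape, and step count are
# placeholder assumptions; a real pipeline would plug in a UNet and its latent shape here.
import torch
from diffusers import LMSDiscreteScheduler

scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear')
scheduler.set_timesteps(num_inference_steps=25)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma   # start from scaled Gaussian noise
model = lambda x, t: torch.zeros_like(x)                          # stand-in for an epsilon-predicting UNet

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)          # divides by (sigma ** 2 + 1) ** 0.5
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample, order=4).prev_sample  # multistep LMS update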
beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[jnp.ndarray]=None, prediction_type: str='epsilon', dtype: jnp.dtype=jnp.float32): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState]=None) -> LMSDiscreteSchedulerState: if common is None: common = CommonSchedulerState.create(self) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5 init_noise_sigma = sigmas.max() return LMSDiscreteSchedulerState.create(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) def scale_model_input(self, state: LMSDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray: (step_index,) = jnp.where(state.timesteps == timestep, size=1) step_index = step_index[0] sigma = state.sigmas[step_index] sample = sample / (sigma ** 2 + 1) ** 0.5 return sample def get_lms_coefficient(self, state: LMSDiscreteSchedulerState, order, t, current_order): def lms_derivative(tau): prod = 1.0 for k in range(order): if current_order == k: continue prod *= (tau - state.sigmas[t - k]) / (state.sigmas[t - current_order] - state.sigmas[t - k]) return prod integrated_coeff = integrate.quad(lms_derivative, state.sigmas[t], state.sigmas[t + 1], epsrel=0.0001)[0] return integrated_coeff def set_timesteps(self, state: LMSDiscreteSchedulerState, num_inference_steps: int, shape: Tuple=()) -> LMSDiscreteSchedulerState: timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype) low_idx = jnp.floor(timesteps).astype(jnp.int32) high_idx = jnp.ceil(timesteps).astype(jnp.int32) frac = jnp.mod(timesteps, 1.0) sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5 sigmas = (1 - frac) * sigmas[low_idx] + frac * sigmas[high_idx] sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) timesteps = timesteps.astype(jnp.int32) derivatives = jnp.zeros((0,) + shape, dtype=self.dtype) return state.replace(timesteps=timesteps, sigmas=sigmas, num_inference_steps=num_inference_steps, derivatives=derivatives) def step(self, state: LMSDiscreteSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, order: int=4, return_dict: bool=True) -> Union[FlaxLMSSchedulerOutput, Tuple]: if state.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") sigma = state.sigmas[timestep] if self.config.prediction_type == 'epsilon': pred_original_sample = sample - sigma * model_output elif self.config.prediction_type == 'v_prediction': pred_original_sample = model_output * (-sigma / (sigma ** 2 + 1) ** 0.5) + sample / (sigma ** 2 + 1) else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') derivative = (sample - pred_original_sample) / sigma state = state.replace(derivatives=jnp.append(state.derivatives, derivative)) if len(state.derivatives) > order: state = state.replace(derivatives=jnp.delete(state.derivatives, 0)) order = min(timestep + 1, order) lms_coeffs = [self.get_lms_coefficient(state, order, timestep, curr_order) for curr_order in range(order)] prev_sample = sample + sum((coeff * derivative for (coeff, derivative) in zip(lms_coeffs, reversed(state.derivatives)))) if not return_dict: return (prev_sample, state) return FlaxLMSSchedulerOutput(prev_sample=prev_sample, state=state) def add_noise(self, state: 
LMSDiscreteSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray: sigma = state.sigmas[timesteps].flatten() sigma = broadcast_to_shape_from_left(sigma, noise.shape) noisy_samples = original_samples + noise * sigma return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_pndm.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class PNDMScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, skip_prk_steps: bool=False, set_alpha_to_one: bool=False, prediction_type: str='epsilon', timestep_spacing: str='leading', steps_offset: int=0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] self.init_noise_sigma = 1.0 self.pndm_order = 4 self.cur_model_output = 0 self.counter = 0 self.cur_sample = None self.ets = [] self.num_inference_steps = None self._timesteps = np.arange(0, num_train_timesteps)[::-1].copy() self.prk_timesteps = None self.plms_timesteps = None self.timesteps = None def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps if self.config.timestep_spacing == 'linspace': self._timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // self.num_inference_steps self._timesteps = (np.arange(0, num_inference_steps) * step_ratio).round() self._timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / self.num_inference_steps self._timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, 
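# Usage sketch (illustrative, not from the library source): preparing FlaxLMSDiscreteScheduler
# state. The step count and shape are placeholder assumptions; the scheduler is stateless and
# every call returns a new LMSDiscreteSchedulerState.
from diffusers import FlaxLMSDiscreteScheduler

scheduler = FlaxLMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear')
state = scheduler.create_state()                                   # holds sigmas, timesteps, init_noise_sigma
state = scheduler.set_timesteps(state, num_inference_steps=25, shape=(1, 4, 64, 64))
print(state.timesteps.shape, state.sigmas.shape)                   # (25,) and (26,); the last sigma is 0.0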
-step_ratio))[::-1].astype(np.int64) self._timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") if self.config.skip_prk_steps: self.prk_timesteps = np.array([]) self.plms_timesteps = np.concatenate([self._timesteps[:-1], self._timesteps[-2:-1], self._timesteps[-1:]])[::-1].copy() else: prk_timesteps = np.array(self._timesteps[-self.pndm_order:]).repeat(2) + np.tile(np.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order) self.prk_timesteps = prk_timesteps[:-1].repeat(2)[1:-1][::-1].copy() self.plms_timesteps = self._timesteps[:-3][::-1].copy() timesteps = np.concatenate([self.prk_timesteps, self.plms_timesteps]).astype(np.int64) self.timesteps = torch.from_numpy(timesteps).to(device) self.ets = [] self.counter = 0 self.cur_model_output = 0 def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.counter < len(self.prk_timesteps) and (not self.config.skip_prk_steps): return self.step_prk(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) else: return self.step_plms(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) def step_prk(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") diff_to_prev = 0 if self.counter % 2 else self.config.num_train_timesteps // self.num_inference_steps // 2 prev_timestep = timestep - diff_to_prev timestep = self.prk_timesteps[self.counter // 4 * 4] if self.counter % 4 == 0: self.cur_model_output += 1 / 6 * model_output self.ets.append(model_output) self.cur_sample = sample elif (self.counter - 1) % 4 == 0: self.cur_model_output += 1 / 3 * model_output elif (self.counter - 2) % 4 == 0: self.cur_model_output += 1 / 3 * model_output elif (self.counter - 3) % 4 == 0: model_output = self.cur_model_output + 1 / 6 * model_output self.cur_model_output = 0 cur_sample = self.cur_sample if self.cur_sample is not None else sample prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output) self.counter += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def step_plms(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if not self.config.skip_prk_steps and len(self.ets) < 3: raise ValueError(f"{self.__class__} can only be run AFTER scheduler has been run in 'prk' mode for at least 12 iterations See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py for more information.") prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps if self.counter != 1: self.ets = self.ets[-3:] self.ets.append(model_output) else: prev_timestep = timestep timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps if len(self.ets) == 1 and self.counter == 0: model_output = model_output self.cur_sample = sample elif len(self.ets) == 1 and self.counter == 1: model_output = (model_output 
+ self.ets[-1]) / 2 sample = self.cur_sample self.cur_sample = None elif len(self.ets) == 2: model_output = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets) == 3: model_output = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: model_output = 1 / 24 * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output) self.counter += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: return sample def _get_prev_sample(self, sample, timestep, prev_timestep, model_output): alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev if self.config.prediction_type == 'v_prediction': model_output = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample elif self.config.prediction_type != 'epsilon': raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`') sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** 0.5 model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** 0.5 + (alpha_prod_t * beta_prod_t * alpha_prod_t_prev) ** 0.5 prev_sample = sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff return prev_sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_pndm_flax.py from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common @flax.struct.dataclass class PNDMSchedulerState: common: CommonSchedulerState final_alpha_cumprod: jnp.ndarray init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None prk_timesteps: Optional[jnp.ndarray] = None plms_timesteps: Optional[jnp.ndarray] = None cur_model_output: Optional[jnp.ndarray] = None counter: Optional[jnp.int32] = None cur_sample: Optional[jnp.ndarray] = None ets: Optional[jnp.ndarray] = None @classmethod def create(cls, common: CommonSchedulerState, final_alpha_cumprod: jnp.ndarray, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray): return cls(common=common, 
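# Usage sketch (illustrative, not from the library source): a bare PNDM denoising loop. The
# model callable and tensor shape are placeholder assumptions; skip_prk_steps=True selects pure
# PLMS updates (no Runge-Kutta warm-up), which is how Stable Diffusion configures this scheduler.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(skip_prk_steps=True)
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma    # init_noise_sigma is 1.0 here
model = lambda x, t: torch.zeros_like(x)                           # stand-in for an epsilon-predicting UNet

for t in scheduler.timesteps:
    noise_pred = model(scheduler.scale_model_input(sample, t), t)  # scale_model_input is a no-op for PNDM
    sample = scheduler.step(noise_pred, t, sample).prev_sample     # PLMS multistep update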
final_alpha_cumprod=final_alpha_cumprod, init_noise_sigma=init_noise_sigma, timesteps=timesteps) @dataclass class FlaxPNDMSchedulerOutput(FlaxSchedulerOutput): state: PNDMSchedulerState class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin): _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype pndm_order: int @property def has_state(self): return True @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[jnp.ndarray]=None, skip_prk_steps: bool=False, set_alpha_to_one: bool=False, steps_offset: int=0, prediction_type: str='epsilon', dtype: jnp.dtype=jnp.float32): self.dtype = dtype self.pndm_order = 4 def create_state(self, common: Optional[CommonSchedulerState]=None) -> PNDMSchedulerState: if common is None: common = CommonSchedulerState.create(self) final_alpha_cumprod = jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] init_noise_sigma = jnp.array(1.0, dtype=self.dtype) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] return PNDMSchedulerState.create(common=common, final_alpha_cumprod=final_alpha_cumprod, init_noise_sigma=init_noise_sigma, timesteps=timesteps) def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState: step_ratio = self.config.num_train_timesteps // num_inference_steps _timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round() + self.config.steps_offset if self.config.skip_prk_steps: prk_timesteps = jnp.array([], dtype=jnp.int32) plms_timesteps = jnp.concatenate([_timesteps[:-1], _timesteps[-2:-1], _timesteps[-1:]])[::-1] else: prk_timesteps = _timesteps[-self.pndm_order:].repeat(2) + jnp.tile(jnp.array([0, self.config.num_train_timesteps // num_inference_steps // 2], dtype=jnp.int32), self.pndm_order) prk_timesteps = prk_timesteps[:-1].repeat(2)[1:-1][::-1] plms_timesteps = _timesteps[:-3][::-1] timesteps = jnp.concatenate([prk_timesteps, plms_timesteps]) cur_model_output = jnp.zeros(shape, dtype=self.dtype) counter = jnp.int32(0) cur_sample = jnp.zeros(shape, dtype=self.dtype) ets = jnp.zeros((4,) + shape, dtype=self.dtype) return state.replace(timesteps=timesteps, num_inference_steps=num_inference_steps, prk_timesteps=prk_timesteps, plms_timesteps=plms_timesteps, cur_model_output=cur_model_output, counter=counter, cur_sample=cur_sample, ets=ets) def scale_model_input(self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int]=None) -> jnp.ndarray: return sample def step(self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, return_dict: bool=True) -> Union[FlaxPNDMSchedulerOutput, Tuple]: if state.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.config.skip_prk_steps: (prev_sample, state) = self.step_plms(state, model_output, timestep, sample) else: (prk_prev_sample, prk_state) = self.step_prk(state, model_output, timestep, sample) (plms_prev_sample, plms_state) = self.step_plms(state, model_output, timestep, sample) cond = state.counter < len(state.prk_timesteps) prev_sample = jax.lax.select(cond, prk_prev_sample, plms_prev_sample) state = state.replace(cur_model_output=jax.lax.select(cond, prk_state.cur_model_output, plms_state.cur_model_output), ets=jax.lax.select(cond, prk_state.ets, plms_state.ets), 
cur_sample=jax.lax.select(cond, prk_state.cur_sample, plms_state.cur_sample), counter=jax.lax.select(cond, prk_state.counter, plms_state.counter)) if not return_dict: return (prev_sample, state) return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state) def step_prk(self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray) -> Union[FlaxPNDMSchedulerOutput, Tuple]: if state.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") diff_to_prev = jnp.where(state.counter % 2, 0, self.config.num_train_timesteps // state.num_inference_steps // 2) prev_timestep = timestep - diff_to_prev timestep = state.prk_timesteps[state.counter // 4 * 4] model_output = jax.lax.select(state.counter % 4 != 3, model_output, state.cur_model_output + 1 / 6 * model_output) state = state.replace(cur_model_output=jax.lax.select_n(state.counter % 4, state.cur_model_output + 1 / 6 * model_output, state.cur_model_output + 1 / 3 * model_output, state.cur_model_output + 1 / 3 * model_output, jnp.zeros_like(state.cur_model_output)), ets=jax.lax.select(state.counter % 4 == 0, state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), state.ets), cur_sample=jax.lax.select(state.counter % 4 == 0, sample, state.cur_sample)) cur_sample = state.cur_sample prev_sample = self._get_prev_sample(state, cur_sample, timestep, prev_timestep, model_output) state = state.replace(counter=state.counter + 1) return (prev_sample, state) def step_plms(self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray) -> Union[FlaxPNDMSchedulerOutput, Tuple]: if state.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps prev_timestep = jnp.where(prev_timestep > 0, prev_timestep, 0) prev_timestep = jnp.where(state.counter == 1, timestep, prev_timestep) timestep = jnp.where(state.counter == 1, timestep + self.config.num_train_timesteps // state.num_inference_steps, timestep) state = state.replace(ets=jax.lax.select(state.counter != 1, state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), state.ets), cur_sample=jax.lax.select(state.counter != 1, sample, state.cur_sample)) state = state.replace(cur_model_output=jax.lax.select_n(jnp.clip(state.counter, 0, 4), model_output, (model_output + state.ets[-1]) / 2, (3 * state.ets[-1] - state.ets[-2]) / 2, (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12, 1 / 24 * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]))) sample = state.cur_sample model_output = state.cur_model_output prev_sample = self._get_prev_sample(state, sample, timestep, prev_timestep, model_output) state = state.replace(counter=state.counter + 1) return (prev_sample, state) def _get_prev_sample(self, state: PNDMSchedulerState, sample, timestep, prev_timestep, model_output): alpha_prod_t = state.common.alphas_cumprod[timestep] alpha_prod_t_prev = jnp.where(prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev if self.config.prediction_type == 'v_prediction': model_output = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample elif self.config.prediction_type != 'epsilon': raise ValueError(f'prediction_type given as 
{self.config.prediction_type} must be one of `epsilon` or `v_prediction`') sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** 0.5 model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** 0.5 + (alpha_prod_t * beta_prod_t * alpha_prod_t_prev) ** 0.5 prev_sample = sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff return prev_sample def add_noise(self, state: PNDMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray: return add_noise_common(state.common, original_samples, noise, timesteps) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_repaint.py import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class RePaintSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: torch.Tensor def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class RePaintScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', eta: float=0.0, trained_betas: Optional[np.ndarray]=None, clip_sample: bool=True): if trained_betas is not None: self.betas = torch.from_numpy(trained_betas) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) elif beta_schedule == 'sigmoid': betas = torch.linspace(-6, 6, num_train_timesteps) self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.one = torch.tensor(1.0) self.final_alpha_cumprod = torch.tensor(1.0) self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.eta = eta def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: int, jump_length: int=10, jump_n_sample: int=10, device: Union[str, torch.device]=None): num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps) self.num_inference_steps = num_inference_steps timesteps = [] jumps = {} for j in range(0, num_inference_steps - jump_length, jump_length): jumps[j] = 
jump_n_sample - 1 t = num_inference_steps while t >= 1: t = t - 1 timesteps.append(t) if jumps.get(t, 0) > 0: jumps[t] = jumps[t] - 1 for _ in range(jump_length): t = t + 1 timesteps.append(t) timesteps = np.array(timesteps) * (self.config.num_train_timesteps // self.num_inference_steps) self.timesteps = torch.from_numpy(timesteps).to(device) def _get_variance(self, t): prev_timestep = t - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, original_image: torch.Tensor, mask: torch.Tensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[RePaintSchedulerOutput, Tuple]: t = timestep prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 if self.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) device = model_output.device noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype) std_dev_t = self.eta * self._get_variance(timestep) ** 0.5 variance = 0 if t > 0 and self.eta > 0: variance = std_dev_t * noise pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t ** 2) ** 0.5 * model_output prev_unknown_part = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction + variance prev_known_part = alpha_prod_t_prev ** 0.5 * original_image + (1 - alpha_prod_t_prev) ** 0.5 * noise pred_prev_sample = mask * prev_known_part + (1.0 - mask) * prev_unknown_part if not return_dict: return (pred_prev_sample, pred_original_sample) return RePaintSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) def undo_step(self, sample, timestep, generator=None): n = self.config.num_train_timesteps // self.num_inference_steps for i in range(n): beta = self.betas[timestep + i] if sample.device.type == 'mps': noise = randn_tensor(sample.shape, dtype=sample.dtype, generator=generator) noise = noise.to(sample.device) else: noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype) sample = (1 - beta) ** 0.5 * sample + beta ** 0.5 * noise return sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: raise NotImplementedError('Use `DDPMScheduler.add_noise()` to train for sampling with RePaint.') def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_sasolver.py import math from typing import Callable, List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 
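# Usage sketch (illustrative, not from the library source): the resampling loop the
# RePaintScheduler above expects, loosely mirroring RePaintPipeline. The image, mask, and model
# are placeholder assumptions; mask values of 1 keep known pixels, 0 marks the region to inpaint.
import torch
from diffusers import RePaintScheduler

scheduler = RePaintScheduler()
scheduler.set_timesteps(num_inference_steps=250, jump_length=10, jump_n_sample=10)

original_image = torch.zeros(1, 3, 64, 64)                         # known image content
mask = torch.ones(1, 3, 64, 64)                                    # 1 = keep original, 0 = inpaint
sample = torch.randn(1, 3, 64, 64)
model = lambda x, t: torch.zeros_like(x)                           # stand-in for an epsilon-predicting UNet

t_last = scheduler.timesteps[0] + 1
for t in scheduler.timesteps:
    if t < t_last:                                                 # regular denoising step
        sample = scheduler.step(model(sample, t), t, sample, original_image, mask).prev_sample
    else:                                                          # jump back by re-adding noise
        sample = scheduler.undo_step(sample, t_last)
    t_last = t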
'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class SASolverScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, predictor_order: int=2, corrector_order: int=2, prediction_type: str='epsilon', tau_func: Optional[Callable]=None, thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, algorithm_type: str='data_prediction', lower_order_final: bool=True, use_karras_sigmas: Optional[bool]=False, lambda_min_clipped: float=-float('inf'), variance_type: Optional[str]=None, timestep_spacing: str='linspace', steps_offset: int=0): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 self.init_noise_sigma = 1.0 if algorithm_type not in ['data_prediction', 'noise_prediction']: raise NotImplementedError(f'{algorithm_type} is not implemented for {self.__class__}') self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.timestep_list = [None] * max(predictor_order, corrector_order - 1) self.model_outputs = [None] * max(predictor_order, corrector_order - 1) if tau_func is None: self.tau_func = lambda t: 1 if t >= 200 and t <= 800 else 0 else: self.tau_func = tau_func self.predict_x0 = algorithm_type == 'data_prediction' self.lower_order_nums = 0 self.last_sample = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def set_timesteps(self, num_inference_steps: int=None, device: Union[str, torch.device]=None): clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) last_timestep = (self.config.num_train_timesteps - clipped_idx).numpy().item() if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, last_timestep - 1, num_inference_steps + 
1).round()[::-1][:-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = last_timestep // (num_inference_steps + 1) timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / num_inference_steps timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) if self.config.use_karras_sigmas: log_sigmas = np.log(sigmas) sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [None] * max(self.config.predictor_order, self.config.corrector_order - 1) self.lower_order_nums = 0 self.last_sample = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / (sigma ** 2 + 1) ** 0.5 sigma_t = sigma * alpha_t return (alpha_t, sigma_t) def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output(self, model_output: torch.Tensor, 
*args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError('missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) if self.config.algorithm_type in ['data_prediction']: if self.config.prediction_type == 'epsilon': if self.config.variance_type in ['learned', 'learned_range']: model_output = model_output[:, :3] x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == 'sample': x0_pred = model_output elif self.config.prediction_type == 'v_prediction': x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the SASolverScheduler.') if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred elif self.config.algorithm_type in ['noise_prediction']: if self.config.prediction_type == 'epsilon': if self.config.variance_type in ['learned', 'learned_range']: epsilon = model_output[:, :3] else: epsilon = model_output elif self.config.prediction_type == 'sample': epsilon = (sample - alpha_t * model_output) / sigma_t elif self.config.prediction_type == 'v_prediction': epsilon = alpha_t * model_output + sigma_t * sample else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the SASolverScheduler.') if self.config.thresholding: (alpha_t, sigma_t) = (self.alpha_t[timestep], self.sigma_t[timestep]) x0_pred = (sample - sigma_t * epsilon) / alpha_t x0_pred = self._threshold_sample(x0_pred) epsilon = (sample - alpha_t * x0_pred) / sigma_t return epsilon def get_coefficients_exponential_negative(self, order, interval_start, interval_end): assert order in [0, 1, 2, 3], 'order is only supported for 0, 1, 2 and 3' if order == 0: return torch.exp(-interval_end) * (torch.exp(interval_end - interval_start) - 1) elif order == 1: return torch.exp(-interval_end) * ((interval_start + 1) * torch.exp(interval_end - interval_start) - (interval_end + 1)) elif order == 2: return torch.exp(-interval_end) * ((interval_start ** 2 + 2 * interval_start + 2) * torch.exp(interval_end - interval_start) - (interval_end ** 2 + 2 * interval_end + 2)) elif order == 3: return torch.exp(-interval_end) * ((interval_start ** 3 + 3 * interval_start ** 2 + 6 * interval_start + 6) * torch.exp(interval_end - interval_start) - (interval_end ** 3 + 3 * interval_end ** 2 + 6 * interval_end + 6)) def get_coefficients_exponential_positive(self, order, interval_start, interval_end, tau): assert order in [0, 1, 2, 3], 'order is only supported for 0, 1, 2 and 3' interval_end_cov = (1 + tau ** 2) * interval_end interval_start_cov = (1 + tau ** 2) * interval_start if order == 0: return torch.exp(interval_end_cov) * (1 - torch.exp(-(interval_end_cov - interval_start_cov))) / (1 + tau ** 2) elif order == 1: return torch.exp(interval_end_cov) * (interval_end_cov - 1 - (interval_start_cov - 1) * torch.exp(-(interval_end_cov - interval_start_cov))) / (1 + tau ** 2) ** 2 elif order == 2: return torch.exp(interval_end_cov) * (interval_end_cov ** 2 - 2 * 
interval_end_cov + 2 - (interval_start_cov ** 2 - 2 * interval_start_cov + 2) * torch.exp(-(interval_end_cov - interval_start_cov))) / (1 + tau ** 2) ** 3 elif order == 3: return torch.exp(interval_end_cov) * (interval_end_cov ** 3 - 3 * interval_end_cov ** 2 + 6 * interval_end_cov - 6 - (interval_start_cov ** 3 - 3 * interval_start_cov ** 2 + 6 * interval_start_cov - 6) * torch.exp(-(interval_end_cov - interval_start_cov))) / (1 + tau ** 2) ** 4 def lagrange_polynomial_coefficient(self, order, lambda_list): assert order in [0, 1, 2, 3] assert order == len(lambda_list) - 1 if order == 0: return [[1]] elif order == 1: return [[1 / (lambda_list[0] - lambda_list[1]), -lambda_list[1] / (lambda_list[0] - lambda_list[1])], [1 / (lambda_list[1] - lambda_list[0]), -lambda_list[0] / (lambda_list[1] - lambda_list[0])]] elif order == 2: denominator1 = (lambda_list[0] - lambda_list[1]) * (lambda_list[0] - lambda_list[2]) denominator2 = (lambda_list[1] - lambda_list[0]) * (lambda_list[1] - lambda_list[2]) denominator3 = (lambda_list[2] - lambda_list[0]) * (lambda_list[2] - lambda_list[1]) return [[1 / denominator1, (-lambda_list[1] - lambda_list[2]) / denominator1, lambda_list[1] * lambda_list[2] / denominator1], [1 / denominator2, (-lambda_list[0] - lambda_list[2]) / denominator2, lambda_list[0] * lambda_list[2] / denominator2], [1 / denominator3, (-lambda_list[0] - lambda_list[1]) / denominator3, lambda_list[0] * lambda_list[1] / denominator3]] elif order == 3: denominator1 = (lambda_list[0] - lambda_list[1]) * (lambda_list[0] - lambda_list[2]) * (lambda_list[0] - lambda_list[3]) denominator2 = (lambda_list[1] - lambda_list[0]) * (lambda_list[1] - lambda_list[2]) * (lambda_list[1] - lambda_list[3]) denominator3 = (lambda_list[2] - lambda_list[0]) * (lambda_list[2] - lambda_list[1]) * (lambda_list[2] - lambda_list[3]) denominator4 = (lambda_list[3] - lambda_list[0]) * (lambda_list[3] - lambda_list[1]) * (lambda_list[3] - lambda_list[2]) return [[1 / denominator1, (-lambda_list[1] - lambda_list[2] - lambda_list[3]) / denominator1, (lambda_list[1] * lambda_list[2] + lambda_list[1] * lambda_list[3] + lambda_list[2] * lambda_list[3]) / denominator1, -lambda_list[1] * lambda_list[2] * lambda_list[3] / denominator1], [1 / denominator2, (-lambda_list[0] - lambda_list[2] - lambda_list[3]) / denominator2, (lambda_list[0] * lambda_list[2] + lambda_list[0] * lambda_list[3] + lambda_list[2] * lambda_list[3]) / denominator2, -lambda_list[0] * lambda_list[2] * lambda_list[3] / denominator2], [1 / denominator3, (-lambda_list[0] - lambda_list[1] - lambda_list[3]) / denominator3, (lambda_list[0] * lambda_list[1] + lambda_list[0] * lambda_list[3] + lambda_list[1] * lambda_list[3]) / denominator3, -lambda_list[0] * lambda_list[1] * lambda_list[3] / denominator3], [1 / denominator4, (-lambda_list[0] - lambda_list[1] - lambda_list[2]) / denominator4, (lambda_list[0] * lambda_list[1] + lambda_list[0] * lambda_list[2] + lambda_list[1] * lambda_list[2]) / denominator4, -lambda_list[0] * lambda_list[1] * lambda_list[2] / denominator4]] def get_coefficients_fn(self, order, interval_start, interval_end, lambda_list, tau): assert order in [1, 2, 3, 4] assert order == len(lambda_list), 'the length of lambda list must be equal to the order' coefficients = [] lagrange_coefficient = self.lagrange_polynomial_coefficient(order - 1, lambda_list) for i in range(order): coefficient = 0 for j in range(order): if self.predict_x0: coefficient += lagrange_coefficient[i][j] * self.get_coefficients_exponential_positive(order - 1 - j, 
interval_start, interval_end, tau) else: coefficient += lagrange_coefficient[i][j] * self.get_coefficients_exponential_negative(order - 1 - j, interval_start, interval_end) coefficients.append(coefficient) assert len(coefficients) == order, 'the length of coefficients does not match the order' return coefficients def stochastic_adams_bashforth_update(self, model_output: torch.Tensor, *args, sample: torch.Tensor, noise: torch.Tensor, order: int, tau: torch.Tensor, **kwargs) -> torch.Tensor: prev_timestep = args[0] if len(args) > 0 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError(' missing `sample` as a required keyward argument') if noise is None: if len(args) > 2: noise = args[2] else: raise ValueError(' missing `noise` as a required keyward argument') if order is None: if len(args) > 3: order = args[3] else: raise ValueError(' missing `order` as a required keyward argument') if tau is None: if len(args) > 4: tau = args[4] else: raise ValueError(' missing `tau` as a required keyward argument') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') model_output_list = self.model_outputs (sigma_t, sigma_s0) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) gradient_part = torch.zeros_like(sample) h = lambda_t - lambda_s0 lambda_list = [] for i in range(order): si = self.step_index - i (alpha_si, sigma_si) = self._sigma_to_alpha_sigma_t(self.sigmas[si]) lambda_si = torch.log(alpha_si) - torch.log(sigma_si) lambda_list.append(lambda_si) gradient_coefficients = self.get_coefficients_fn(order, lambda_s0, lambda_t, lambda_list, tau) x = sample if self.predict_x0: if order == 2: temp_sigma = self.sigmas[self.step_index - 1] (temp_alpha_s, temp_sigma_s) = self._sigma_to_alpha_sigma_t(temp_sigma) temp_lambda_s = torch.log(temp_alpha_s) - torch.log(temp_sigma_s) gradient_coefficients[0] += 1.0 * torch.exp((1 + tau ** 2) * lambda_t) * (h ** 2 / 2 - (h * (1 + tau ** 2) - 1 + torch.exp((1 + tau ** 2) * -h)) / (1 + tau ** 2) ** 2) / (lambda_s0 - temp_lambda_s) gradient_coefficients[1] -= 1.0 * torch.exp((1 + tau ** 2) * lambda_t) * (h ** 2 / 2 - (h * (1 + tau ** 2) - 1 + torch.exp((1 + tau ** 2) * -h)) / (1 + tau ** 2) ** 2) / (lambda_s0 - temp_lambda_s) for i in range(order): if self.predict_x0: gradient_part += (1 + tau ** 2) * sigma_t * torch.exp(-tau ** 2 * lambda_t) * gradient_coefficients[i] * model_output_list[-(i + 1)] else: gradient_part += -(1 + tau ** 2) * alpha_t * gradient_coefficients[i] * model_output_list[-(i + 1)] if self.predict_x0: noise_part = sigma_t * torch.sqrt(1 - torch.exp(-2 * tau ** 2 * h)) * noise else: noise_part = tau * sigma_t * torch.sqrt(torch.exp(2 * h) - 1) * noise if self.predict_x0: x_t = torch.exp(-tau ** 2 * h) * (sigma_t / sigma_s0) * x + gradient_part + noise_part else: x_t = alpha_t / alpha_s0 * x + gradient_part + noise_part x_t = x_t.to(x.dtype) return x_t def stochastic_adams_moulton_update(self, this_model_output: torch.Tensor, *args, last_sample: torch.Tensor, last_noise: torch.Tensor, this_sample: torch.Tensor, order: int, tau: torch.Tensor, **kwargs) -> torch.Tensor: this_timestep = args[0] if 
len(args) > 0 else kwargs.pop('this_timestep', None) if last_sample is None: if len(args) > 1: last_sample = args[1] else: raise ValueError(' missing`last_sample` as a required keyward argument') if last_noise is None: if len(args) > 2: last_noise = args[2] else: raise ValueError(' missing`last_noise` as a required keyward argument') if this_sample is None: if len(args) > 3: this_sample = args[3] else: raise ValueError(' missing`this_sample` as a required keyward argument') if order is None: if len(args) > 4: order = args[4] else: raise ValueError(' missing`order` as a required keyward argument') if tau is None: if len(args) > 5: tau = args[5] else: raise ValueError(' missing`tau` as a required keyward argument') if this_timestep is not None: deprecate('this_timestep', '1.0.0', 'Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') model_output_list = self.model_outputs (sigma_t, sigma_s0) = (self.sigmas[self.step_index], self.sigmas[self.step_index - 1]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) gradient_part = torch.zeros_like(this_sample) h = lambda_t - lambda_s0 lambda_list = [] for i in range(order): si = self.step_index - i (alpha_si, sigma_si) = self._sigma_to_alpha_sigma_t(self.sigmas[si]) lambda_si = torch.log(alpha_si) - torch.log(sigma_si) lambda_list.append(lambda_si) model_prev_list = model_output_list + [this_model_output] gradient_coefficients = self.get_coefficients_fn(order, lambda_s0, lambda_t, lambda_list, tau) x = last_sample if self.predict_x0: if order == 2: gradient_coefficients[0] += 1.0 * torch.exp((1 + tau ** 2) * lambda_t) * (h / 2 - (h * (1 + tau ** 2) - 1 + torch.exp((1 + tau ** 2) * -h)) / ((1 + tau ** 2) ** 2 * h)) gradient_coefficients[1] -= 1.0 * torch.exp((1 + tau ** 2) * lambda_t) * (h / 2 - (h * (1 + tau ** 2) - 1 + torch.exp((1 + tau ** 2) * -h)) / ((1 + tau ** 2) ** 2 * h)) for i in range(order): if self.predict_x0: gradient_part += (1 + tau ** 2) * sigma_t * torch.exp(-tau ** 2 * lambda_t) * gradient_coefficients[i] * model_prev_list[-(i + 1)] else: gradient_part += -(1 + tau ** 2) * alpha_t * gradient_coefficients[i] * model_prev_list[-(i + 1)] if self.predict_x0: noise_part = sigma_t * torch.sqrt(1 - torch.exp(-2 * tau ** 2 * h)) * last_noise else: noise_part = tau * sigma_t * torch.sqrt(torch.exp(2 * h) - 1) * last_noise if self.predict_x0: x_t = torch.exp(-tau ** 2 * h) * (sigma_t / sigma_s0) * x + gradient_part + noise_part else: x_t = alpha_t / alpha_s0 * x + gradient_part + noise_part x_t = x_t.to(x.dtype) return x_t def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator=None, return_dict: bool=True) -> 
Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) use_corrector = self.step_index > 0 and self.last_sample is not None model_output_convert = self.convert_model_output(model_output, sample=sample) if use_corrector: current_tau = self.tau_func(self.timestep_list[-1]) sample = self.stochastic_adams_moulton_update(this_model_output=model_output_convert, last_sample=self.last_sample, last_noise=self.last_noise, this_sample=sample, order=self.this_corrector_order, tau=current_tau) for i in range(max(self.config.predictor_order, self.config.corrector_order - 1) - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.timestep_list[i] = self.timestep_list[i + 1] self.model_outputs[-1] = model_output_convert self.timestep_list[-1] = timestep noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) if self.config.lower_order_final: this_predictor_order = min(self.config.predictor_order, len(self.timesteps) - self.step_index) this_corrector_order = min(self.config.corrector_order, len(self.timesteps) - self.step_index + 1) else: this_predictor_order = self.config.predictor_order this_corrector_order = self.config.corrector_order self.this_predictor_order = min(this_predictor_order, self.lower_order_nums + 1) self.this_corrector_order = min(this_corrector_order, self.lower_order_nums + 2) assert self.this_predictor_order > 0 assert self.this_corrector_order > 0 self.last_sample = sample self.last_noise = noise current_tau = self.tau_func(self.timestep_list[-1]) prev_sample = self.stochastic_adams_bashforth_update(model_output=model_output_convert, sample=sample, noise=noise, order=self.this_predictor_order, tau=current_tau) if self.lower_order_nums < max(self.config.predictor_order, self.config.corrector_order - 1): self.lower_order_nums += 1 self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: return sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_sde_ve.py import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin, 
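# Usage sketch (illustrative, not from the library source): driving the SASolverScheduler above
# with its default data-prediction algorithm. Model, shape, and step count are placeholder
# assumptions; the generator seeds the stochastic noise that SA-Solver injects at every step.
import torch
from diffusers import SASolverScheduler

scheduler = SASolverScheduler(predictor_order=2, corrector_order=2)
scheduler.set_timesteps(num_inference_steps=20)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
model = lambda x, t: torch.zeros_like(x)                           # stand-in for an epsilon-predicting UNet
generator = torch.Generator().manual_seed(0)

for t in scheduler.timesteps:
    noise_pred = model(scheduler.scale_model_input(sample, t), t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample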
SchedulerOutput @dataclass class SdeVeOutput(BaseOutput): prev_sample: torch.Tensor prev_sample_mean: torch.Tensor class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_train_timesteps: int=2000, snr: float=0.15, sigma_min: float=0.01, sigma_max: float=1348.0, sampling_eps: float=1e-05, correct_steps: int=1): self.init_noise_sigma = sigma_max self.timesteps = None self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps) def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: int, sampling_eps: float=None, device: Union[str, torch.device]=None): sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device) def set_sigmas(self, num_inference_steps: int, sigma_min: float=None, sigma_max: float=None, sampling_eps: float=None): sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(num_inference_steps, sampling_eps) self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)) self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) def get_adjacent_sigma(self, timesteps, t): return torch.where(timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device)) def step_pred(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[SdeVeOutput, Tuple]: if self.timesteps is None: raise ValueError("`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler") timestep = timestep * torch.ones(sample.shape[0], device=sample.device) timesteps = (timestep * (len(self.timesteps) - 1)).long() timesteps = timesteps.to(self.discrete_sigmas.device) sigma = self.discrete_sigmas[timesteps].to(sample.device) adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device) drift = torch.zeros_like(sample) diffusion = (sigma ** 2 - adjacent_sigma ** 2) ** 0.5 diffusion = diffusion.flatten() while len(diffusion.shape) < len(sample.shape): diffusion = diffusion.unsqueeze(-1) drift = drift - diffusion ** 2 * model_output noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype) prev_sample_mean = sample - drift prev_sample = prev_sample_mean + diffusion * noise if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean) def step_correct(self, model_output: torch.Tensor, sample: torch.Tensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.timesteps is None: raise ValueError("`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler") noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device) grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean() noise_norm = 
torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean() step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 step_size = step_size * torch.ones(sample.shape[0]).to(sample.device) step_size = step_size.flatten() while len(step_size.shape) < len(sample.shape): step_size = step_size.unsqueeze(-1) prev_sample_mean = sample + step_size * model_output prev_sample = prev_sample_mean + (step_size * 2) ** 0.5 * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: timesteps = timesteps.to(original_samples.device) sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps] noise = noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(original_samples) * sigmas[:, None, None, None] noisy_samples = noise + original_samples return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_sde_ve_flax.py from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left @flax.struct.dataclass class ScoreSdeVeSchedulerState: timesteps: Optional[jnp.ndarray] = None discrete_sigmas: Optional[jnp.ndarray] = None sigmas: Optional[jnp.ndarray] = None @classmethod def create(cls): return cls() @dataclass class FlaxSdeVeOutput(FlaxSchedulerOutput): state: ScoreSdeVeSchedulerState prev_sample: jnp.ndarray prev_sample_mean: Optional[jnp.ndarray] = None class FlaxScoreSdeVeScheduler(FlaxSchedulerMixin, ConfigMixin): @property def has_state(self): return True @register_to_config def __init__(self, num_train_timesteps: int=2000, snr: float=0.15, sigma_min: float=0.01, sigma_max: float=1348.0, sampling_eps: float=1e-05, correct_steps: int=1): pass def create_state(self): state = ScoreSdeVeSchedulerState.create() return self.set_sigmas(state, self.config.num_train_timesteps, self.config.sigma_min, self.config.sigma_max, self.config.sampling_eps) def set_timesteps(self, state: ScoreSdeVeSchedulerState, num_inference_steps: int, shape: Tuple=(), sampling_eps: float=None) -> ScoreSdeVeSchedulerState: sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps timesteps = jnp.linspace(1, sampling_eps, num_inference_steps) return state.replace(timesteps=timesteps) def set_sigmas(self, state: ScoreSdeVeSchedulerState, num_inference_steps: int, sigma_min: float=None, sigma_max: float=None, sampling_eps: float=None) -> ScoreSdeVeSchedulerState: sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps if state.timesteps is None: state = self.set_timesteps(state, num_inference_steps, sampling_eps) discrete_sigmas = jnp.exp(jnp.linspace(jnp.log(sigma_min), jnp.log(sigma_max), num_inference_steps)) sigmas = jnp.array([sigma_min * (sigma_max / sigma_min) ** t for t in state.timesteps]) return state.replace(discrete_sigmas=discrete_sigmas, sigmas=sigmas) def get_adjacent_sigma(self, state, timesteps, t): return jnp.where(timesteps == 0, jnp.zeros_like(t), state.discrete_sigmas[timesteps - 1]) def step_pred(self, 
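# Illustrative sketch (not part of the module above): the predictor-corrector sampling loop the
# ScoreSdeVeScheduler is designed for -- `correct_steps` Langevin corrector steps via `step_correct`,
# then one reverse-SDE predictor step via `step_pred`. A stand-in callable replaces the trained
# score model (in practice a UNet2DModel whose output is model(x, sigma).sample).
import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=50)
scheduler.set_sigmas(num_inference_steps=50)

generator = torch.Generator().manual_seed(0)
sample = torch.randn(1, 3, 64, 64, generator=generator) * scheduler.init_noise_sigma
score_model = lambda x, t: 1e-3 * torch.randn_like(x)   # stand-in for the score network

for t in scheduler.timesteps:
    for _ in range(scheduler.config.correct_steps):
        sample = scheduler.step_correct(score_model(sample, t), sample, generator=generator).prev_sample
    output = scheduler.step_pred(score_model(sample, t), t, sample, generator=generator)
    sample, sample_mean = output.prev_sample, output.prev_sample_mean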
state: ScoreSdeVeSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: jax.Array, return_dict: bool=True) -> Union[FlaxSdeVeOutput, Tuple]: if state.timesteps is None: raise ValueError("`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler") timestep = timestep * jnp.ones(sample.shape[0]) timesteps = (timestep * (len(state.timesteps) - 1)).long() sigma = state.discrete_sigmas[timesteps] adjacent_sigma = self.get_adjacent_sigma(state, timesteps, timestep) drift = jnp.zeros_like(sample) diffusion = (sigma ** 2 - adjacent_sigma ** 2) ** 0.5 diffusion = diffusion.flatten() diffusion = broadcast_to_shape_from_left(diffusion, sample.shape) drift = drift - diffusion ** 2 * model_output key = random.split(key, num=1) noise = random.normal(key=key, shape=sample.shape) prev_sample_mean = sample - drift prev_sample = prev_sample_mean + diffusion * noise if not return_dict: return (prev_sample, prev_sample_mean, state) return FlaxSdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean, state=state) def step_correct(self, state: ScoreSdeVeSchedulerState, model_output: jnp.ndarray, sample: jnp.ndarray, key: jax.Array, return_dict: bool=True) -> Union[FlaxSdeVeOutput, Tuple]: if state.timesteps is None: raise ValueError("`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler") key = random.split(key, num=1) noise = random.normal(key=key, shape=sample.shape) grad_norm = jnp.linalg.norm(model_output) noise_norm = jnp.linalg.norm(noise) step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 step_size = step_size * jnp.ones(sample.shape[0]) step_size = step_size.flatten() step_size = broadcast_to_shape_from_left(step_size, sample.shape) prev_sample_mean = sample + step_size * model_output prev_sample = prev_sample_mean + (step_size * 2) ** 0.5 * noise if not return_dict: return (prev_sample, state) return FlaxSdeVeOutput(prev_sample=prev_sample, state=state) def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_tcd.py import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..schedulers.scheduling_utils import SchedulerMixin from ..utils import BaseOutput, logging from ..utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) @dataclass class TCDSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_noised_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor: alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T 
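# Illustrative sketch (not part of the module above): the Flax variant just defined is stateless --
# every schedule array lives in a ScoreSdeVeSchedulerState, each setter returns a new state via
# `.replace(...)`, and `step_pred`/`step_correct` take an explicit JAX PRNG key instead of a
# torch.Generator. Assumes the Flax/JAX extras are installed.
from diffusers import FlaxScoreSdeVeScheduler

scheduler = FlaxScoreSdeVeScheduler()
state = scheduler.create_state()                                  # state for the training schedule
state = scheduler.set_timesteps(state, num_inference_steps=50)    # returns a new, immutable state
state = scheduler.set_sigmas(state, num_inference_steps=50)
print(state.timesteps.shape, state.discrete_sigmas.shape)         # (50,) (50,)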
alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class TCDScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.00085, beta_end: float=0.012, beta_schedule: str='scaled_linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, original_inference_steps: int=50, clip_sample: bool=False, clip_sample_range: float=1.0, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, timestep_spacing: str='leading', timestep_scaling: float=10.0, rescale_betas_zero_snr: bool=False): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) self.custom_timesteps = False self._step_index = None self._begin_index = None def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps indices = (schedule_timesteps == timestep).nonzero() pos = 1 if len(indices) > 1 else 0 return indices[pos].item() def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index @property def step_index(self): return self._step_index @property def begin_index(self): return self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def _get_variance(self, timestep, prev_timestep): alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = 
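# Illustrative sketch (not part of the module above): `rescale_zero_terminal_snr` above shifts and
# rescales sqrt(alpha_bar) so that the final cumulative product becomes exactly zero, i.e. the last
# training timestep has zero SNR. A quick numerical check using the helper defined in this module
# (import path per the file layout shown here):
import torch
from diffusers.schedulers.scheduling_tcd import rescale_zero_terminal_snr

betas = torch.linspace(0.00085 ** 0.5, 0.012 ** 0.5, 1000) ** 2   # "scaled_linear" schedule
alphas_cumprod = torch.cumprod(1.0 - rescale_zero_terminal_snr(betas), dim=0)
print(alphas_cumprod[0], alphas_cumprod[-1])                      # first value unchanged, last exactly 0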
s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def set_timesteps(self, num_inference_steps: Optional[int]=None, device: Union[str, torch.device]=None, original_inference_steps: Optional[int]=None, timesteps: Optional[List[int]]=None, strength: float=1.0): if num_inference_steps is None and timesteps is None: raise ValueError('Must pass exactly one of `num_inference_steps` or `timesteps`.') if num_inference_steps is not None and timesteps is not None: raise ValueError('Can only pass one of `num_inference_steps` or `timesteps`.') original_steps = original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps if original_inference_steps is None: if original_steps > self.config.num_train_timesteps: raise ValueError(f'`original_steps`: {original_steps} cannot be larger than `self.config.num_train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') k = self.config.num_train_timesteps // original_steps tcd_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * k - 1 else: tcd_origin_timesteps = np.asarray(list(range(0, int(self.config.num_train_timesteps * strength)))) if timesteps is not None: train_timesteps = set(tcd_origin_timesteps) non_train_timesteps = [] for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError('`timesteps` must be in descending order.') if timesteps[i] not in train_timesteps: non_train_timesteps.append(timesteps[i]) if timesteps[0] >= self.config.num_train_timesteps: raise ValueError(f'`timesteps` must start before `self.config.num_train_timesteps`: {self.config.num_train_timesteps}.') if strength == 1.0 and timesteps[0] != self.config.num_train_timesteps - 1: logger.warning(f'The first timestep on the custom timestep schedule is {timesteps[0]}, not `self.config.num_train_timesteps - 1`: {self.config.num_train_timesteps - 1}. You may get unexpected results when using this timestep schedule.') if non_train_timesteps: logger.warning(f'The custom timestep schedule contains the following timesteps which are not on the original training/distillation timestep schedule: {non_train_timesteps}. You may get unexpected results when using this timestep schedule.') if original_steps is not None: if len(timesteps) > original_steps: logger.warning(f'The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the length of the timestep schedule used for training: {original_steps}. You may get some unexpected results when using this timestep schedule.') elif len(timesteps) > self.config.num_train_timesteps: logger.warning(f'The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the length of the timestep schedule used for training: {self.config.num_train_timesteps}.
You may get some unexpected results when using this timestep schedule.') timesteps = np.array(timesteps, dtype=np.int64) self.num_inference_steps = len(timesteps) self.custom_timesteps = True init_timestep = min(int(self.num_inference_steps * strength), self.num_inference_steps) t_start = max(self.num_inference_steps - init_timestep, 0) timesteps = timesteps[t_start * self.order:] else: if num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.') if original_steps is not None: skipping_step = len(tcd_origin_timesteps) // num_inference_steps if skipping_step < 1: raise ValueError(f'The combination of `original_steps x strength`: {original_steps} x {strength} is smaller than `num_inference_steps`: {num_inference_steps}. Make sure to either reduce `num_inference_steps` to a value smaller than {int(original_steps * strength)} or increase `strength` to a value higher than {float(num_inference_steps / original_steps)}.') self.num_inference_steps = num_inference_steps if original_steps is not None: if num_inference_steps > original_steps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`: {original_steps} because the final timestep schedule will be a subset of the `original_inference_steps`-sized initial timestep schedule.') elif num_inference_steps > self.config.num_train_timesteps: raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `num_train_timesteps`: {self.config.num_train_timesteps} because the final timestep schedule will be a subset of the `num_train_timesteps`-sized initial timestep schedule.') tcd_origin_timesteps = tcd_origin_timesteps[::-1].copy() inference_indices = np.linspace(0, len(tcd_origin_timesteps), num=num_inference_steps, endpoint=False) inference_indices = np.floor(inference_indices).astype(np.int64) timesteps = tcd_origin_timesteps[inference_indices] self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.long) self._step_index = None self._begin_index = None def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, eta: float=0.3, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[TCDSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) assert 0 <= eta <= 1.0, 'gamma must be less than or equal to 1.0' prev_step_index = self.step_index + 1 if prev_step_index < len(self.timesteps): prev_timestep = self.timesteps[prev_step_index] else: prev_timestep = torch.tensor(0) timestep_s = torch.floor((1 - eta) * prev_timestep).to(dtype=torch.long) alpha_prod_t = self.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod alpha_prod_s = self.alphas_cumprod[timestep_s] beta_prod_s = 1 - alpha_prod_s if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt() pred_epsilon = model_output pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon elif 
self.config.prediction_type == 'sample': pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon elif self.config.prediction_type == 'v_prediction': pred_original_sample = alpha_prod_t ** 0.5 * sample - beta_prod_t ** 0.5 * model_output pred_epsilon = alpha_prod_t ** 0.5 * model_output + beta_prod_t ** 0.5 * sample pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or `v_prediction` for `TCDScheduler`.') if eta > 0: if self.step_index != self.num_inference_steps - 1: noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=pred_noised_sample.dtype) prev_sample = (alpha_prod_t_prev / alpha_prod_s).sqrt() * pred_noised_sample + (1 - alpha_prod_t_prev / alpha_prod_s).sqrt() * noise else: prev_sample = pred_noised_sample else: prev_sample = pred_noised_sample self._step_index += 1 if not return_dict: return (prev_sample, pred_noised_sample) return TCDSchedulerOutput(prev_sample=prev_sample, pred_noised_sample=pred_noised_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps def previous_timestep(self, timestep): if self.custom_timesteps: index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] if index == self.timesteps.shape[0] - 1: prev_t = torch.tensor(-1) else: prev_t = self.timesteps[index + 1] else: num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = timestep - self.config.num_train_timesteps // num_inference_steps return prev_t # File: 
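# Illustrative sketch (not part of the module above): a minimal TCDScheduler sampling loop.
# `eta` is the stochasticity parameter in `step`: the sample first jumps deterministically to an
# intermediate timestep s = floor((1 - eta) * prev_t) and, if eta > 0 and this is not the last step,
# is re-noised toward the previous timestep; eta=0.0 makes sampling fully deterministic.
# A stand-in tensor replaces the distilled UNet output.
import torch
from diffusers import TCDScheduler

scheduler = TCDScheduler()
scheduler.set_timesteps(num_inference_steps=4)

generator = torch.Generator().manual_seed(0)
sample = torch.randn(1, 4, 64, 64, generator=generator) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)        # stand-in for unet(sample, t, ...).sample
    out = scheduler.step(model_output, t, sample, eta=0.3, generator=generator)
    sample = out.prev_sample                        # out.pred_noised_sample is also available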
diffusers-main/src/diffusers/schedulers/scheduling_unclip.py import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class UnCLIPSchedulerOutput(BaseOutput): prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class UnCLIPScheduler(SchedulerMixin, ConfigMixin): @register_to_config def __init__(self, num_train_timesteps: int=1000, variance_type: str='fixed_small_log', clip_sample: bool=True, clip_sample_range: Optional[float]=1.0, prediction_type: str='epsilon', beta_schedule: str='squaredcos_cap_v2'): if beta_schedule != 'squaredcos_cap_v2': raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'") self.betas = betas_for_alpha_bar(num_train_timesteps) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.one = torch.tensor(1.0) self.init_noise_sigma = 1.0 self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.variance_type = variance_type def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int]=None) -> torch.Tensor: return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) self.timesteps = torch.from_numpy(timesteps).to(device) def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None): if prev_timestep is None: prev_timestep = t - 1 alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev if prev_timestep == t - 1: beta = self.betas[t] else: beta = 1 - alpha_prod_t / alpha_prod_t_prev variance = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: variance_type = self.config.variance_type if variance_type == 'fixed_small_log': variance = torch.log(torch.clamp(variance, min=1e-20)) variance = torch.exp(0.5 * variance) elif variance_type == 'learned_range': min_log = variance.log() max_log = beta.log() frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, prev_timestep: Optional[int]=None, generator=None, return_dict: bool=True) -> Union[UnCLIPSchedulerOutput, Tuple]: t = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == 'learned_range': 
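# When the UNet also predicts the variance ('learned_range'), its output has twice as many channels
# as the sample, so it is split along dim=1 into the epsilon/sample prediction (first half) and the
# per-pixel variance logits (second half); `_get_variance` then interpolates between the log of the
# "small" posterior variance and log(beta) using those logits.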
(model_output, predicted_variance) = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None if prev_timestep is None: prev_timestep = t - 1 alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev if prev_timestep == t - 1: beta = self.betas[t] alpha = self.alphas[t] else: beta = 1 - alpha_prod_t / alpha_prod_t_prev alpha = 1 - beta if self.config.prediction_type == 'epsilon': pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == 'sample': pred_original_sample = model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample` for the UnCLIPScheduler.') if self.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range) pred_original_sample_coeff = alpha_prod_t_prev ** 0.5 * beta / beta_prod_t current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample variance = 0 if t > 0: variance_noise = randn_tensor(model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device) variance = self._get_variance(t, predicted_variance=predicted_variance, prev_timestep=prev_timestep) if self.variance_type == 'fixed_small_log': variance = variance elif self.variance_type == 'learned_range': variance = (0.5 * variance).exp() else: raise ValueError(f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range` for the UnCLIPScheduler.') variance = variance * variance_noise pred_prev_sample = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples # File: diffusers-main/src/diffusers/schedulers/scheduling_unipc_multistep.py import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type='cosine'): if alpha_transform_type == 'cosine': def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == 'exp': def 
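# Illustrative sketch (not part of the module above): a minimal UnCLIPScheduler loop. Unlike most
# schedulers here, `step` accepts an optional `prev_timestep`, which lets a pipeline run a schedule
# that does not simply decrement by one. Stand-in tensors replace the unCLIP prior/decoder models.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler()
scheduler.set_timesteps(num_inference_steps=25)

generator = torch.Generator().manual_seed(0)
sample = torch.randn(1, 3, 64, 64, generator=generator) * scheduler.init_noise_sigma

for i, t in enumerate(scheduler.timesteps):
    model_output = torch.randn_like(sample)                    # stand-in for the denoiser output
    prev_t = scheduler.timesteps[i + 1] if i + 1 < len(scheduler.timesteps) else None
    sample = scheduler.step(model_output, t, sample, prev_timestep=prev_t,
                            generator=generator).prev_sample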
alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}') betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() alphas_bar_sqrt -= alphas_bar_sqrt_T alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin): _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[np.ndarray, List[float]]]=None, solver_order: int=2, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, sample_max_value: float=1.0, predict_x0: bool=True, solver_type: str='bh2', lower_order_final: bool=True, disable_corrector: List[int]=[], solver_p: SchedulerMixin=None, use_karras_sigmas: Optional[bool]=False, timestep_spacing: str='linspace', steps_offset: int=0, final_sigmas_type: Optional[str]='zero', rescale_betas_zero_snr: bool=False): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == 'linear': self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == 'scaled_linear': self.betas = torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == 'squaredcos_cap_v2': self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}') if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) if rescale_betas_zero_snr: self.alphas_cumprod[-1] = 2 ** (-24) self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 self.init_noise_sigma = 1.0 if solver_type not in ['bh1', 'bh2']: if solver_type in ['midpoint', 'heun', 'logrho']: self.register_to_config(solver_type='bh2') else: raise NotImplementedError(f'{solver_type} is not implemented for {self.__class__}') self.predict_x0 = predict_x0 self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.timestep_list = [None] * solver_order self.lower_order_nums = 0 self.disable_corrector = disable_corrector self.solver_p = solver_p self.last_sample = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') @property def step_index(self): return self._step_index @property def begin_index(self): return 
self._begin_index def set_begin_index(self, begin_index: int=0): self._begin_index = begin_index def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): if self.config.timestep_spacing == 'linspace': timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) elif self.config.timestep_spacing == 'leading': step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1) timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == 'trailing': step_ratio = self.config.num_train_timesteps / num_inference_steps timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.") sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) if self.config.use_karras_sigmas: log_sigmas = np.log(sigmas) sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() if self.config.final_sigmas_type == 'sigma_min': sigma_last = sigmas[-1] elif self.config.final_sigmas_type == 'zero': sigma_last = 0 else: raise ValueError(f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}") sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.final_sigmas_type == 'sigma_min': sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 elif self.config.final_sigmas_type == 'zero': sigma_last = 0 else: raise ValueError(f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}") sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [None] * self.config.solver_order self.lower_order_nums = 0 self.last_sample = None if self.solver_p: self.solver_p.set_timesteps(self.num_inference_steps, device=device) self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to('cpu') def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: dtype = sample.dtype (batch_size, channels, *remaining_dims) = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp(s, min=1, max=self.config.sample_max_value) s = s.unsqueeze(1) sample = torch.clamp(sample, -s, s) / s sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def _sigma_to_t(self, sigma, log_sigmas): log_sigma = np.log(np.maximum(sigma, 1e-10)) dists = log_sigma - log_sigmas[:, np.newaxis] low_idx = np.cumsum(dists >= 0, axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) t 
= (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / (sigma ** 2 + 1) ** 0.5 sigma_t = sigma * alpha_t return (alpha_t, sigma_t) def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: if hasattr(self.config, 'sigma_min'): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, 'sigma_max'): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, **kwargs) -> torch.Tensor: timestep = args[0] if len(args) > 0 else kwargs.pop('timestep', None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError('missing `sample` as a required keyward argument') if timestep is not None: deprecate('timesteps', '1.0.0', 'Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') sigma = self.sigmas[self.step_index] (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) if self.predict_x0: if self.config.prediction_type == 'epsilon': x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == 'sample': x0_pred = model_output elif self.config.prediction_type == 'v_prediction': x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the UniPCMultistepScheduler.') if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred elif self.config.prediction_type == 'epsilon': return model_output elif self.config.prediction_type == 'sample': epsilon = (sample - alpha_t * model_output) / sigma_t return epsilon elif self.config.prediction_type == 'v_prediction': epsilon = alpha_t * model_output + sigma_t * sample return epsilon else: raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction` for the UniPCMultistepScheduler.') def multistep_uni_p_bh_update(self, model_output: torch.Tensor, *args, sample: torch.Tensor=None, order: int=None, **kwargs) -> torch.Tensor: prev_timestep = args[0] if len(args) > 0 else kwargs.pop('prev_timestep', None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError(' missing `sample` as a required keyward argument') if order is None: if len(args) > 2: order = args[2] else: raise ValueError(' missing `order` as a required keyward argument') if prev_timestep is not None: deprecate('prev_timestep', '1.0.0', 'Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') model_output_list = self.model_outputs s0 = self.timestep_list[-1] m0 = model_output_list[-1] x = sample if self.solver_p: x_t = self.solver_p.step(model_output, s0, x).prev_sample return x_t (sigma_t, sigma_s0) = (self.sigmas[self.step_index + 1], self.sigmas[self.step_index]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) 
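# Note for the update below: lambda = log(alpha) - log(sigma) is half the log-SNR, and
# h = lambda_t - lambda_s0 is the step size in lambda-space. The ratios r_k = (lambda_{s_k} - lambda_{s_0}) / h
# for the stored model outputs define a small linear system R @ rho = b whose solution gives the
# UniP (predictor) combination coefficients applied to the model-output differences D1s; the
# corrector below builds the analogous system for UniC.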
lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) h = lambda_t - lambda_s0 device = sample.device rks = [] D1s = [] for i in range(1, order): si = self.step_index - i mi = model_output_list[-(i + 1)] (alpha_si, sigma_si) = self._sigma_to_alpha_sigma_t(self.sigmas[si]) lambda_si = torch.log(alpha_si) - torch.log(sigma_si) rk = (lambda_si - lambda_s0) / h rks.append(rk) D1s.append((mi - m0) / rk) rks.append(1.0) rks = torch.tensor(rks, device=device) R = [] b = [] hh = -h if self.predict_x0 else h h_phi_1 = torch.expm1(hh) h_phi_k = h_phi_1 / hh - 1 factorial_i = 1 if self.config.solver_type == 'bh1': B_h = hh elif self.config.solver_type == 'bh2': B_h = torch.expm1(hh) else: raise NotImplementedError() for i in range(1, order + 1): R.append(torch.pow(rks, i - 1)) b.append(h_phi_k * factorial_i / B_h) factorial_i *= i + 1 h_phi_k = h_phi_k / hh - 1 / factorial_i R = torch.stack(R) b = torch.tensor(b, device=device) if len(D1s) > 0: D1s = torch.stack(D1s, dim=1) if order == 2: rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device) else: rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]).to(device).to(x.dtype) else: D1s = None if self.predict_x0: x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 if D1s is not None: pred_res = torch.einsum('k,bkc...->bc...', rhos_p, D1s) else: pred_res = 0 x_t = x_t_ - alpha_t * B_h * pred_res else: x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 if D1s is not None: pred_res = torch.einsum('k,bkc...->bc...', rhos_p, D1s) else: pred_res = 0 x_t = x_t_ - sigma_t * B_h * pred_res x_t = x_t.to(x.dtype) return x_t def multistep_uni_c_bh_update(self, this_model_output: torch.Tensor, *args, last_sample: torch.Tensor=None, this_sample: torch.Tensor=None, order: int=None, **kwargs) -> torch.Tensor: this_timestep = args[0] if len(args) > 0 else kwargs.pop('this_timestep', None) if last_sample is None: if len(args) > 1: last_sample = args[1] else: raise ValueError(' missing`last_sample` as a required keyward argument') if this_sample is None: if len(args) > 2: this_sample = args[2] else: raise ValueError(' missing`this_sample` as a required keyward argument') if order is None: if len(args) > 3: order = args[3] else: raise ValueError(' missing`order` as a required keyward argument') if this_timestep is not None: deprecate('this_timestep', '1.0.0', 'Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`') model_output_list = self.model_outputs m0 = model_output_list[-1] x = last_sample x_t = this_sample model_t = this_model_output (sigma_t, sigma_s0) = (self.sigmas[self.step_index], self.sigmas[self.step_index - 1]) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma_t) (alpha_s0, sigma_s0) = self._sigma_to_alpha_sigma_t(sigma_s0) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) h = lambda_t - lambda_s0 device = this_sample.device rks = [] D1s = [] for i in range(1, order): si = self.step_index - (i + 1) mi = model_output_list[-(i + 1)] (alpha_si, sigma_si) = self._sigma_to_alpha_sigma_t(self.sigmas[si]) lambda_si = torch.log(alpha_si) - torch.log(sigma_si) rk = (lambda_si - lambda_s0) / h rks.append(rk) D1s.append((mi - m0) / rk) rks.append(1.0) rks = torch.tensor(rks, device=device) R = [] b = [] hh = -h if self.predict_x0 else h h_phi_1 = torch.expm1(hh) h_phi_k = h_phi_1 / hh - 1 factorial_i = 1 if self.config.solver_type == 'bh1': B_h = hh elif 
self.config.solver_type == 'bh2': B_h = torch.expm1(hh) else: raise NotImplementedError() for i in range(1, order + 1): R.append(torch.pow(rks, i - 1)) b.append(h_phi_k * factorial_i / B_h) factorial_i *= i + 1 h_phi_k = h_phi_k / hh - 1 / factorial_i R = torch.stack(R) b = torch.tensor(b, device=device) if len(D1s) > 0: D1s = torch.stack(D1s, dim=1) else: D1s = None if order == 1: rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device) else: rhos_c = torch.linalg.solve(R, b).to(device).to(x.dtype) if self.predict_x0: x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 if D1s is not None: corr_res = torch.einsum('k,bkc...->bc...', rhos_c[:-1], D1s) else: corr_res = 0 D1_t = model_t - m0 x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t) else: x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 if D1s is not None: corr_res = torch.einsum('k,bkc...->bc...', rhos_c[:-1], D1s) else: corr_res = 0 D1_t = model_t - m0 x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t) x_t = x_t.to(x.dtype) return x_t def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step(self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, return_dict: bool=True) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler") if self.step_index is None: self._init_step_index(timestep) use_corrector = self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and (self.last_sample is not None) model_output_convert = self.convert_model_output(model_output, sample=sample) if use_corrector: sample = self.multistep_uni_c_bh_update(this_model_output=model_output_convert, last_sample=self.last_sample, this_sample=sample, order=self.this_order) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.timestep_list[i] = self.timestep_list[i + 1] self.model_outputs[-1] = model_output_convert self.timestep_list[-1] = timestep if self.config.lower_order_final: this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) else: this_order = self.config.solver_order self.this_order = min(this_order, self.lower_order_nums + 1) assert self.this_order > 0 self.last_sample = sample prev_sample = self.multistep_uni_p_bh_update(model_output=model_output, sample=sample, order=self.this_order) if self.lower_order_nums < self.config.solver_order: self.lower_order_nums += 1 self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: return sample def add_noise(self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if 
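# Illustrative sketch (not part of the module above): UniPCMultistepScheduler keeps a short history
# of converted model outputs (`self.model_outputs`) and, inside `step`, first applies the UniC
# corrector to the incoming sample (when a previous sample exists) and then takes a UniP predictor
# step. Minimal loop with a stand-in denoiser:
import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(solver_order=2)
scheduler.set_timesteps(num_inference_steps=20)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)        # stand-in for unet(sample, t, ...).sample
    sample = scheduler.step(model_output, t, sample).prev_sample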
original_samples.device.type == 'mps' and torch.is_floating_point(timesteps): schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: step_indices = [self.step_index] * timesteps.shape[0] else: step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) (alpha_t, sigma_t) = self._sigma_to_alpha_sigma_t(sigma) noisy_samples = alpha_t * original_samples + sigma_t * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps # File: diffusers-main/src/diffusers/schedulers/scheduling_utils.py import importlib import os from dataclasses import dataclass from enum import Enum from typing import Optional, Union import torch from huggingface_hub.utils import validate_hf_hub_args from ..utils import BaseOutput, PushToHubMixin SCHEDULER_CONFIG_NAME = 'scheduler_config.json' class KarrasDiffusionSchedulers(Enum): DDIMScheduler = 1 DDPMScheduler = 2 PNDMScheduler = 3 LMSDiscreteScheduler = 4 EulerDiscreteScheduler = 5 HeunDiscreteScheduler = 6 EulerAncestralDiscreteScheduler = 7 DPMSolverMultistepScheduler = 8 DPMSolverSinglestepScheduler = 9 KDPM2DiscreteScheduler = 10 KDPM2AncestralDiscreteScheduler = 11 DEISMultistepScheduler = 12 UniPCMultistepScheduler = 13 DPMSolverSDEScheduler = 14 EDMEulerScheduler = 15 AysSchedules = {'StableDiffusionTimesteps': [999, 850, 736, 645, 545, 455, 343, 233, 124, 24], 'StableDiffusionSigmas': [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.0], 'StableDiffusionXLTimesteps': [999, 845, 730, 587, 443, 310, 193, 116, 53, 13], 'StableDiffusionXLSigmas': [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.38, 0.234, 0.113, 0.0], 'StableDiffusionVideoSigmas': [700.0, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.0]} @dataclass class SchedulerOutput(BaseOutput): prev_sample: torch.Tensor class SchedulerMixin(PushToHubMixin): config_name = SCHEDULER_CONFIG_NAME _compatibles = [] has_compatibles = True @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]]=None, subfolder: Optional[str]=None, return_unused_kwargs=False, **kwargs): (config, kwargs, commit_hash) = cls.load_config(pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs) return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs) def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs): self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) @property def compatibles(self): return self._get_compatibles() @classmethod def _get_compatibles(cls): compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) diffusers_library = importlib.import_module(__name__.split('.')[0]) compatible_classes = [getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)] return compatible_classes # File: diffusers-main/src/diffusers/schedulers/scheduling_utils_flax.py import importlib import math 
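# Illustrative sketch (not part of the module above): SchedulerMixin.from_pretrained only loads
# `scheduler_config.json` (typically from a "scheduler" subfolder of a model repo) and rebuilds the
# scheduler from that config, and `compatibles` lists the other KarrasDiffusionSchedulers that accept
# the same config -- which is what lets pipelines swap schedulers freely. The repo id below is a
# placeholder, so the from_pretrained line is left commented out.
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler()
print([cls.__name__ for cls in scheduler.compatibles])      # e.g. DDIMScheduler, DDPMScheduler, ...

# scheduler = UniPCMultistepScheduler.from_pretrained("org/model-id", subfolder="scheduler")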
import os from dataclasses import dataclass from enum import Enum from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from huggingface_hub.utils import validate_hf_hub_args from ..utils import BaseOutput, PushToHubMixin SCHEDULER_CONFIG_NAME = 'scheduler_config.json' class FlaxKarrasDiffusionSchedulers(Enum): FlaxDDIMScheduler = 1 FlaxDDPMScheduler = 2 FlaxPNDMScheduler = 3 FlaxLMSDiscreteScheduler = 4 FlaxDPMSolverMultistepScheduler = 5 FlaxEulerDiscreteScheduler = 6 @dataclass class FlaxSchedulerOutput(BaseOutput): prev_sample: jnp.ndarray class FlaxSchedulerMixin(PushToHubMixin): config_name = SCHEDULER_CONFIG_NAME ignore_for_config = ['dtype'] _compatibles = [] has_compatibles = True @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]]=None, subfolder: Optional[str]=None, return_unused_kwargs=False, **kwargs): (config, kwargs) = cls.load_config(pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs) (scheduler, unused_kwargs) = cls.from_config(config, return_unused_kwargs=True, **kwargs) if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False): state = scheduler.create_state() if return_unused_kwargs: return (scheduler, state, unused_kwargs) return (scheduler, state) def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs): self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) @property def compatibles(self): return self._get_compatibles() @classmethod def _get_compatibles(cls): compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) diffusers_library = importlib.import_module(__name__.split('.')[0]) compatible_classes = [getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)] return compatible_classes def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray: assert len(shape) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape) def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray: def alpha_bar(time_step): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return jnp.array(betas, dtype=dtype) @flax.struct.dataclass class CommonSchedulerState: alphas: jnp.ndarray betas: jnp.ndarray alphas_cumprod: jnp.ndarray @classmethod def create(cls, scheduler): config = scheduler.config if config.trained_betas is not None: betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype) elif config.beta_schedule == 'linear': betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype) elif config.beta_schedule == 'scaled_linear': betas = jnp.linspace(config.beta_start ** 0.5, config.beta_end ** 0.5, config.num_train_timesteps, dtype=scheduler.dtype) ** 2 elif config.beta_schedule == 'squaredcos_cap_v2': betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype) else: raise NotImplementedError(f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}') alphas = 1.0 - betas alphas_cumprod = jnp.cumprod(alphas, axis=0) return cls(alphas=alphas, betas=betas, 
alphas_cumprod=alphas_cumprod) def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray): alphas_cumprod = state.alphas_cumprod sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape) return (sqrt_alpha_prod, sqrt_one_minus_alpha_prod) def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray): (sqrt_alpha_prod, sqrt_one_minus_alpha_prod) = get_sqrt_alpha_prod(state, original_samples, noise, timesteps) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray): (sqrt_alpha_prod, sqrt_one_minus_alpha_prod) = get_sqrt_alpha_prod(state, sample, noise, timesteps) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity # File: diffusers-main/src/diffusers/schedulers/scheduling_vq_diffusion.py from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin @dataclass class VQDiffusionSchedulerOutput(BaseOutput): prev_sample: torch.LongTensor def index_to_log_onehot(x: torch.LongTensor, num_classes: int) -> torch.Tensor: x_onehot = F.one_hot(x, num_classes) x_onehot = x_onehot.permute(0, 2, 1) log_x = torch.log(x_onehot.float().clamp(min=1e-30)) return log_x def gumbel_noised(logits: torch.Tensor, generator: Optional[torch.Generator]) -> torch.Tensor: uniform = torch.rand(logits.shape, device=logits.device, generator=generator) gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30) noised = gumbel_noise + logits return noised def alpha_schedules(num_diffusion_timesteps: int, alpha_cum_start=0.99999, alpha_cum_end=9e-06): att = np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (alpha_cum_end - alpha_cum_start) + alpha_cum_start att = np.concatenate(([1], att)) at = att[1:] / att[:-1] att = np.concatenate((att[1:], [1])) return (at, att) def gamma_schedules(num_diffusion_timesteps: int, gamma_cum_start=9e-06, gamma_cum_end=0.99999): ctt = np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (gamma_cum_end - gamma_cum_start) + gamma_cum_start ctt = np.concatenate(([0], ctt)) one_minus_ctt = 1 - ctt one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1] ct = 1 - one_minus_ct ctt = np.concatenate((ctt[1:], [0])) return (ct, ctt) class VQDiffusionScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__(self, num_vec_classes: int, num_train_timesteps: int=100, alpha_cum_start: float=0.99999, alpha_cum_end: float=9e-06, gamma_cum_start: float=9e-06, gamma_cum_end: float=0.99999): self.num_embed = num_vec_classes self.mask_class = self.num_embed - 1 (at, att) = alpha_schedules(num_train_timesteps, alpha_cum_start=alpha_cum_start, alpha_cum_end=alpha_cum_end) (ct, ctt) = gamma_schedules(num_train_timesteps, 
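# Illustrative sketch (not part of the module above): the Flax helpers above implement the same
# q(x_t | x_0) forward noising as the PyTorch `add_noise` methods, with `broadcast_to_shape_from_left`
# replacing the unsqueeze-in-a-loop pattern. A quick shape check (assumes JAX/Flax are installed):
import jax.numpy as jnp
from diffusers.schedulers.scheduling_utils_flax import broadcast_to_shape_from_left

coeff = jnp.ones((2,))                                       # one scalar per batch element
x = jnp.zeros((2, 64, 64, 3))
print(broadcast_to_shape_from_left(coeff, x.shape).shape)    # (2, 64, 64, 3)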
gamma_cum_start=gamma_cum_start, gamma_cum_end=gamma_cum_end) num_non_mask_classes = self.num_embed - 1 bt = (1 - at - ct) / num_non_mask_classes btt = (1 - att - ctt) / num_non_mask_classes at = torch.tensor(at.astype('float64')) bt = torch.tensor(bt.astype('float64')) ct = torch.tensor(ct.astype('float64')) log_at = torch.log(at) log_bt = torch.log(bt) log_ct = torch.log(ct) att = torch.tensor(att.astype('float64')) btt = torch.tensor(btt.astype('float64')) ctt = torch.tensor(ctt.astype('float64')) log_cumprod_at = torch.log(att) log_cumprod_bt = torch.log(btt) log_cumprod_ct = torch.log(ctt) self.log_at = log_at.float() self.log_bt = log_bt.float() self.log_ct = log_ct.float() self.log_cumprod_at = log_cumprod_at.float() self.log_cumprod_bt = log_cumprod_bt.float() self.log_cumprod_ct = log_cumprod_ct.float() self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device]=None): self.num_inference_steps = num_inference_steps timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.timesteps = torch.from_numpy(timesteps).to(device) self.log_at = self.log_at.to(device) self.log_bt = self.log_bt.to(device) self.log_ct = self.log_ct.to(device) self.log_cumprod_at = self.log_cumprod_at.to(device) self.log_cumprod_bt = self.log_cumprod_bt.to(device) self.log_cumprod_ct = self.log_cumprod_ct.to(device) def step(self, model_output: torch.Tensor, timestep: torch.long, sample: torch.LongTensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[VQDiffusionSchedulerOutput, Tuple]: if timestep == 0: log_p_x_t_min_1 = model_output else: log_p_x_t_min_1 = self.q_posterior(model_output, sample, timestep) log_p_x_t_min_1 = gumbel_noised(log_p_x_t_min_1, generator) x_t_min_1 = log_p_x_t_min_1.argmax(dim=1) if not return_dict: return (x_t_min_1,) return VQDiffusionSchedulerOutput(prev_sample=x_t_min_1) def q_posterior(self, log_p_x_0, x_t, t): log_onehot_x_t = index_to_log_onehot(x_t, self.num_embed) log_q_x_t_given_x_0 = self.log_Q_t_transitioning_to_known_class(t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=True) log_q_t_given_x_t_min_1 = self.log_Q_t_transitioning_to_known_class(t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=False) q = log_p_x_0 - log_q_x_t_given_x_0 q_log_sum_exp = torch.logsumexp(q, dim=1, keepdim=True) q = q - q_log_sum_exp q = self.apply_cumulative_transitions(q, t - 1) log_p_x_t_min_1 = q + log_q_t_given_x_t_min_1 + q_log_sum_exp return log_p_x_t_min_1 def log_Q_t_transitioning_to_known_class(self, *, t: torch.int, x_t: torch.LongTensor, log_onehot_x_t: torch.Tensor, cumulative: bool): if cumulative: a = self.log_cumprod_at[t] b = self.log_cumprod_bt[t] c = self.log_cumprod_ct[t] else: a = self.log_at[t] b = self.log_bt[t] c = self.log_ct[t] if not cumulative: log_onehot_x_t_transitioning_from_masked = log_onehot_x_t[:, -1, :].unsqueeze(1) log_onehot_x_t = log_onehot_x_t[:, :-1, :] log_Q_t = (log_onehot_x_t + a).logaddexp(b) mask_class_mask = x_t == self.mask_class mask_class_mask = mask_class_mask.unsqueeze(1).expand(-1, self.num_embed - 1, -1) log_Q_t[mask_class_mask] = c if not cumulative: log_Q_t = torch.cat((log_Q_t, log_onehot_x_t_transitioning_from_masked), dim=1) return log_Q_t def apply_cumulative_transitions(self, q, t): bsz = q.shape[0] a = self.log_cumprod_at[t] b = self.log_cumprod_bt[t] c = self.log_cumprod_ct[t] num_latent_pixels = q.shape[2] c = c.expand(bsz, 1, 
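# Illustrative sketch (not part of the module above): VQ-Diffusion operates on discrete latent
# indices. `index_to_log_onehot` turns (batch, num_pixels) class indices into clamped log one-hot
# vectors of shape (batch, num_classes, num_pixels), and `gumbel_noised` + argmax draws from a
# categorical distribution given log-probabilities, exactly as `step` does above.
import torch
from diffusers.schedulers.scheduling_vq_diffusion import index_to_log_onehot, gumbel_noised

x = torch.randint(0, 10, (2, 16))                 # stand-in latent indices: 10 classes, 16 pixels
log_onehot = index_to_log_onehot(x, num_classes=10)
print(log_onehot.shape)                           # torch.Size([2, 10, 16])

generator = torch.Generator().manual_seed(0)
sampled = gumbel_noised(log_onehot, generator).argmax(dim=1)
print(torch.equal(sampled, x))                    # True (the log one-hot probs are effectively deterministic)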
num_latent_pixels) q = (q + a).logaddexp(b) q = torch.cat((q, c), dim=1) return q # File: diffusers-main/src/diffusers/training_utils.py import contextlib import copy import gc import math import random from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np import torch from .models import UNet2DConditionModel from .schedulers import SchedulerMixin from .utils import convert_state_dict_to_diffusers, convert_state_dict_to_peft, deprecate, is_peft_available, is_torch_npu_available, is_torchvision_available, is_transformers_available if is_transformers_available(): import transformers if is_peft_available(): from peft import set_peft_model_state_dict if is_torchvision_available(): from torchvision import transforms if is_torch_npu_available(): import torch_npu def set_seed(seed: int): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if is_torch_npu_available(): torch.npu.manual_seed_all(seed) else: torch.cuda.manual_seed_all(seed) def compute_snr(noise_scheduler, timesteps): alphas_cumprod = noise_scheduler.alphas_cumprod sqrt_alphas_cumprod = alphas_cumprod ** 0.5 sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float() while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape): sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None] alpha = sqrt_alphas_cumprod.expand(timesteps.shape) sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float() while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape): sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None] sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape) snr = (alpha / sigma) ** 2 return snr def resolve_interpolation_mode(interpolation_type: str): if not is_torchvision_available(): raise ImportError('Please make sure to install `torchvision` to be able to use the `resolve_interpolation_mode()` function.') if interpolation_type == 'bilinear': interpolation_mode = transforms.InterpolationMode.BILINEAR elif interpolation_type == 'bicubic': interpolation_mode = transforms.InterpolationMode.BICUBIC elif interpolation_type == 'box': interpolation_mode = transforms.InterpolationMode.BOX elif interpolation_type == 'nearest': interpolation_mode = transforms.InterpolationMode.NEAREST elif interpolation_type == 'nearest_exact': interpolation_mode = transforms.InterpolationMode.NEAREST_EXACT elif interpolation_type == 'hamming': interpolation_mode = transforms.InterpolationMode.HAMMING elif interpolation_type == 'lanczos': interpolation_mode = transforms.InterpolationMode.LANCZOS else: raise ValueError(f'The given interpolation mode {interpolation_type} is not supported. 
Currently supported interpolation modes are `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`.') return interpolation_mode def compute_dream_and_update_latents(unet: UNet2DConditionModel, noise_scheduler: SchedulerMixin, timesteps: torch.Tensor, noise: torch.Tensor, noisy_latents: torch.Tensor, target: torch.Tensor, encoder_hidden_states: torch.Tensor, dream_detail_preservation: float=1.0) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]: alphas_cumprod = noise_scheduler.alphas_cumprod.to(timesteps.device)[timesteps, None, None, None] sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 dream_lambda = sqrt_one_minus_alphas_cumprod ** dream_detail_preservation pred = None with torch.no_grad(): pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample (_noisy_latents, _target) = (None, None) if noise_scheduler.config.prediction_type == 'epsilon': predicted_noise = pred delta_noise = (noise - predicted_noise).detach() delta_noise.mul_(dream_lambda) _noisy_latents = noisy_latents.add(sqrt_one_minus_alphas_cumprod * delta_noise) _target = target.add(delta_noise) elif noise_scheduler.config.prediction_type == 'v_prediction': raise NotImplementedError('DREAM has not been implemented for v-prediction') else: raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}') return (_noisy_latents, _target) def unet_lora_state_dict(unet: UNet2DConditionModel) -> Dict[str, torch.Tensor]: lora_state_dict = {} for (name, module) in unet.named_modules(): if hasattr(module, 'set_lora_layer'): lora_layer = getattr(module, 'lora_layer') if lora_layer is not None: current_lora_layer_sd = lora_layer.state_dict() for (lora_layer_matrix_name, lora_param) in current_lora_layer_sd.items(): lora_state_dict[f'{name}.lora.{lora_layer_matrix_name}'] = lora_param return lora_state_dict def cast_training_params(model: Union[torch.nn.Module, List[torch.nn.Module]], dtype=torch.float32): if not isinstance(model, list): model = [model] for m in model: for param in m.parameters(): if param.requires_grad: param.data = param.to(dtype) def _set_state_dict_into_text_encoder(lora_state_dict: Dict[str, torch.Tensor], prefix: str, text_encoder: torch.nn.Module): text_encoder_state_dict = {f"{k.replace(prefix, '')}": v for (k, v) in lora_state_dict.items() if k.startswith(prefix)} text_encoder_state_dict = convert_state_dict_to_peft(convert_state_dict_to_diffusers(text_encoder_state_dict)) set_peft_model_state_dict(text_encoder, text_encoder_state_dict, adapter_name='default') def compute_density_for_timestep_sampling(weighting_scheme: str, batch_size: int, logit_mean: float=None, logit_std: float=None, mode_scale: float=None): if weighting_scheme == 'logit_normal': u = torch.normal(mean=logit_mean, std=logit_std, size=(batch_size,), device='cpu') u = torch.nn.functional.sigmoid(u) elif weighting_scheme == 'mode': u = torch.rand(size=(batch_size,), device='cpu') u = 1 - u - mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u) else: u = torch.rand(size=(batch_size,), device='cpu') return u def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None): if weighting_scheme == 'sigma_sqrt': weighting = (sigmas ** (-2.0)).float() elif weighting_scheme == 'cosmap': bot = 1 - 2 * sigmas + 2 * sigmas ** 2 weighting = 2 / (math.pi * bot) else: weighting = torch.ones_like(sigmas) return weighting def clear_objs_and_retain_memory(objs: List[Any]): if len(objs) >= 1: for obj in objs: del obj gc.collect() if torch.cuda.is_available(): 
torch.cuda.empty_cache() elif torch.backends.mps.is_available(): torch.mps.empty_cache() elif is_torch_npu_available(): torch_npu.npu.empty_cache() class EMAModel: def __init__(self, parameters: Iterable[torch.nn.Parameter], decay: float=0.9999, min_decay: float=0.0, update_after_step: int=0, use_ema_warmup: bool=False, inv_gamma: Union[float, int]=1.0, power: Union[float, int]=2 / 3, foreach: bool=False, model_cls: Optional[Any]=None, model_config: Dict[str, Any]=None, **kwargs): if isinstance(parameters, torch.nn.Module): deprecation_message = 'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. Please pass the parameters of the module instead.' deprecate('passing a `torch.nn.Module` to `ExponentialMovingAverage`', '1.0.0', deprecation_message, standard_warn=False) parameters = parameters.parameters() use_ema_warmup = True if kwargs.get('max_value', None) is not None: deprecation_message = 'The `max_value` argument is deprecated. Please use `decay` instead.' deprecate('max_value', '1.0.0', deprecation_message, standard_warn=False) decay = kwargs['max_value'] if kwargs.get('min_value', None) is not None: deprecation_message = 'The `min_value` argument is deprecated. Please use `min_decay` instead.' deprecate('min_value', '1.0.0', deprecation_message, standard_warn=False) min_decay = kwargs['min_value'] parameters = list(parameters) self.shadow_params = [p.clone().detach() for p in parameters] if kwargs.get('device', None) is not None: deprecation_message = 'The `device` argument is deprecated. Please use `to` instead.' deprecate('device', '1.0.0', deprecation_message, standard_warn=False) self.to(device=kwargs['device']) self.temp_stored_params = None self.decay = decay self.min_decay = min_decay self.update_after_step = update_after_step self.use_ema_warmup = use_ema_warmup self.inv_gamma = inv_gamma self.power = power self.optimization_step = 0 self.cur_decay_value = None self.foreach = foreach self.model_cls = model_cls self.model_config = model_config @classmethod def from_pretrained(cls, path, model_cls, foreach=False) -> 'EMAModel': (_, ema_kwargs) = model_cls.load_config(path, return_unused_kwargs=True) model = model_cls.from_pretrained(path) ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config, foreach=foreach) ema_model.load_state_dict(ema_kwargs) return ema_model def save_pretrained(self, path): if self.model_cls is None: raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.') if self.model_config is None: raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.') model = self.model_cls.from_config(self.model_config) state_dict = self.state_dict() state_dict.pop('shadow_params', None) model.register_to_config(**state_dict) self.copy_to(model.parameters()) model.save_pretrained(path) def get_decay(self, optimization_step: int) -> float: step = max(0, optimization_step - self.update_after_step - 1) if step <= 0: return 0.0 if self.use_ema_warmup: cur_decay_value = 1 - (1 + step / self.inv_gamma) ** (-self.power) else: cur_decay_value = (1 + step) / (10 + step) cur_decay_value = min(cur_decay_value, self.decay) cur_decay_value = max(cur_decay_value, self.min_decay) return cur_decay_value @torch.no_grad() def step(self, parameters: Iterable[torch.nn.Parameter]): if isinstance(parameters, torch.nn.Module): deprecation_message = 'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. Please pass the parameters of the module instead.' 
deprecate('passing a `torch.nn.Module` to `ExponentialMovingAverage.step`', '1.0.0', deprecation_message, standard_warn=False) parameters = parameters.parameters() parameters = list(parameters) self.optimization_step += 1 decay = self.get_decay(self.optimization_step) self.cur_decay_value = decay one_minus_decay = 1 - decay context_manager = contextlib.nullcontext if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): import deepspeed if self.foreach: if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(parameters, modifier_rank=None) with context_manager(): params_grad = [param for param in parameters if param.requires_grad] s_params_grad = [s_param for (s_param, param) in zip(self.shadow_params, parameters) if param.requires_grad] if len(params_grad) < len(parameters): torch._foreach_copy_([s_param for (s_param, param) in zip(self.shadow_params, parameters) if not param.requires_grad], [param for param in parameters if not param.requires_grad], non_blocking=True) torch._foreach_sub_(s_params_grad, torch._foreach_sub(s_params_grad, params_grad), alpha=one_minus_decay) else: for (s_param, param) in zip(self.shadow_params, parameters): if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param)) else: s_param.copy_(param) def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: parameters = list(parameters) if self.foreach: torch._foreach_copy_([param.data for param in parameters], [s_param.to(param.device).data for (s_param, param) in zip(self.shadow_params, parameters)]) else: for (s_param, param) in zip(self.shadow_params, parameters): param.data.copy_(s_param.to(param.device).data) def pin_memory(self) -> None: self.shadow_params = [p.pin_memory() for p in self.shadow_params] def to(self, device=None, dtype=None, non_blocking=False) -> None: self.shadow_params = [p.to(device=device, dtype=dtype, non_blocking=non_blocking) if p.is_floating_point() else p.to(device=device, non_blocking=non_blocking) for p in self.shadow_params] def state_dict(self) -> dict: return {'decay': self.decay, 'min_decay': self.min_decay, 'optimization_step': self.optimization_step, 'update_after_step': self.update_after_step, 'use_ema_warmup': self.use_ema_warmup, 'inv_gamma': self.inv_gamma, 'power': self.power, 'shadow_params': self.shadow_params} def store(self, parameters: Iterable[torch.nn.Parameter]) -> None: self.temp_stored_params = [param.detach().cpu().clone() for param in parameters] def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None: if self.temp_stored_params is None: raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights to `restore()`') if self.foreach: torch._foreach_copy_([param.data for param in parameters], [c_param.data for c_param in self.temp_stored_params]) else: for (c_param, param) in zip(self.temp_stored_params, parameters): param.data.copy_(c_param.data) self.temp_stored_params = None def load_state_dict(self, state_dict: dict) -> None: state_dict = copy.deepcopy(state_dict) self.decay = state_dict.get('decay', self.decay) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('Decay must be between 0 and 1') self.min_decay = state_dict.get('min_decay', self.min_decay) if not 
isinstance(self.min_decay, float): raise ValueError('Invalid min_decay') self.optimization_step = state_dict.get('optimization_step', self.optimization_step) if not isinstance(self.optimization_step, int): raise ValueError('Invalid optimization_step') self.update_after_step = state_dict.get('update_after_step', self.update_after_step) if not isinstance(self.update_after_step, int): raise ValueError('Invalid update_after_step') self.use_ema_warmup = state_dict.get('use_ema_warmup', self.use_ema_warmup) if not isinstance(self.use_ema_warmup, bool): raise ValueError('Invalid use_ema_warmup') self.inv_gamma = state_dict.get('inv_gamma', self.inv_gamma) if not isinstance(self.inv_gamma, (float, int)): raise ValueError('Invalid inv_gamma') self.power = state_dict.get('power', self.power) if not isinstance(self.power, (float, int)): raise ValueError('Invalid power') shadow_params = state_dict.get('shadow_params', None) if shadow_params is not None: self.shadow_params = shadow_params if not isinstance(self.shadow_params, list): raise ValueError('shadow_params must be a list') if not all((isinstance(p, torch.Tensor) for p in self.shadow_params)): raise ValueError('shadow_params must all be Tensors') # File: diffusers-main/src/diffusers/video_processor.py import warnings from typing import List, Optional, Union import numpy as np import PIL import torch from .image_processor import VaeImageProcessor, is_valid_image, is_valid_image_imagelist class VideoProcessor(VaeImageProcessor): def preprocess_video(self, video, height: Optional[int]=None, width: Optional[int]=None) -> torch.Tensor: if isinstance(video, list) and isinstance(video[0], np.ndarray) and (video[0].ndim == 5): warnings.warn('Passing `video` as a list of 5d np.ndarray is deprecated. Please concatenate the list along the batch dimension and pass it as a single 5d np.ndarray', FutureWarning) video = np.concatenate(video, axis=0) if isinstance(video, list) and isinstance(video[0], torch.Tensor) and (video[0].ndim == 5): warnings.warn('Passing `video` as a list of 5d torch.Tensor is deprecated. Please concatenate the list along the batch dimension and pass it as a single 5d torch.Tensor', FutureWarning) video = torch.cat(video, axis=0) if isinstance(video, (np.ndarray, torch.Tensor)) and video.ndim == 5: video = list(video) elif isinstance(video, list) and is_valid_image(video[0]) or is_valid_image_imagelist(video): video = [video] elif isinstance(video, list) and is_valid_image_imagelist(video[0]): video = video else: raise ValueError('Input is in an incorrect format. Currently, we only support numpy.ndarray, torch.Tensor, PIL.Image.Image') video = torch.stack([self.preprocess(img, height=height, width=width) for img in video], dim=0) video = video.permute(0, 2, 1, 3, 4) return video def postprocess_video(self, video: torch.Tensor, output_type: str='np') -> Union[np.ndarray, torch.Tensor, List[PIL.Image.Image]]: batch_size = video.shape[0] outputs = [] for batch_idx in range(batch_size): batch_vid = video[batch_idx].permute(1, 0, 2, 3) batch_output = self.postprocess(batch_vid, output_type) outputs.append(batch_output) if output_type == 'np': outputs = np.stack(outputs) elif output_type == 'pt': outputs = torch.stack(outputs) elif output_type != 'pil': raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']") return outputs
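# --- Usage sketch (not part of the library source): a minimal round trip through VideoProcessor,
# assuming a 5D float tensor in [0, 1] shaped (batch, num_frames, channels, height, width).
# The constructor arguments are inherited from VaeImageProcessor; preprocess_video normalizes each
# frame via VaeImageProcessor.preprocess and permutes the result to
# (batch, channels, num_frames, height, width), and postprocess_video reverses that per clip.
import torch
from diffusers.video_processor import VideoProcessor

video_processor = VideoProcessor(do_resize=True, vae_scale_factor=8)
clip = torch.rand(1, 16, 3, 256, 256)  # one clip of 16 RGB frames with values in [0, 1]
model_input = video_processor.preprocess_video(clip, height=256, width=256)
print(model_input.shape)  # torch.Size([1, 3, 16, 256, 256]), normalized to [-1, 1]
frames = video_processor.postprocess_video(model_input, output_type='pil')
print(len(frames), len(frames[0]))  # 1 clip, 16 PIL.Image.Image frames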
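# --- Usage sketch (not part of the library source): keeping an EMA copy of a toy model during
# training with EMAModel from training_utils above. step() blends the shadow parameters toward the
# live parameters using the decay from get_decay(); store()/copy_to()/restore() let you evaluate
# with the averaged weights and then resume training with the original ones.
import torch
from diffusers.training_utils import EMAModel

model = torch.nn.Linear(4, 4)
ema = EMAModel(model.parameters(), decay=0.999, use_ema_warmup=True)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for _ in range(10):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.step(model.parameters())  # update shadow params after each optimizer step
ema.store(model.parameters())     # stash the live weights
ema.copy_to(model.parameters())   # evaluate or save with the EMA weights
ema.restore(model.parameters())   # put the live weights back before continuing training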