1inkusFace committed on
Commit 337cb58 · verified · 1 Parent(s): 6c8a9a7

Create transformer_sd3.py

Files changed (1)
  1. models/transformer_sd3.py +375 -0
models/transformer_sd3.py ADDED
@@ -0,0 +1,375 @@
# Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import FromOriginalModelMixin, PeftAdapterMixin
from .attention import JointTransformerBlock
from diffusers.models.attention_processor import Attention, AttentionProcessor, FusedJointAttnProcessor2_0
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.normalization import AdaLayerNormContinuous
from diffusers.utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers
from diffusers.models.embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
from diffusers.models.modeling_outputs import Transformer2DModelOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SD3Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
    """
    The Transformer model introduced in Stable Diffusion 3.

    Reference: https://arxiv.org/abs/2403.03206

    Parameters:
        sample_size (`int`): The width of the latent images. This is fixed during training since
            it is used to learn a number of position embeddings.
        patch_size (`int`): Patch size to turn the input data into small patches.
        in_channels (`int`, *optional*, defaults to 16): The number of channels in the input.
        num_layers (`int`, *optional*, defaults to 18): The number of layers of Transformer blocks to use.
        attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
        num_attention_heads (`int`, *optional*, defaults to 18): The number of heads to use for multi-head attention.
        joint_attention_dim (`int`, *optional*, defaults to 4096): The number of `encoder_hidden_states` dimensions to use.
        caption_projection_dim (`int`): Number of dimensions to use when projecting the `encoder_hidden_states`.
        pooled_projection_dim (`int`): Number of dimensions to use when projecting the `pooled_projections`.
        out_channels (`int`, defaults to 16): Number of output channels.

    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        sample_size: int = 128,
        patch_size: int = 2,
        in_channels: int = 16,
        num_layers: int = 18,
        attention_head_dim: int = 64,
        num_attention_heads: int = 18,
        joint_attention_dim: int = 4096,
        caption_projection_dim: int = 1152,
        pooled_projection_dim: int = 2048,
        out_channels: int = 16,
        pos_embed_max_size: int = 96,
        dual_attention_layers: Tuple[
            int, ...
        ] = (),  # () for sd3.0; (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) for sd3.5
        qk_norm: Optional[str] = None,
    ):
        super().__init__()
        default_out_channels = in_channels
        self.out_channels = out_channels if out_channels is not None else default_out_channels
        self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim

        self.pos_embed = PatchEmbed(
            height=self.config.sample_size,
            width=self.config.sample_size,
            patch_size=self.config.patch_size,
            in_channels=self.config.in_channels,
            embed_dim=self.inner_dim,
            pos_embed_max_size=pos_embed_max_size,  # hard-code for now.
        )
        self.time_text_embed = CombinedTimestepTextProjEmbeddings(
            embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim
        )
        self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.config.caption_projection_dim)

        # `attention_head_dim` is doubled to account for the mixing.
        # It needs to be crafted when we get the actual checkpoints.
        self.transformer_blocks = nn.ModuleList(
            [
                JointTransformerBlock(
                    dim=self.inner_dim,
                    num_attention_heads=self.config.num_attention_heads,
                    attention_head_dim=self.config.attention_head_dim,
                    context_pre_only=i == num_layers - 1,
                    qk_norm=qk_norm,
                    use_dual_attention=True if i in dual_attention_layers else False,
                )
                for i in range(self.config.num_layers)
            ]
        )

        self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
        self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)

        self.gradient_checkpointing = False

    # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
    def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
        """
        Sets the feed-forward layers to use [feed forward
        chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).

        Parameters:
            chunk_size (`int`, *optional*):
                The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
                over each tensor of dim=`dim`.
            dim (`int`, *optional*, defaults to `0`):
                The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
                or dim=1 (sequence length).
        """
        if dim not in [0, 1]:
            raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")

        # By default chunk size is 1
        chunk_size = chunk_size or 1

        def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
            if hasattr(module, "set_chunk_feed_forward"):
                module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)

            for child in module.children():
                fn_recursive_feed_forward(child, chunk_size, dim)

        for module in self.children():
            fn_recursive_feed_forward(module, chunk_size, dim)

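    # Usage sketch (illustrative, not part of the upstream docstrings): with an
    # instantiated `model` of this class, chunking the feed-forward over the
    # sequence dimension trades a little speed for lower peak memory, e.g.:
    #     model.enable_forward_chunking(chunk_size=2, dim=1)
    #     ...  # run inference
    #     model.disable_forward_chunking()
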
    # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.disable_forward_chunking
    def disable_forward_chunking(self):
        def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
            if hasattr(module, "set_chunk_feed_forward"):
                module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)

            for child in module.children():
                fn_recursive_feed_forward(child, chunk_size, dim)

        for module in self.children():
            fn_recursive_feed_forward(module, None, 0)

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

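    # Usage sketch (illustrative): `attn_processors` returns a dict keyed by module
    # path, so a round trip such as
    #     procs = model.attn_processors
    #     model.set_attn_processor(procs)
    # is valid; a single processor instance may also be passed to apply it to all
    # `Attention` layers at once.
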
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedJointAttnProcessor2_0
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

        self.set_attn_processor(FusedJointAttnProcessor2_0())

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>

        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)

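    # Usage sketch (illustrative): fusing is typically applied once before inference
    # and reverted when the separate projection weights are needed again, e.g.:
    #     model.fuse_qkv_projections()
    #     ...  # run inference with FusedJointAttnProcessor2_0
    #     model.unfuse_qkv_projections()
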
    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: torch.FloatTensor = None,
        pooled_projections: torch.FloatTensor = None,
        timestep: torch.LongTensor = None,
        block_controlnet_hidden_states: List = None,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
        """
        The [`SD3Transformer2DModel`] forward method.

        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`):
                Input `hidden_states`.
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`):
                Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
            pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected
                from the embeddings of input conditions.
            timestep (`torch.LongTensor`):
                Used to indicate the denoising step.
            block_controlnet_hidden_states (`list` of `torch.Tensor`, *optional*):
                A list of tensors that, if specified, are added to the residuals of the transformer blocks.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
                tuple.

        Returns:
            If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
            `tuple` where the first element is the sample tensor.
        """
        if joint_attention_kwargs is not None:
            joint_attention_kwargs = joint_attention_kwargs.copy()
            lora_scale = joint_attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        if USE_PEFT_BACKEND:
            # weight the lora layers by setting `lora_scale` for each PEFT layer
            scale_lora_layers(self, lora_scale)
        else:
            if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
                logger.warning(
                    "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
                )

        height, width = hidden_states.shape[-2:]

        hidden_states = self.pos_embed(hidden_states)  # takes care of adding positional embeddings too.
        temb = self.time_text_embed(timestep, pooled_projections)
        encoder_hidden_states = self.context_embedder(encoder_hidden_states)

        for index_block, block in enumerate(self.transformer_blocks):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    encoder_hidden_states,
                    temb,
                    joint_attention_kwargs,
                    **ckpt_kwargs,
                )

            else:
                encoder_hidden_states, hidden_states = block(
                    hidden_states=hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    temb=temb,
                    joint_attention_kwargs=joint_attention_kwargs,
                )

            # controlnet residual
            if block_controlnet_hidden_states is not None and block.context_pre_only is False:
                interval_control = len(self.transformer_blocks) // len(block_controlnet_hidden_states)
                hidden_states = hidden_states + block_controlnet_hidden_states[index_block // interval_control]

        hidden_states = self.norm_out(hidden_states, temb)
        hidden_states = self.proj_out(hidden_states)

        # unpatchify
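        # Shape walk-through (illustrative, assuming the default config with
        # sample_size=128, patch_size=2, out_channels=16 and a 128x128 latent):
        #   proj_out output:         (batch, 64 * 64, 2 * 2 * 16)
        #   first reshape:           (batch, 64, 64, 2, 2, 16)
        #   einsum "nhwpqc->nchpwq": (batch, 16, 64, 2, 64, 2)
        #   final reshape:           (batch, 16, 128, 128)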
        patch_size = self.config.patch_size
        height = height // patch_size
        width = width // patch_size

        hidden_states = hidden_states.reshape(
            shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels)
        )
        hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
        output = hidden_states.reshape(
            shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size)
        )

        if USE_PEFT_BACKEND:
            # remove `lora_scale` from each PEFT layer
            unscale_lora_layers(self, lora_scale)

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
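
A minimal smoke-test sketch (not part of the committed file): it builds the model with its default configuration and runs a single forward pass on random tensors. The import path assumes the repository root is on PYTHONPATH; the 333-token prompt length mirrors the usual CLIP + T5 concatenation (77 + 256) but any sequence length works.

import torch

from models.transformer_sd3 import SD3Transformer2DModel

# Default config: 18 layers, inner_dim = 18 * 64 = 1152, randomly initialized weights.
model = SD3Transformer2DModel()
model.eval()

latents = torch.randn(1, 16, 128, 128)       # (batch, in_channels, height, width)
prompt_embeds = torch.randn(1, 333, 4096)    # (batch, seq_len, joint_attention_dim)
pooled_prompt_embeds = torch.randn(1, 2048)  # (batch, pooled_projection_dim)
timestep = torch.tensor([999])

with torch.no_grad():
    out = model(
        hidden_states=latents,
        encoder_hidden_states=prompt_embeds,
        pooled_projections=pooled_prompt_embeds,
        timestep=timestep,
    )

print(out.sample.shape)  # torch.Size([1, 16, 128, 128])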