Himanshu-AT committed on
Commit
0712200
·
1 Parent(s): 784ce74

travel back

Browse files
Files changed (2) hide show
  1. controlnet_flux.py +4 -4
  2. transformer_flux.py +4 -4
controlnet_flux.py CHANGED
@@ -45,10 +45,10 @@ class FluxControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
45
  self,
46
  patch_size: int = 1,
47
  in_channels: int = 64,
48
- num_layers: int = 24,
49
- num_single_layers: int = 48,
50
- attention_head_dim: int = 256,
51
- num_attention_heads: int = 35,
52
  joint_attention_dim: int = 4096,
53
  pooled_projection_dim: int = 768,
54
  guidance_embeds: bool = False,
 
45
  self,
46
  patch_size: int = 1,
47
  in_channels: int = 64,
48
+ num_layers: int = 19,
49
+ num_single_layers: int = 38,
50
+ attention_head_dim: int = 128,
51
+ num_attention_heads: int = 24,
52
  joint_attention_dim: int = 4096,
53
  pooled_projection_dim: int = 768,
54
  guidance_embeds: bool = False,
transformer_flux.py CHANGED
@@ -278,10 +278,10 @@ class FluxTransformer2DModel(
278
  self,
279
  patch_size: int = 1,
280
  in_channels: int = 64,
281
- num_layers: int = 24,
282
- num_single_layers: int = 48,
283
- attention_head_dim: int = 256,
284
- num_attention_heads: int = 32,
285
  joint_attention_dim: int = 4096,
286
  pooled_projection_dim: int = 768,
287
  guidance_embeds: bool = False,
 
278
  self,
279
  patch_size: int = 1,
280
  in_channels: int = 64,
281
+ num_layers: int = 19,
282
+ num_single_layers: int = 38,
283
+ attention_head_dim: int = 128,
284
+ num_attention_heads: int = 24,
285
  joint_attention_dim: int = 4096,
286
  pooled_projection_dim: int = 768,
287
  guidance_embeds: bool = False,