{
    "reference_unet": {
        "_class_name": "UNet2DConditionModel",
        "act_fn": "silu",
        "attention_head_dim": 8,
        "block_out_channels": [
            320,
            640,
            1280,
            1280
        ],
        "center_input_sample": false,
        "cross_attention_dim": 768,
        "down_block_types": [
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "DownBlock2D"
        ],
        "downsample_padding": 1,
        "flip_sin_to_cos": true,
        "freq_shift": 0,
        "in_channels": 4,
        "layers_per_block": 2,
        "mid_block_scale_factor": 1,
        "norm_eps": 1e-05,
        "norm_num_groups": 32,
        "out_channels": 4,
        "sample_size": 64,
        "up_block_types": [
            "UpBlock2D",
            "CrossAttnUpBlock2D",
            "CrossAttnUpBlock2D",
            "CrossAttnUpBlock2D"
        ]
    },
    "denoising_unet": {
        "_class_name": "UNet2DConditionModel",
        "act_fn": "silu",
        "attention_head_dim": 8,
        "block_out_channels": [
            320,
            640,
            1280,
            1280
        ],
        "center_input_sample": false,
        "cross_attention_dim": 768,
        "down_block_types": [
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "DownBlock3D"
        ],
        "downsample_padding": 1,
        "flip_sin_to_cos": true,
        "freq_shift": 0,
        "in_channels": 4,
        "layers_per_block": 2,
        "mid_block_scale_factor": 1,
        "norm_eps": 1e-05,
        "norm_num_groups": 32,
        "out_channels": 4,
        "sample_size": 64,
        "up_block_types": [
            "UpBlock3D",
            "CrossAttnUpBlock3D",
            "CrossAttnUpBlock3D",
            "CrossAttnUpBlock3D"
        ],
        "mid_block_type": "UNetMidBlock3DCrossAttn",
        "use_inflated_groupnorm": true,
        "unet_use_cross_frame_attention": false,
        "unet_use_temporal_attention": false,
        "use_motion_module": true,
        "motion_module_resolutions": [
            1,
            2,
            4,
            8
        ],
        "motion_module_mid_block": true,
        "motion_module_decoder_only": false,
        "motion_module_type": "Vanilla",
        "motion_module_kwargs": {
            "num_attention_heads": 8,
            "num_transformer_block": 1,
            "attention_block_types": [
                "Temporal_Self",
                "Temporal_Self"
            ],
            "temporal_position_encoding": true,
            "temporal_position_encoding_max_len": 32,
            "temporal_attention_dim_div": 1
        }
    },
    "vae": {
        "_class_name": "AutoencoderKL",
        "act_fn": "silu",
        "block_out_channels": [
            128,
            256,
            512,
            512
        ],
        "down_block_types": [
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D"
        ],
        "in_channels": 3,
        "latent_channels": 4,
        "layers_per_block": 2,
        "norm_num_groups": 32,
        "out_channels": 3,
        "sample_size": 256,
        "up_block_types": [
            "UpDecoderBlock2D",
            "UpDecoderBlock2D",
            "UpDecoderBlock2D",
            "UpDecoderBlock2D"
        ]
    },
    "image_encoder": {
        "architectures": [
            "CLIPVisionModelWithProjection"
        ],
        "attention_dropout": 0,
        "dropout": 0,
        "hidden_act": "quick_gelu",
        "hidden_size": 1024,
        "image_size": 224,
        "initializer_factor": 1,
        "initializer_range": 0.02,
        "intermediate_size": 4096,
        "layer_norm_eps": 0.00001,
        "model_type": "clip_vision_model",
        "num_attention_heads": 16,
        "num_channels": 3,
        "num_hidden_layers": 24,
        "patch_size": 14,
        "projection_dim": 768,
        "torch_dtype": "float32"
    },
    "guidance_encoder": {
        "guidance_embedding_channels": 320,
        "guidance_input_channels": 3,
        "block_out_channels": [
            16,
            32,
            96,
            256
        ]
    },
    "scheduler": {
        "num_train_timesteps": 1000,
        "beta_start": 0.00085,
        "beta_end": 0.012,
        "beta_schedule": "linear",
        "steps_offset": 1,
        "clip_sample": false,
        "rescale_betas_zero_snr": true,
        "timestep_spacing": "trailing",
        "prediction_type": "v_prediction"
    }
}