sypyp committed
Commit ea381f5 · Parent: f7a92df

init upload

Files changed (23)
  1. image_encoder/config.json +22 -0
  2. image_encoder/model.safetensors +3 -0
  3. image_processor/preprocessor_config.json +28 -0
  4. model_index.json +32 -0
  5. scheduler/scheduler_config.json +28 -0
  6. transformer_i2v_480p/config.json +26 -0
  7. transformer_i2v_480p/diffusion_pytorch_model-00001-of-00007.safetensors +3 -0
  8. transformer_i2v_480p/diffusion_pytorch_model-00002-of-00007.safetensors +3 -0
  9. transformer_i2v_480p/diffusion_pytorch_model-00003-of-00007.safetensors +3 -0
  10. transformer_i2v_480p/diffusion_pytorch_model-00004-of-00007.safetensors +3 -0
  11. transformer_i2v_480p/diffusion_pytorch_model-00005-of-00007.safetensors +3 -0
  12. transformer_i2v_480p/diffusion_pytorch_model-00006-of-00007.safetensors +3 -0
  13. transformer_i2v_480p/diffusion_pytorch_model-00007-of-00007.safetensors +3 -0
  14. transformer_i2v_480p/diffusion_pytorch_model.safetensors.index.json +0 -0
  15. transformer_i2v_720p/config.json +26 -0
  16. transformer_i2v_720p/diffusion_pytorch_model-00001-of-00007.safetensors +3 -0
  17. transformer_i2v_720p/diffusion_pytorch_model-00002-of-00007.safetensors +3 -0
  18. transformer_i2v_720p/diffusion_pytorch_model-00003-of-00007.safetensors +3 -0
  19. transformer_i2v_720p/diffusion_pytorch_model-00004-of-00007.safetensors +3 -0
  20. transformer_i2v_720p/diffusion_pytorch_model-00005-of-00007.safetensors +3 -0
  21. transformer_i2v_720p/diffusion_pytorch_model-00006-of-00007.safetensors +3 -0
  22. transformer_i2v_720p/diffusion_pytorch_model-00007-of-00007.safetensors +3 -0
  23. transformer_i2v_720p/diffusion_pytorch_model.safetensors.index.json +0 -0
image_encoder/config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "architectures": [
+     "CLIPVisionModel"
+   ],
+   "attention_dropout": 0.0,
+   "dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_size": 1280,
+   "image_size": 224,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_norm_eps": 1e-05,
+   "model_type": "clip_vision_model",
+   "num_attention_heads": 16,
+   "num_channels": 3,
+   "num_hidden_layers": 32,
+   "patch_size": 14,
+   "projection_dim": 1024,
+   "torch_dtype": "float32",
+   "transformers_version": "4.49.0"
+ }
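The config above describes a ViT-H/14-sized CLIP vision tower (32 layers, hidden size 1280, 16 heads, patch size 14). A minimal loading sketch with transformers; the repository id is a placeholder for wherever this upload ends up on the Hub:

from transformers import CLIPVisionModel

repo_id = "sypyp/<this-repo>"  # placeholder repository id (assumption)
image_encoder = CLIPVisionModel.from_pretrained(repo_id, subfolder="image_encoder")
print(image_encoder.config.hidden_size)  # 1280, matching the config above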
image_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8eb46f477ef5e1859b659014aed6ca56cdc207c12cb7a0f9d61b4d80a1a7bb84
+ size 2523128312
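Every .safetensors entry in this commit is a Git LFS pointer file: the repository itself stores only the spec version, the SHA-256 of the real payload, and its byte size, while the weights live in LFS storage. A small standard-library sketch for checking a downloaded file against the pointer above; the local path is a placeholder:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so multi-gigabyte shards never need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

expected = "8eb46f477ef5e1859b659014aed6ca56cdc207c12cb7a0f9d61b4d80a1a7bb84"  # oid from the pointer
assert sha256_of("image_encoder/model.safetensors") == expected  # placeholder local path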
image_processor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": false,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
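This preprocessor uses the standard CLIP normalization statistics, rescales by 1/255, resizes to 224x224 with bicubic resampling (resample 3), and skips center cropping. A sketch of preprocessing a conditioning frame and running it through the image encoder, reusing the placeholder repository id from above and a placeholder input image:

from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModel

repo_id = "sypyp/<this-repo>"  # placeholder repository id (assumption)
processor = CLIPImageProcessor.from_pretrained(repo_id, subfolder="image_processor")
encoder = CLIPVisionModel.from_pretrained(repo_id, subfolder="image_encoder")

image = Image.open("first_frame.png").convert("RGB")  # placeholder input image
inputs = processor(images=image, return_tensors="pt")  # pixel_values: (1, 3, 224, 224)
outputs = encoder(**inputs)
print(inputs["pixel_values"].shape, outputs.last_hidden_state.shape)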
model_index.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "WanI2VPipeline",
+   "_diffusers_version": "0.33.0.dev0",
+   "image_encoder": [
+     "transformers",
+     "CLIPVisionModel"
+   ],
+   "image_processor": [
+     "transformers",
+     "CLIPImageProcessor"
+   ],
+   "scheduler": [
+     "diffusers",
+     "UniPCMultistepScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "UMT5EncoderModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "T5TokenizerFast"
+   ],
+   "transformer": [
+     "diffusers",
+     "WanTransformer3DModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKLWan"
+   ]
+ }
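model_index.json wires the components into a Wan image-to-video pipeline. Note that this commit only adds the image encoder, image processor, scheduler, and the two transformer variants; the text_encoder, tokenizer, and vae entries listed above have to be present in the repository (or passed in explicitly) before the pipeline can be assembled. A hedged loading sketch, assuming those remaining folders exist and using the placeholder repository id; if the WanI2VPipeline class name does not resolve in your diffusers build (released versions expose WanImageToVideoPipeline), loading the components individually, as sketched for the transformer below, is the fallback:

import torch
from diffusers import DiffusionPipeline

repo_id = "sypyp/<this-repo>"  # placeholder repository id (assumption)
# DiffusionPipeline resolves the concrete class from _class_name in model_index.json.
pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
pipe.to("cuda")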
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_class_name": "UniPCMultistepScheduler",
+   "_diffusers_version": "0.33.0.dev0",
+   "beta_end": 0.02,
+   "beta_schedule": "linear",
+   "beta_start": 0.0001,
+   "disable_corrector": [],
+   "dynamic_thresholding_ratio": 0.995,
+   "final_sigmas_type": "zero",
+   "flow_shift": 7.0,
+   "lower_order_final": true,
+   "num_train_timesteps": 1000,
+   "predict_x0": true,
+   "prediction_type": "flow_prediction",
+   "rescale_betas_zero_snr": false,
+   "sample_max_value": 1.0,
+   "solver_order": 2,
+   "solver_p": null,
+   "solver_type": "bh2",
+   "steps_offset": 0,
+   "thresholding": false,
+   "timestep_spacing": "linspace",
+   "trained_betas": null,
+   "use_beta_sigmas": false,
+   "use_exponential_sigmas": false,
+   "use_flow_sigmas": true,
+   "use_karras_sigmas": false
+ }
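The scheduler is a second-order UniPC sampler configured for flow matching (use_flow_sigmas true, prediction_type flow_prediction) with flow_shift 7.0. A sketch of loading it on its own, or rebuilding it from the same config with a different shift; the override value is purely illustrative:

from diffusers import UniPCMultistepScheduler

repo_id = "sypyp/<this-repo>"  # placeholder repository id (assumption)
scheduler = UniPCMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler")
print(scheduler.config.flow_shift)  # 7.0, as in the config above

# Illustrative override: rebuild the scheduler with a different shift value.
scheduler = UniPCMultistepScheduler.from_config(scheduler.config, flow_shift=5.0)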
transformer_i2v_480p/config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_class_name": "WanTransformer3DModel",
+   "_diffusers_version": "0.33.0.dev0",
+   "add_img_emb": true,
+   "added_kv_proj_dim": 5120,
+   "attention_head_dim": 128,
+   "cross_attn_norm": true,
+   "eps": 1e-06,
+   "ffn_dim": 13824,
+   "freq_dim": 256,
+   "in_channels": 36,
+   "num_attention_heads": 40,
+   "num_layers": 40,
+   "out_channels": 16,
+   "patch_size": [
+     1,
+     2,
+     2
+   ],
+   "qk_norm": true,
+   "text_dim": 4096,
+   "window_size": [
+     -1,
+     -1
+   ]
+ }
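The 480p and 720p transformer folders share an identical architecture (40 layers, 40 heads of dim 128, FFN dim 13824, 36 input channels for the latent plus image-conditioning inputs, and added_kv_proj_dim 5120, presumably for the image-embedding cross-attention); only the trained weights differ. Since model_index.json declares a single transformer slot, one variant has to be chosen explicitly at load time. A sketch under the same placeholder repository id:

import torch
from diffusers import DiffusionPipeline, WanTransformer3DModel

repo_id = "sypyp/<this-repo>"  # placeholder repository id (assumption)
transformer = WanTransformer3DModel.from_pretrained(
    repo_id, subfolder="transformer_i2v_480p", torch_dtype=torch.bfloat16
)  # use subfolder="transformer_i2v_720p" for the 720p variant

# Pass the chosen variant in so the pipeline does not look for a "transformer" folder.
pipe = DiffusionPipeline.from_pretrained(
    repo_id, transformer=transformer, torch_dtype=torch.bfloat16
)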
transformer_i2v_480p/diffusion_pytorch_model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fda6557f558eb0322fbf8153792a2ce63bdb23dff5509d50b3ee5200169c8683
+ size 9952163512
transformer_i2v_480p/diffusion_pytorch_model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03c82cc054dbe4c74e9288e2f3063a0b6fb4bc3500c1e68d0dbc787246e8ec8a
+ size 9797226656
transformer_i2v_480p/diffusion_pytorch_model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd244048f9c3ecbad51e1ce0c0241f104dfdb6107743253e0c17e73525b33bb4
+ size 9975437232
transformer_i2v_480p/diffusion_pytorch_model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e761b1e69b7598774bf2c1d5843eb530ec33bc33f857d768f85b0aa50e5a300
+ size 9975566544
transformer_i2v_480p/diffusion_pytorch_model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08cd3a3e4159f3e04a4c03d772c7de683055173cb6ecb8426be90f61323e699a
+ size 9902022768
transformer_i2v_480p/diffusion_pytorch_model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a232aa38c5457c4fcb170dbafca3580df6a35fe551038c0c779e4754225a1d0
+ size 9902063944
transformer_i2v_480p/diffusion_pytorch_model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2650699150328d85d9cc392b8fe2bf78575dad38059e0d2137dfdf6f3080d443
+ size 6075990120
transformer_i2v_480p/diffusion_pytorch_model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
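Each transformer's weights are split into seven shards, and this index file (not rendered here) follows the standard Hugging Face sharded-checkpoint layout: a metadata block with the total byte size plus a weight_map from parameter names to shard filenames. A sketch of resolving and loading the shards manually with safetensors, assuming a local copy of the folder:

import json
from safetensors.torch import load_file

folder = "transformer_i2v_480p"  # local path to the downloaded folder (assumption)
with open(f"{folder}/diffusion_pytorch_model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])              # total parameter bytes across shards
shards = sorted(set(index["weight_map"].values()))  # the seven shard filenames

state_dict = {}
for shard in shards:
    state_dict.update(load_file(f"{folder}/{shard}"))
print(len(state_dict), "tensors loaded")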
 
transformer_i2v_720p/config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_class_name": "WanTransformer3DModel",
+   "_diffusers_version": "0.33.0.dev0",
+   "add_img_emb": true,
+   "added_kv_proj_dim": 5120,
+   "attention_head_dim": 128,
+   "cross_attn_norm": true,
+   "eps": 1e-06,
+   "ffn_dim": 13824,
+   "freq_dim": 256,
+   "in_channels": 36,
+   "num_attention_heads": 40,
+   "num_layers": 40,
+   "out_channels": 16,
+   "patch_size": [
+     1,
+     2,
+     2
+   ],
+   "qk_norm": true,
+   "text_dim": 4096,
+   "window_size": [
+     -1,
+     -1
+   ]
+ }
transformer_i2v_720p/diffusion_pytorch_model-00001-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:457639497b204e838c0b5e7f5955e8b1b0f9f04213bd9853e40cd77771569685
+ size 9952163512
transformer_i2v_720p/diffusion_pytorch_model-00002-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eda5a7f06db0164b852b52fc56db9cb82c502e963f0e7d407af8e13bac31826b
+ size 9797226656
transformer_i2v_720p/diffusion_pytorch_model-00003-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be4dfe5a12ac35c857fe307e91899d0f9c473551c2a828e80718f340489b27bd
+ size 9975437232
transformer_i2v_720p/diffusion_pytorch_model-00004-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:744a014df498e5b579d34a12e9ea836b2cd9adf9e0ef77b4f7378ad762091573
+ size 9975566544
transformer_i2v_720p/diffusion_pytorch_model-00005-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b49e16fed3592ffc07d65503f39517f3190d8e52130418706ea1ba678f207050
+ size 9902022768
transformer_i2v_720p/diffusion_pytorch_model-00006-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d173bad73ebcdde9fe4efef487d1df9c993dfc3590b041e05e7806418479be52
+ size 9902063944
transformer_i2v_720p/diffusion_pytorch_model-00007-of-00007.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68b4173328f39217e177fe40cf94e509a213955aea842b086f3fd17e6c286832
+ size 6075990120
transformer_i2v_720p/diffusion_pytorch_model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff