Commit 1309fc6 · verified · committed by a-r-r-o-w (HF staff) · Parent(s): e2aea09

Update README.md

Files changed (1): README.md (+14, −12)
README.md

````diff
@@ -155,25 +155,27 @@ Wan can also be run directly using 🤗 Diffusers!
 
 ```python
 import torch
-from diffusers import AutoencoderKLWan, WanPipeline
-from diffusers.utils import export_to_video
+from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
+from diffusers.utils import export_to_video, load_image
 
-# Available models: Wan-AI/Wan2.1-T2V-14B-Diffusers, Wan-AI/Wan2.1-T2V-1.3B-Diffusers
-model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+# Available models: Wan-AI/Wan2.1-I2V-14B-480P-Diffusers, Wan-AI/Wan2.1-I2V-1.3B-720P-Diffusers
+model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
 vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
-pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
+pipe = WanImageToVideoPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
 pipe.to("cuda")
 
-prompt = "A cat walks on the grass, realistic"
+height, width = 480, 832
+image = load_image(
+    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
+).resize((width, height))
+prompt = (
+    "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in "
+    "the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
+)
 negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"
 
 output = pipe(
-    prompt=prompt,
-    negative_prompt=negative_prompt,
-    height=480,
-    width=832,
-    num_frames=81,
-    guidance_scale=5.0
+    image=image, prompt=prompt, negative_prompt=negative_prompt, num_frames=81, guidance_scale=5.0
 ).frames[0]
 export_to_video(output, "output.mp4", fps=15)
 ```
````
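
Pieced together from the added lines above, the image-to-video snippet as it reads after this commit is sketched below. The model id, image URL, prompts, and call arguments are taken verbatim from the diff; only the comments are editorial.

```python
import torch
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

# Available models (as listed in this commit): Wan-AI/Wan2.1-I2V-14B-480P-Diffusers, Wan-AI/Wan2.1-I2V-1.3B-720P-Diffusers
model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"

# The VAE is loaded in float32 for numerical stability; the rest of the pipeline runs in bfloat16.
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
pipe = WanImageToVideoPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Resize the conditioning image to the 480p (832x480) resolution this checkpoint targets.
height, width = 480, 832
image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
).resize((width, height))

prompt = (
    "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in "
    "the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
)
negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"

# 81 frames exported at 15 fps give roughly a five-second clip.
output = pipe(
    image=image, prompt=prompt, negative_prompt=negative_prompt, num_frames=81, guidance_scale=5.0
).frames[0]
export_to_video(output, "output.mp4", fps=15)
```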