seokochin committed on
Commit feedcc2 · verified · 1 Parent(s): 753e73c

Upload 4 files

Files changed (3)
  1. app.py +36 -0
  2. generate.py +29 -0
  3. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,36 @@
+ import streamlit as st
+ import torch
+ import subprocess
+ import os
+
+ # Check if GPU is available
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # Title
+ st.title("WAN 2.1 - 1.3B Text-to-Video Generator 🎥")
+
+ # Model selection
+ model_path = "./Wan2.1-T2V-1.3B"
+
+ # Input fields
+ prompt = st.text_area("Enter your text prompt:", "A cat in military dress wearing headphones, laughing and walking.")
+ frame_num = st.slider("Number of frames:", min_value=30, max_value=180, value=60, step=10)
+ resolution = st.selectbox("Select resolution:", ["832*480", "1280*720"])
+ sample_steps = st.slider("Sampling steps:", min_value=10, max_value=50, value=30, step=5)
+
+ # Button to generate video
+ if st.button("Generate Video"):
+     st.info("Generating video... Please wait.")
+
+     # Run WAN 2.1 with user settings
+     command = f"python generate.py --task t2v-1.3B --size {resolution} --frame_num {frame_num} --sample_steps {sample_steps} --ckpt_dir {model_path} --prompt \"{prompt}\""
+
+     process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+     process.wait()
+
+     # Display video if generated
+     if os.path.exists("output.mp4"):
+         st.video("output.mp4")
+         st.success("✅ Video generated successfully!")
+     else:
+         st.error("❌ Video generation failed. Check logs for details.")
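
Note (not part of the commit): app.py builds the generate.py invocation as a single f-string and runs it with shell=True, so a prompt containing quotes or shell metacharacters can break or alter the command. A minimal sketch of an alternative that passes the arguments as a list instead; the example values simply mirror the widget defaults above:

import subprocess

# Example values standing in for the Streamlit widget state above.
prompt = "A cat in military dress wearing headphones, laughing and walking."
frame_num = 60
resolution = "832*480"
sample_steps = 30
model_path = "./Wan2.1-T2V-1.3B"

# Passing an argument list avoids shell quoting/escaping of the user prompt.
cmd = [
    "python", "generate.py",
    "--task", "t2v-1.3B",
    "--size", resolution,
    "--frame_num", str(frame_num),
    "--sample_steps", str(sample_steps),
    "--ckpt_dir", model_path,
    "--prompt", prompt,
]
result = subprocess.run(cmd, capture_output=True, text=True)
print(result.returncode)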
generate.py ADDED
@@ -0,0 +1,29 @@
+ import argparse
+ import torch
+ import subprocess
+ import os
+
+ # Define Arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--task", type=str, default="t2v-1.3B")
+ parser.add_argument("--size", type=str, default="832*480")
+ parser.add_argument("--frame_num", type=int, default=60)
+ parser.add_argument("--sample_steps", type=int, default=30)
+ parser.add_argument("--ckpt_dir", type=str, default="./Wan2.1-T2V-1.3B")
+ parser.add_argument("--prompt", type=str, required=True)
+ args = parser.parse_args()
+
+ # Check GPU Availability
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ print(f"Using device: {device}")
+
+ # Run WAN 2.1 Inference
+ command = f"python run_model.py --task {args.task} --size {args.size} --frame_num {args.frame_num} --sample_steps {args.sample_steps} --ckpt_dir {args.ckpt_dir} --prompt \"{args.prompt}\" --device {device}"
+
+ subprocess.run(command, shell=True)
+
+ # Save output
+ if os.path.exists("output.mp4"):
+     print("✅ Video generated successfully: output.mp4")
+ else:
+     print("❌ Error generating video.")
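
Note (not part of the commit): generate.py shells out to run_model.py, which is not included in this upload, and it decides success only by checking for output.mp4 rather than the subprocess exit code. A minimal sketch of also surfacing the exit status, assuming a command string like the one built above:

import subprocess

# Hypothetical command; run_model.py is referenced by generate.py but is not
# part of this commit, so this is illustrative only.
command = 'python run_model.py --task t2v-1.3B --size 832*480 --prompt "test prompt"'

result = subprocess.run(command, shell=True, capture_output=True, text=True)
if result.returncode != 0:
    print("❌ Inference process exited with an error:")
    print(result.stderr)
else:
    print("✅ Inference process finished; check for output.mp4.")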
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ streamlit
+ torch
+ transformers
+ diffusers
+ opencv-python
+ pillow
+ huggingface_hub
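
A quick, optional sanity check (not part of the commit) that the packages above resolve to importable modules; note that opencv-python is imported as cv2 and pillow as PIL:

import importlib

# Import names differ from the PyPI package names for OpenCV and Pillow.
for name in ("streamlit", "torch", "transformers", "diffusers", "cv2", "PIL", "huggingface_hub"):
    importlib.import_module(name)
    print(f"{name}: OK")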