Spaces: Running on Zero

zhiweili committed
Commit · d4160b5
1 Parent(s): 1b56e42

add image2video pipeline

Browse files
- app_i2v.py +2 -2
- app_t2v.py +3 -3
- requirements.txt +1 -1
- video_model.py +6 -3
app_i2v.py
CHANGED

@@ -7,7 +7,7 @@ import tempfile
 
 from diffusers.utils import export_to_video, load_image
 
-from video_model import
+from video_model import i2v_pipe
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -33,7 +33,7 @@ def create_demo() -> gr.Blocks:
         run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
         try:
             with torch.no_grad():
-                video =
+                video = i2v_pipe(
                     image=input_image,
                     prompt=prompt,
                     negative_prompt=negative_prompt,
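The hunk above only shows the first keyword arguments of the new i2v_pipe call. A minimal sketch of how the completed image-to-video call likely fits together follows; the resolution, frame count, step count, fps and the .frames[0] / export_to_video handling are assumptions taken from the standard diffusers LTXImageToVideoPipeline interface, not part of this commit.

# Hypothetical completion of the call shown in the diff (parameter values are
# assumptions based on typical LTXImageToVideoPipeline defaults, not this commit).
with torch.no_grad():
    video = i2v_pipe(
        image=input_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=704,
        height=480,
        num_frames=161,
        num_inference_steps=50,
        generator=generator,
    ).frames[0]

# Write the frames out as an .mp4 for the Gradio video component.
output_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
export_to_video(video, output_path, fps=24)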
app_t2v.py
CHANGED

@@ -5,9 +5,9 @@ import torch
 import gc
 import tempfile
 
-from diffusers.utils import export_to_video
+from diffusers.utils import export_to_video
 
-from video_model import
+from video_model import t2v_pipe
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -31,7 +31,7 @@ def create_demo() -> gr.Blocks:
         run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
         try:
             with torch.no_grad():
-                video =
+                video = t2v_pipe(
                     prompt=prompt,
                     negative_prompt=negative_prompt,
                     generator=generator,
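The text-to-video path follows the same pattern, just without the input image. A minimal sketch of the completed t2v_pipe call, under the same assumptions about the LTXPipeline parameters:

# Hypothetical completion of the t2v_pipe call (parameter values are
# assumptions, not part of this commit).
with torch.no_grad():
    video = t2v_pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        generator=generator,
        width=704,
        height=480,
        num_frames=161,
        num_inference_steps=50,
    ).frames[0]

output_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
export_to_video(video, output_path, fps=24)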
requirements.txt
CHANGED

@@ -1,7 +1,7 @@
 gradio
 torch
 torchvision
-
+diffusers
 transformers
 accelerate
 mediapipe
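The LTX pipelines only exist in recent diffusers releases, and the entry above is unpinned, so the Space pulls whatever version is latest at build time. A quick sanity check, assuming the LTX classes shipped in diffusers 0.32.0 or later, that the installed build exposes what video_model.py imports:

# Sanity check for the unpinned dependency: confirm the installed diffusers
# release exposes the LTX pipeline classes (assumption: requires >= 0.32.0).
import diffusers
print(diffusers.__version__)

from diffusers import LTXPipeline, LTXImageToVideoPipeline  # ImportError on older releases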
video_model.py
CHANGED

@@ -1,9 +1,12 @@
 import torch
 
-from diffusers import LTXPipeline
+from diffusers import LTXPipeline, LTXImageToVideoPipeline
 
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-
-
+t2v_pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16)
+t2v_pipe.to(device)
+
+i2v_pipe = LTXImageToVideoPipeline.from_pretrained("Lightricks/LTX-Image", torch_dtype=torch.bfloat16)
+i2v_pipe.to(device)
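As committed, both pipelines are loaded in bfloat16 and moved to the GPU at import time, which is a tight fit for a ZeroGPU Space. A lighter-weight alternative sketch is below; it also loads the image-to-video pipeline from the shared "Lightricks/LTX-Video" checkpoint, as the diffusers LTX examples do, since the "Lightricks/LTX-Image" id in the hunk above may not resolve to a published repository.

# Alternative loading sketch (assumptions: both pipelines share the
# "Lightricks/LTX-Video" checkpoint, and CPU offload is preferred over
# keeping both pipelines resident on the GPU at once).
import torch
from diffusers import LTXPipeline, LTXImageToVideoPipeline

t2v_pipe = LTXPipeline.from_pretrained(
    "Lightricks/LTX-Video", torch_dtype=torch.bfloat16
)
i2v_pipe = LTXImageToVideoPipeline.from_pretrained(
    "Lightricks/LTX-Video", torch_dtype=torch.bfloat16
)

# Move submodules to the GPU only while they are needed, then back to CPU,
# so the two pipelines can coexist on a single card.
t2v_pipe.enable_model_cpu_offload()
i2v_pipe.enable_model_cpu_offload()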