1inkusFace committed
Commit 0e6bdde · verified · Parent: 1bd4e39

Create app.py

Files changed (1): app.py (+90, -0)
app.py ADDED
@@ -0,0 +1,90 @@
+ import spaces
+ import torch
+ import os
+ from diffusers import AutoencoderKLLTXVideo, LTXImageToVideoPipeline, LTXVideoTransformer3DModel
+ from diffusers.utils import export_to_video, load_image  # , PIL_INTERPOLATION
+
+ import gradio as gr
+ import numpy as np
+ import random
+ from PIL import Image
+ import imageio.v3
+
+ # Force full-precision, non-TF32 matmul and cuDNN paths.
+ torch.backends.cuda.matmul.allow_tf32 = False
+ torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
+ torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
+ torch.backends.cudnn.allow_tf32 = False
+ torch.backends.cudnn.deterministic = False
+ torch.backends.cudnn.benchmark = False
+ # preferred_blas_library is a function, not an attribute; assigning to it does nothing.
+ torch.backends.cuda.preferred_blas_library(backend="cublas")
+ # torch.backends.cuda.preferred_linalg_library(backend="cusolver")
+ torch.set_float32_matmul_precision("highest")
+ # os.putenv does not update os.environ, so huggingface_hub would never see the flag.
+ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+ HF_TOKEN = os.getenv("HF_TOKEN")
+
+ MAX_SEED = np.iinfo(np.int64).max
+
+ # from_single_file expects a Hub blob URL ("https://huggingface.co/<repo>/blob/main/<file>"),
+ # not a bare repo path.
+ single_file_url = "https://huggingface.co/Lightricks/LTX-Video/blob/main/ltx-video-2b-v0.9.1.safetensors"
+ # vae_url = "https://huggingface.co/spacepxl/ltx-video-0.9-vae-finetune/blob/main/ltx-video-v0.9-vae_finetune_decoder_111k_smooth.safetensors"
+
+ transformer = LTXVideoTransformer3DModel.from_single_file(single_file_url, token=HF_TOKEN)
+
+ # vae = AutoencoderKLLTXVideo.from_single_file(vae_url, token=HF_TOKEN)
+
+ pipe = LTXImageToVideoPipeline.from_pretrained(
+     "Lightricks/LTX-Video", token=HF_TOKEN, transformer=transformer
+ ).to(torch.device("cuda"), torch.bfloat16)
+
+ @spaces.GPU(duration=80)
+ def generate_video(
+     image_url,
+     prompt,
+     negative_prompt,
+     width,
+     height,
+     num_frames,
+     guidance_scale,
+     num_inference_steps,
+     fps,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     seed = random.randint(0, MAX_SEED)
+     generator = torch.Generator(device="cuda").manual_seed(seed)
+     image = Image.open(image_url).convert("RGB")
+     # PIL's resize takes (width, height) and returns a new image; the original
+     # passed (height, width) and discarded the result.
+     image = image.resize((width, height), Image.LANCZOS)
+     video = pipe(
+         image=image,
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         width=width,
+         height=height,
+         num_frames=num_frames,
+         frame_rate=fps,
+         guidance_scale=guidance_scale,
+         generator=generator,
+         num_inference_steps=num_inference_steps,
+         output_type="pt",
+         max_sequence_length=512,
+     ).frames
+     # frames has shape (batch, frames, channels, height, width); take the first
+     # video and convert to (frames, height, width, channels) float32 for export.
+     video = video[0]
+     video = video.permute(0, 2, 3, 1).detach().cpu().to(torch.float32).numpy()
+     export_to_video(video, "output.mp4", fps=fps)
+     return "output.mp4"
+
+ iface = gr.Interface(
+     fn=generate_video,
+     inputs=[
+         gr.Image(type="filepath", label="Image"),
+         gr.Textbox(lines=2, label="Prompt"),
+         gr.Textbox(lines=2, label="Negative Prompt"),
+         gr.Slider(minimum=256, maximum=1024, step=8, value=704, label="Width"),
+         gr.Slider(minimum=256, maximum=1024, step=8, value=704, label="Height"),
+         gr.Slider(minimum=16, maximum=256, step=16, value=111, label="Number of Frames"),
+         gr.Slider(minimum=0.0, maximum=30.0, step=0.01, value=3.8, label="Guidance Scale"),
+         gr.Slider(minimum=1, maximum=100, step=1, value=40, label="Number of Inference Steps"),
+         gr.Slider(minimum=1, maximum=60, step=1, value=25, label="FPS"),
+     ],
+     outputs=gr.Video(label="Generated Video"),
+     title="LTX-Video Test D",
+     description="Generate video from image with LTX-Video.",
+ )
+
+ iface.launch()
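
For a quick smoke test without the Gradio UI, the handler can be called directly from the bottom of app.py, before iface.launch() (a minimal sketch; it assumes a local CUDA GPU, "input.png" is a placeholder path, and off ZeroGPU hardware the spaces.GPU decorator should act as a no-op):

# Hypothetical local smoke test; argument values mirror the UI defaults above.
out_path = generate_video(
    image_url="input.png",                      # placeholder input image path
    prompt="a calm lake at sunrise",            # example prompt
    negative_prompt="worst quality, blurry",    # example negative prompt
    width=704,
    height=704,
    num_frames=111,
    guidance_scale=3.8,
    num_inference_steps=40,
    fps=25,
)
print(out_path)  # "output.mp4"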