|
import os
import shutil

import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
|
|
|
|
|
# Configure the CUDA caching allocator BEFORE touching it: the setting is read
# when the allocator initializes, so set it first. (Fixes the original
# unterminated string literal and the missing `os` import.)
# NOTE(review): ideally this is set before `import torch`; confirm it still
# takes effect in this environment.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Release any cached, unused GPU memory held over from a previous run.
# This is a no-op when CUDA has not been initialized.
torch.cuda.empty_cache()
|
|
|
# Load the text-to-video pipeline in half precision and move it onto the GPU.
pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# Run the diffusion model for the prompt; `.frames` holds a batch of clips,
# and we keep the first (and only) one.
prompt = "Spiderman is surfing"
video_frames = pipe(prompt, num_frames=75).frames[0]
|
|
|
# Write the generated frames straight to the desired output location.
# `export_to_video` accepts a target path, so there is no need to export to a
# temporary file and then `shutil.move` it afterwards (the original two-step
# approach). It returns the path it wrote to.
output_path = "/kaggle/working/spiderman_surfing.mp4"
video_path = export_to_video(video_frames, output_video_path=output_path)

print(f"Video saved at: {output_path}")
|
|
|
# Utility function to convert a byte count to gibibytes.
def bytes_to_giga_bytes(num_bytes: float) -> float:
    """Convert *num_bytes* to gibibytes (GiB, i.e. 1024**3 bytes).

    The parameter was renamed from ``bytes``, which shadowed the builtin
    type; the sole call site passes it positionally, so callers are
    unaffected.
    """
    return num_bytes / 1024 / 1024 / 1024
|
|
|
# Report the peak CUDA memory allocated at any point during this run.
peak_gb = bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
print(f"Max memory allocated: {peak_gb} GB")