TroglodyteDerivations committed
Commit 8cba5d9 · verified · 1 Parent(s): 3d8e4c9

Upload 2 files

Files changed (2)
  1. txt2vid_1.py +37 -0
  2. txt2vid_2.py +36 -0
txt2vid_1.py ADDED
@@ -0,0 +1,37 @@
+ !pip install diffusers --upgrade
+
+ !pip install invisible_watermark transformers accelerate safetensors
+
+ from kaggle_secrets import UserSecretsClient
+ user_secrets = UserSecretsClient()
+ secret_value_0 = user_secrets.get_secret("huggingface")
+
+ import torch
+ from diffusers import DiffusionPipeline
+ from diffusers.utils import export_to_video
+ import shutil  # Import shutil for cross-filesystem moves
+
+ # Load the pipeline
+ pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
+ pipe = pipe.to("cuda")
+
+ # Generate video frames
+ prompt = "Spiderman is surfing"
+ video_frames = pipe(prompt).frames[0]
+
+ # Export video to a temporary path
+ video_path = export_to_video(video_frames)  # This returns the path to the saved video
+
+ # Define the desired output path
+ output_path = "/kaggle/working/spiderman_surfing.mp4"
+
+ # Move the video to the desired output path using shutil.move
+ shutil.move(video_path, output_path)
+ print(f"Video saved at: {output_path}")
+
+ # Utility function to convert bytes to gigabytes
+ def bytes_to_giga_bytes(bytes):
+     return bytes / 1024 / 1024 / 1024
+
+ # Print memory usage
+ print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB")
txt2vid_2.py ADDED
@@ -0,0 +1,36 @@
+ import os
+ import torch
+ from diffusers import DiffusionPipeline
+ from diffusers.utils import export_to_video
+ import shutil  # Import shutil for cross-filesystem moves
+
+ # Set memory allocation configuration before CUDA is first used, so the caching allocator picks it up
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
+ # Clear GPU memory
+ torch.cuda.empty_cache()
+
+ # Load the pipeline
+ pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
+ pipe = pipe.to("cuda")
+
+ # Generate video frames
+ prompt = "Spiderman is surfing"
+ video_frames = pipe(prompt, num_frames=75).frames[0]
+
+ # Export video to a temporary path
+ video_path = export_to_video(video_frames)  # This returns the path to the saved video
+
+ # Define the desired output path
+ output_path = "/kaggle/working/spiderman_surfing.mp4"
+
+ # Move the video to the desired output path using shutil.move
+ shutil.move(video_path, output_path)
+ print(f"Video saved at: {output_path}")
+
+ # Utility function to convert bytes to gigabytes
+ def bytes_to_giga_bytes(bytes):
+     return bytes / 1024 / 1024 / 1024
+
+ # Print memory usage
+ print(f"Max memory allocated: {bytes_to_giga_bytes(torch.cuda.max_memory_allocated())} GB")