Manjushri committed
Commit aefd676 · verified · 1 Parent(s): 1bfa2fc

Delete app.py

Files changed (1)
  1. app.py +0 -111
app.py DELETED
@@ -1,111 +0,0 @@
- import gradio as gr
- import torch
- import numpy as np
- import modin.pandas as pd
- from PIL import Image
- from diffusers import DiffusionPipeline
- from diffusers.utils import export_to_video  # used in sample(); this import was missing from the original file
- from huggingface_hub import login
- import os
- from glob import glob
- from pathlib import Path
- from typing import Optional
- import uuid
- import random
-
- # Authenticate so the gated SVD checkpoint can be downloaded
- token = os.environ['HF_TOKEN']
- login(token=token)
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
- if torch.cuda.is_available():
-     torch.cuda.max_memory_allocated(device=device)  # guarded: this call raises on CPU-only hosts
- torch.cuda.empty_cache()
- pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt-1-1")
- pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-
- #pipe.enable_xformers_memory_efficient_attention()
- #pipe = pipe.to(device)
- pipe.enable_model_cpu_offload()
- torch.cuda.empty_cache()
-
- max_64_bit_int = 2**63 - 1
-
- def sample(
-     image: Image.Image,
-     seed: Optional[int] = 42,
-     randomize_seed: bool = True,
-     motion_bucket_id: int = 127,
-     fps_id: int = 6,
-     version: str = "svd_xt_1-1",
-     cond_aug: float = 0.02,
-     decoding_t: int = 3,  # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
-     device: str = "cuda",
-     output_folder: str = "outputs",
- ):
-     if image.mode == "RGBA":
-         image = image.convert("RGB")
-
-     if randomize_seed:
-         seed = random.randint(0, max_64_bit_int)
-     generator = torch.manual_seed(seed)
-     torch.cuda.empty_cache()
-     os.makedirs(output_folder, exist_ok=True)
-     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
-     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
-
-     frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
-     export_to_video(frames, video_path, fps=fps_id)
-     torch.manual_seed(seed)
-     torch.cuda.empty_cache()
-     return video_path, seed
-
- def resize_image(image, output_size=(768, 512)):
-     # Calculate aspect ratios
-     target_aspect = output_size[0] / output_size[1]  # Aspect ratio of the desired size
-     image_aspect = image.width / image.height  # Aspect ratio of the original image
-
-     # Resize, then center-crop so the output is exactly output_size
-     if image_aspect > target_aspect:
-         # Resize the image to match the target height, maintaining aspect ratio
-         new_height = output_size[1]
-         new_width = int(new_height * image_aspect)
-         resized_image = image.resize((new_width, new_height), Image.LANCZOS)
-         # Calculate coordinates for a horizontally centered crop (integer math keeps the size exact)
-         left = (new_width - output_size[0]) // 2
-         top = 0
-         right = left + output_size[0]
-         bottom = output_size[1]
-     else:
-         # Resize the image to match the target width, maintaining aspect ratio
-         new_width = output_size[0]
-         new_height = int(new_width / image_aspect)
-         resized_image = image.resize((new_width, new_height), Image.LANCZOS)
-         # Calculate coordinates for a vertically centered crop
-         left = 0
-         top = (new_height - output_size[1]) // 2
-         right = output_size[0]
-         bottom = top + output_size[1]
-
-     # Crop the image
-     cropped_image = resized_image.crop((left, top, right, bottom))
-     torch.cuda.empty_cache()
-     return cropped_image
-
- with gr.Blocks() as demo:
-     #gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
-     #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate `4s` vid from a single image at (`25 frames` at `6 fps`). this demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
-     #''')
-     with gr.Row():
-         with gr.Column():
-             image = gr.Image(label="Upload your image", type="pil")
-             generate_btn = gr.Button("Generate")
-         video = gr.Video()
-     with gr.Accordion("Advanced options", open=False):
-         seed = gr.Slider(label="Seed", value=42, randomize=True, minimum=0, maximum=max_64_bit_int, step=1)
-         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-         motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
-         fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
-
-     image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
-     generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
-
- if __name__ == "__main__":
-     demo.queue(max_size=20, api_open=False)
-     demo.launch(show_api=False)
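
For context, the core image-to-video path the deleted app wrapped in Gradio can be reproduced standalone. The sketch below reuses the model ID, decode_chunk_size, motion_bucket_id, noise_aug_strength, frame count, and fps from the deleted file; the fp16 loading options and the "input.jpg"/"output.mp4" file names are assumptions added for illustration, and the gated checkpoint still requires a logged-in HF token.

# Minimal standalone sketch of the deleted app's generation path.
# Assumptions: a CUDA GPU, access to the gated checkpoint (HF login),
# fp16 weights, and placeholder file names "input.jpg"/"output.mp4".
import torch
from PIL import Image
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,  # assumption: fp16 to cut VRAM; the app loaded full precision
    variant="fp16",
)
pipe.enable_model_cpu_offload()  # same VRAM-saving choice as the deleted app

# Simple resize for brevity; the app used an aspect-preserving center crop instead
image = Image.open("input.jpg").convert("RGB").resize((768, 512))
frames = pipe(
    image,
    decode_chunk_size=3,        # frames decoded per VAE pass; the main VRAM knob
    generator=torch.manual_seed(42),
    motion_bucket_id=127,       # higher values add more motion
    noise_aug_strength=0.1,
    num_frames=25,
).frames[0]
export_to_video(frames, "output.mp4", fps=6)  # 25 frames at 6 fps ≈ 4 s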