zRzRzRzRzRzRzR committed on
Commit
74a40be
1 Parent(s): 28d0068

update diffusers

Browse files
Files changed (4) hide show
  1. README.md +1 -1
  2. app.py +90 -108
  3. app_test.py +260 -0
  4. requirements.txt +5 -6
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🎥
4
  colorFrom: yellow
5
  colorTo: green
6
  sdk: gradio
7
- sdk_version: 4.41.0
8
  suggested_hardware: a10g-large
9
  suggested_storage: large
10
  app_port: 7860
 
4
  colorFrom: yellow
5
  colorTo: green
6
  sdk: gradio
7
+ sdk_version: 4.42.0
8
  suggested_hardware: a10g-large
9
  suggested_storage: large
10
  app_port: 7860
app.py CHANGED
@@ -1,23 +1,21 @@
1
  import os
2
- import tempfile
3
  import threading
4
  import time
5
 
6
  import gradio as gr
7
- import numpy as np
8
  import torch
9
  from diffusers import CogVideoXPipeline
 
10
  from datetime import datetime, timedelta
11
  from openai import OpenAI
12
- import spaces
13
- import imageio
14
  import moviepy.editor as mp
15
- from typing import List, Union
16
- import PIL
17
 
18
- dtype = torch.float16
19
  device = "cuda" if torch.cuda.is_available() else "cpu"
20
- pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=dtype).to(device)
 
 
 
21
 
22
  sys_prompt = """You are part of a team of bots that creates videos. You work with an assistant bot that will draw anything you say in square brackets.
23
 
@@ -33,25 +31,6 @@ Video descriptions must have the same num of words as examples below. Extra word
33
  """
34
 
35
 
36
- def export_to_video_imageio(
37
- video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 8
38
- ) -> str:
39
- """
40
- Export the video frames to a video file using imageio lib to Avoid "green screen" issue (for example CogVideoX)
41
- """
42
- if output_video_path is None:
43
- output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
44
-
45
- if isinstance(video_frames[0], PIL.Image.Image):
46
- video_frames = [np.array(frame) for frame in video_frames]
47
-
48
- with imageio.get_writer(output_video_path, fps=fps) as writer:
49
- for frame in video_frames:
50
- writer.append_data(frame)
51
-
52
- return output_video_path
53
-
54
-
55
  def convert_prompt(prompt: str, retry_times: int = 3) -> str:
56
  if not os.environ.get("OPENAI_API_KEY"):
57
  return prompt
@@ -62,20 +41,34 @@ def convert_prompt(prompt: str, retry_times: int = 3) -> str:
62
  response = client.chat.completions.create(
63
  messages=[
64
  {"role": "system", "content": sys_prompt},
65
- {"role": "user",
66
- "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "a girl is on the beach"'},
67
- {"role": "assistant",
68
- "content": "A radiant woman stands on a deserted beach, arms outstretched, wearing a beige trench coat, white blouse, light blue jeans, and chic boots, against a backdrop of soft sky and sea. Moments later, she is seen mid-twirl, arms exuberant, with the lighting suggesting dawn or dusk. Then, she runs along the beach, her attire complemented by an off-white scarf and black ankle boots, the tranquil sea behind her. Finally, she holds a paper airplane, her pose reflecting joy and freedom, with the ocean's gentle waves and the sky's soft pastel hues enhancing the serene ambiance."},
69
- {"role": "user",
70
- "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "A man jogging on a football field"'},
71
- {"role": "assistant",
72
- "content": "A determined man in athletic attire, including a blue long-sleeve shirt, black shorts, and blue socks, jogs around a snow-covered soccer field, showcasing his solitary exercise in a quiet, overcast setting. His long dreadlocks, focused expression, and the serene winter backdrop highlight his dedication to fitness. As he moves, his attire, consisting of a blue sports sweatshirt, black athletic pants, gloves, and sneakers, grips the snowy ground. He is seen running past a chain-link fence enclosing the playground area, with a basketball hoop and children's slide, suggesting a moment of solitary exercise amidst the empty field."},
73
- {"role": "user",
74
- "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : " A woman is dancing, HD footage, close-up"'},
75
- {"role": "assistant",
76
- "content": "A young woman with her hair in an updo and wearing a teal hoodie stands against a light backdrop, initially looking over her shoulder with a contemplative expression. She then confidently makes a subtle dance move, suggesting rhythm and movement. Next, she appears poised and focused, looking directly at the camera. Her expression shifts to one of introspection as she gazes downward slightly. Finally, she dances with confidence, her left hand over her heart, symbolizing a poignant moment, all while dressed in the same teal hoodie against a plain, light-colored background."},
77
- {"role": "user",
78
- "content": f'Create an imaginative video descriptive caption or modify an earlier caption in ENGLISH for the user input: "{text}"'},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  ],
80
  model="glm-4-0520",
81
  temperature=0.01,
@@ -88,33 +81,16 @@ def convert_prompt(prompt: str, retry_times: int = 3) -> str:
88
  return prompt
89
 
90
 
91
- @spaces.GPU(duration=240)
92
- def infer(
93
- prompt: str,
94
- num_inference_steps: int,
95
- guidance_scale: float,
96
- progress=gr.Progress(track_tqdm=True)
97
- ):
98
  torch.cuda.empty_cache()
99
-
100
- prompt_embeds, _ = pipe.encode_prompt(
101
  prompt=prompt,
102
- negative_prompt=None,
103
- do_classifier_free_guidance=True,
104
  num_videos_per_prompt=1,
105
- max_sequence_length=226,
106
- device=device,
107
- dtype=dtype,
108
- )
109
-
110
- video = pipe(
111
  num_inference_steps=num_inference_steps,
 
112
  guidance_scale=guidance_scale,
113
- prompt_embeds=prompt_embeds,
114
- negative_prompt_embeds=torch.zeros_like(prompt_embeds),
115
  ).frames[0]
116
 
117
-
118
  return video
119
 
120
 
@@ -122,14 +98,15 @@ def save_video(tensor):
122
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
123
  video_path = f"./output/{timestamp}.mp4"
124
  os.makedirs(os.path.dirname(video_path), exist_ok=True)
125
- export_to_video_imageio(tensor[1:], video_path)
126
  return video_path
127
 
 
128
  def convert_to_gif(video_path):
129
  clip = mp.VideoFileClip(video_path)
130
  clip = clip.set_fps(8)
131
  clip = clip.resize(height=240)
132
- gif_path = video_path.replace('.mp4', '.gif')
133
  clip.write_gif(gif_path, fps=8)
134
  return gif_path
135
 
@@ -138,14 +115,16 @@ def delete_old_files():
138
  while True:
139
  now = datetime.now()
140
  cutoff = now - timedelta(minutes=10)
141
- output_dir = './output'
142
- for filename in os.listdir(output_dir):
143
- file_path = os.path.join(output_dir, filename)
144
- if os.path.isfile(file_path):
145
- file_mtime = datetime.fromtimestamp(os.path.getmtime(file_path))
146
- if file_mtime < cutoff:
147
- os.remove(file_path)
148
- time.sleep(600) # Sleep for 10 minutes
 
 
149
 
150
 
151
  threading.Thread(target=delete_old_files, daemon=True).start()
@@ -153,10 +132,10 @@ threading.Thread(target=delete_old_files, daemon=True).start()
153
  with gr.Blocks() as demo:
154
  gr.Markdown("""
155
  <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
156
- CogVideoX-2B Huggingface Space🤗
157
  </div>
158
  <div style="text-align: center;">
159
- <a href="https://huggingface.co/THUDM/CogVideoX-2b">🤗 Model Hub</a> |
160
  <a href="https://github.com/THUDM/CogVideo">🌐 Github</a> |
161
  <a href="https://arxiv.org/pdf/2408.06072">📜 arxiv </a>
162
  </div>
@@ -169,6 +148,7 @@ with gr.Blocks() as demo:
169
  with gr.Row():
170
  with gr.Column():
171
  prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=5)
 
172
  with gr.Row():
173
  gr.Markdown(
174
  "✨Upon pressing the enhanced prompt button, we will use [GLM-4 Model](https://github.com/THUDM/GLM-4) to polish the prompt and overwrite the original one.")
@@ -176,8 +156,9 @@ with gr.Blocks() as demo:
176
 
177
  with gr.Column():
178
  gr.Markdown("**Optional Parameters** (default values are recommended)<br>"
179
- "Turn Inference Steps larger if you want more detailed video, but it will be slower.<br>"
180
- "50 steps are recommended for most cases. will cause 120 seconds for inference.<br>")
 
181
  with gr.Row():
182
  num_inference_steps = gr.Number(label="Inference Steps", value=50)
183
  guidance_scale = gr.Number(label="Guidance Scale", value=6.0)
@@ -190,42 +171,43 @@ with gr.Blocks() as demo:
190
  download_gif_button = gr.File(label="📥 Download GIF", visible=False)
191
 
192
  gr.Markdown("""
193
- <table border="1" style="width: 100%; text-align: left; margin-top: 20px;">
194
- <tr>
195
- <th>Prompt</th>
196
- <th>Video URL</th>
197
- <th>Inference Steps</th>
198
- <th>Guidance Scale</th>
199
- </tr>
200
- <tr>
201
- <td>A detailed wooden toy ship with intricately carved masts and sails is seen gliding smoothly over a plush, blue carpet that mimics the waves of the sea. The ship's hull is painted a rich brown, with tiny windows. The carpet, soft and textured, provides a perfect backdrop, resembling an oceanic expanse. Surrounding the ship are various other toys and children's items, hinting at a playful environment. The scene captures the innocence and imagination of childhood, with the toy ship's journey symbolizing endless adventures in a whimsical, indoor setting.</td>
202
- <td><a href="https://github.com/THUDM/CogVideo/raw/main/resources/videos/1.mp4">Video 1</a></td>
203
- <td>50</td>
204
- <td>6</td>
205
- </tr>
206
- <tr>
207
- <td>The camera follows behind a white vintage SUV with a black roof rack as it speeds up a steep dirt road surrounded by pine trees on a steep mountain slope, dust kicks up from it’s tires, the sunlight shines on the SUV as it speeds along the dirt road, casting a warm glow over the scene. The dirt road curves gently into the distance, with no other cars or vehicles in sight. The trees on either side of the road are redwoods, with patches of greenery scattered throughout. The car is seen from the rear following the curve with ease, making it seem as if it is on a rugged drive through the rugged terrain. The dirt road itself is surrounded by steep hills and mountains, with a clear blue sky above with wispy clouds.</td>
208
- <td><a href="https://github.com/THUDM/CogVideo/raw/main/resources/videos/2.mp4">Video 2</a></td>
209
- <td>50</td>
210
- <td>6</td>
211
- </tr>
212
- <tr>
213
- <td>A street artist, clad in a worn-out denim jacket and a colorful bandana, stands before a vast concrete wall in the heart, holding a can of spray paint, spray-painting a colorful bird on a mottled wall.</td>
214
- <td><a href="https://github.com/THUDM/CogVideo/raw/main/resources/videos/3.mp4">Video 3</a></td>
215
- <td>50</td>
216
- <td>6</td>
217
- </tr>
218
- <tr>
219
- <td>In the haunting backdrop of a war-torn city, where ruins and crumbled walls tell a story of devastation, a poignant close-up frames a young girl. Her face is smudged with ash, a silent testament to the chaos around her. Her eyes glistening with a mix of sorrow and resilience, capturing the raw emotion of a world that has lost its innocence to the ravages of conflict.</td>
220
- <td><a href="https://github.com/THUDM/CogVideo/raw/main/resources/videos/4.mp4">Video 4</a></td>
221
- <td>50</td>
222
- <td>6</td>
223
- </tr>
224
- </table>
 
225
  """)
226
 
227
 
228
- def generate(prompt, num_inference_steps, guidance_scale, progress=gr.Progress(track_tqdm=True)):
229
  tensor = infer(prompt, num_inference_steps, guidance_scale, progress=progress)
230
  video_path = save_video(tensor)
231
  video_update = gr.update(visible=True, value=video_path)
 
1
  import os
 
2
  import threading
3
  import time
4
 
5
  import gradio as gr
 
6
  import torch
7
  from diffusers import CogVideoXPipeline
8
+ from diffusers.utils import export_to_video
9
  from datetime import datetime, timedelta
10
  from openai import OpenAI
 
 
11
  import moviepy.editor as mp
 
 
12
 
13
+ dtype = torch.bfloat16
14
  device = "cuda" if torch.cuda.is_available() else "cpu"
15
+ pipe = CogVideoXPipeline.from_pretrained("/share/home/zyx/Models/cogvideox-5b-0823-hf", torch_dtype=dtype).to(device)
16
+
17
+ os.makedirs("./output", exist_ok=True)
18
+ os.makedirs("./gradio_tmp", exist_ok=True)
19
 
20
  sys_prompt = """You are part of a team of bots that creates videos. You work with an assistant bot that will draw anything you say in square brackets.
21
 
 
31
  """
32
 
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  def convert_prompt(prompt: str, retry_times: int = 3) -> str:
35
  if not os.environ.get("OPENAI_API_KEY"):
36
  return prompt
 
41
  response = client.chat.completions.create(
42
  messages=[
43
  {"role": "system", "content": sys_prompt},
44
+ {
45
+ "role": "user",
46
+ "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "a girl is on the beach"',
47
+ },
48
+ {
49
+ "role": "assistant",
50
+ "content": "A radiant woman stands on a deserted beach, arms outstretched, wearing a beige trench coat, white blouse, light blue jeans, and chic boots, against a backdrop of soft sky and sea. Moments later, she is seen mid-twirl, arms exuberant, with the lighting suggesting dawn or dusk. Then, she runs along the beach, her attire complemented by an off-white scarf and black ankle boots, the tranquil sea behind her. Finally, she holds a paper airplane, her pose reflecting joy and freedom, with the ocean's gentle waves and the sky's soft pastel hues enhancing the serene ambiance.",
51
+ },
52
+ {
53
+ "role": "user",
54
+ "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "A man jogging on a football field"',
55
+ },
56
+ {
57
+ "role": "assistant",
58
+ "content": "A determined man in athletic attire, including a blue long-sleeve shirt, black shorts, and blue socks, jogs around a snow-covered soccer field, showcasing his solitary exercise in a quiet, overcast setting. His long dreadlocks, focused expression, and the serene winter backdrop highlight his dedication to fitness. As he moves, his attire, consisting of a blue sports sweatshirt, black athletic pants, gloves, and sneakers, grips the snowy ground. He is seen running past a chain-link fence enclosing the playground area, with a basketball hoop and children's slide, suggesting a moment of solitary exercise amidst the empty field.",
59
+ },
60
+ {
61
+ "role": "user",
62
+ "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : " A woman is dancing, HD footage, close-up"',
63
+ },
64
+ {
65
+ "role": "assistant",
66
+ "content": "A young woman with her hair in an updo and wearing a teal hoodie stands against a light backdrop, initially looking over her shoulder with a contemplative expression. She then confidently makes a subtle dance move, suggesting rhythm and movement. Next, she appears poised and focused, looking directly at the camera. Her expression shifts to one of introspection as she gazes downward slightly. Finally, she dances with confidence, her left hand over her heart, symbolizing a poignant moment, all while dressed in the same teal hoodie against a plain, light-colored background.",
67
+ },
68
+ {
69
+ "role": "user",
70
+ "content": f'Create an imaginative video descriptive caption or modify an earlier caption in ENGLISH for the user input: "{text}"',
71
+ },
72
  ],
73
  model="glm-4-0520",
74
  temperature=0.01,
 
81
  return prompt
82
 
83
 
84
+ def infer(prompt: str, num_inference_steps: int, guidance_scale: float, progress=gr.Progress(track_tqdm=True)):
 
 
 
 
 
 
85
  torch.cuda.empty_cache()
86
+ video = pipe(
 
87
  prompt=prompt,
 
 
88
  num_videos_per_prompt=1,
 
 
 
 
 
 
89
  num_inference_steps=num_inference_steps,
90
+ num_frames=49,
91
  guidance_scale=guidance_scale,
 
 
92
  ).frames[0]
93
 
 
94
  return video
95
 
96
 
 
98
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
99
  video_path = f"./output/{timestamp}.mp4"
100
  os.makedirs(os.path.dirname(video_path), exist_ok=True)
101
+ export_to_video(tensor, video_path)
102
  return video_path
103
 
104
+
105
  def convert_to_gif(video_path):
106
  clip = mp.VideoFileClip(video_path)
107
  clip = clip.set_fps(8)
108
  clip = clip.resize(height=240)
109
+ gif_path = video_path.replace(".mp4", ".gif")
110
  clip.write_gif(gif_path, fps=8)
111
  return gif_path
112
 
 
115
  while True:
116
  now = datetime.now()
117
  cutoff = now - timedelta(minutes=10)
118
+ directories = ["./output", "./gradio_tmp"]
119
+
120
+ for directory in directories:
121
+ for filename in os.listdir(directory):
122
+ file_path = os.path.join(directory, filename)
123
+ if os.path.isfile(file_path):
124
+ file_mtime = datetime.fromtimestamp(os.path.getmtime(file_path))
125
+ if file_mtime < cutoff:
126
+ os.remove(file_path)
127
+ time.sleep(600)
128
 
129
 
130
  threading.Thread(target=delete_old_files, daemon=True).start()
 
132
  with gr.Blocks() as demo:
133
  gr.Markdown("""
134
  <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
135
+ CogVideoX-5B Huggingface Space🤗
136
  </div>
137
  <div style="text-align: center;">
138
+ <a href="https://huggingface.co/THUDM/CogVideoX-2B">🤗 2B Model Hub</a> |
139
  <a href="https://github.com/THUDM/CogVideo">🌐 Github</a> |
140
  <a href="https://arxiv.org/pdf/2408.06072">📜 arxiv </a>
141
  </div>
 
148
  with gr.Row():
149
  with gr.Column():
150
  prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=5)
151
+
152
  with gr.Row():
153
  gr.Markdown(
154
  "✨Upon pressing the enhanced prompt button, we will use [GLM-4 Model](https://github.com/THUDM/GLM-4) to polish the prompt and overwrite the original one.")
 
156
 
157
  with gr.Column():
158
  gr.Markdown("**Optional Parameters** (default values are recommended)<br>"
159
+ "Increasing the number of inference steps will produce more detailed videos, but it will slow down the process.<br>"
160
+ "50 steps are recommended for most cases.<br>"
161
+ "For the 5B model, 50 steps will take approximately 350 seconds.")
162
  with gr.Row():
163
  num_inference_steps = gr.Number(label="Inference Steps", value=50)
164
  guidance_scale = gr.Number(label="Guidance Scale", value=6.0)
 
171
  download_gif_button = gr.File(label="📥 Download GIF", visible=False)
172
 
173
  gr.Markdown("""
174
+ <table border="0" style="width: 100%; text-align: left; margin-top: 20px;">
175
+ <div style="text-align: center; font-size: 24px; font-weight: bold; margin-bottom: 20px;">
176
+ Demo Videos with 50 Inference Steps and 6.0 Guidance Scale.
177
+ </div>
178
+ <tr>
179
+ <td style="width: 25%; vertical-align: top; font-size: 0.8em;">
180
+ <p>A detailed wooden toy ship with intricately carved masts and sails is seen gliding smoothly over a plush, blue carpet that mimics the waves of the sea. The ship's hull is painted a rich brown, with tiny windows. The carpet, soft and textured, provides a perfect backdrop, resembling an oceanic expanse. Surrounding the ship are various other toys and children's items, hinting at a playful environment. The scene captures the innocence and imagination of childhood, with the toy ship's journey symbolizing endless adventures in a whimsical, indoor setting.</p>
181
+ </td>
182
+ <td style="width: 25%; vertical-align: top;">
183
+ <video src="https://github.com/user-attachments/assets/ea3af39a-3160-4999-90ec-2f7863c5b0e9" width="100%" controls autoplay></video>
184
+ </td>
185
+ <td style="width: 25%; vertical-align: top; font-size: 0.8em;">
186
+ <p>The camera follows behind a white vintage SUV with a black roof rack as it speeds up a steep dirt road surrounded by pine trees on a steep mountain slope, dust kicks up from its tires, the sunlight shines on the SUV as it speeds along the dirt road, casting a warm glow over the scene. The dirt road curves gently into the distance, with no other cars or vehicles in sight. The trees on either side of the road are redwoods, with patches of greenery scattered throughout. The car is seen from the rear following the curve with ease, making it seem as if it is on a rugged drive through the rugged terrain. The dirt road itself is surrounded by steep hills and mountains, with a clear blue sky above with wispy clouds.</p>
187
+ </td>
188
+ <td style="width: 25%; vertical-align: top;">
189
+ <video src="https://github.com/user-attachments/assets/9de41efd-d4d1-4095-aeda-246dd834e91d" width="100%" controls autoplay></video>
190
+ </td>
191
+ </tr>
192
+ <tr>
193
+ <td style="width: 25%; vertical-align: top; font-size: 0.8em;">
194
+ <p>A street artist, clad in a worn-out denim jacket and a colorful bandana, stands before a vast concrete wall in the heart, holding a can of spray paint, spray-painting a colorful bird on a mottled wall.</p>
195
+ </td>
196
+ <td style="width: 25%; vertical-align: top;">
197
+ <video src="https://github.com/user-attachments/assets/941d6661-6a8d-4a1b-b912-59606f0b2841" width="100%" controls autoplay></video>
198
+ </td>
199
+ <td style="width: 25%; vertical-align: top; font-size: 0.8em;">
200
+ <p>In the haunting backdrop of a war-torn city, where ruins and crumbled walls tell a story of devastation, a poignant close-up frames a young girl. Her face is smudged with ash, a silent testament to the chaos around her. Her eyes glistening with a mix of sorrow and resilience, capturing the raw emotion of a world that has lost its innocence to the ravages of conflict.</p>
201
+ </td>
202
+ <td style="width: 25%; vertical-align: top;">
203
+ <video src="https://github.com/user-attachments/assets/938529c4-91ae-4f60-b96b-3c3947fa63cb" width="100%" controls autoplay></video>
204
+ </td>
205
+ </tr>
206
+ </table>
207
  """)
208
 
209
 
210
+ def generate(prompt, num_inference_steps, guidance_scale, model_choice, progress=gr.Progress(track_tqdm=True)):
211
  tensor = infer(prompt, num_inference_steps, guidance_scale, progress=progress)
212
  video_path = save_video(tensor)
213
  video_update = gr.update(visible=True, value=video_path)
app_test.py ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gc
2
+ import os
3
+ import tempfile
4
+ import threading
5
+ import time
6
+
7
+ import gradio as gr
8
+ import numpy as np
9
+ import torch
10
+ from diffusers import CogVideoXPipeline
11
+ from datetime import datetime, timedelta
12
+ from openai import OpenAI
13
+ import imageio
14
+ import moviepy.editor as mp
15
+ from typing import List, Union
16
+ import PIL
17
+ from modelscope import snapshot_download
18
+
19
+ model_dir = snapshot_download("ZhipuAI/CogVideoX-5b")
20
+
21
+ device = "cuda" if torch.cuda.is_available() else "cpu"
22
+
23
+ pipe = CogVideoXPipeline.from_pretrained(model_dir, torch_dtype=torch.bfloat16).to(
24
+ device)
25
+ pipe.enable_model_cpu_offload()
26
+
27
+ gc.collect()
28
+ torch.cuda.empty_cache()
29
+ torch.cuda.reset_accumulated_memory_stats()
30
+ torch.cuda.reset_peak_memory_stats()
31
+
32
+ # pipe.vae.enable_tiling()
33
+
34
+ sys_prompt = """You are part of a team of bots that creates videos. You work with an assistant bot that will draw anything you say in square brackets.
35
+
36
+ For example , outputting " a beautiful morning in the woods with the sun peaking through the trees " will trigger your partner bot to output an video of a forest morning , as described. You will be prompted by people looking to create detailed , amazing videos. The way to accomplish this is to take their short prompts and make them extremely detailed and descriptive.
37
+ There are a few rules to follow:
38
+
39
+ You will only ever output a single video description per user request.
40
+
41
+ When modifications are requested , you should not simply make the description longer . You should refactor the entire description to integrate the suggestions.
42
+ Other times the user will not want modifications , but instead want a new image . In this case , you should ignore your previous conversation with the user.
43
+
44
+ Video descriptions must have the same num of words as examples below. Extra words will be ignored.
45
+ """
46
+
47
+
48
+ def export_to_video_imageio(
49
+ video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 8
50
+ ) -> str:
51
+ """
52
+ Export the video frames to a video file using imageio lib to Avoid "green screen" issue (for example CogVideoX)
53
+ """
54
+ if output_video_path is None:
55
+ output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
56
+
57
+ if isinstance(video_frames[0], PIL.Image.Image):
58
+ video_frames = [np.array(frame) for frame in video_frames]
59
+
60
+ with imageio.get_writer(output_video_path, fps=fps) as writer:
61
+ for frame in video_frames:
62
+ writer.append_data(frame)
63
+
64
+ return output_video_path
65
+
66
+
67
+ def convert_prompt(prompt: str, retry_times: int = 3) -> str:
68
+ if not os.environ.get("OPENAI_API_KEY"):
69
+ return prompt
70
+ client = OpenAI()
71
+ text = prompt.strip()
72
+
73
+ for i in range(retry_times):
74
+ response = client.chat.completions.create(
75
+ messages=[
76
+ {"role": "system", "content": sys_prompt},
77
+ {"role": "user",
78
+ "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "a girl is on the beach"'},
79
+ {"role": "assistant",
80
+ "content": "A radiant woman stands on a deserted beach, arms outstretched, wearing a beige trench coat, white blouse, light blue jeans, and chic boots, against a backdrop of soft sky and sea. Moments later, she is seen mid-twirl, arms exuberant, with the lighting suggesting dawn or dusk. Then, she runs along the beach, her attire complemented by an off-white scarf and black ankle boots, the tranquil sea behind her. Finally, she holds a paper airplane, her pose reflecting joy and freedom, with the ocean's gentle waves and the sky's soft pastel hues enhancing the serene ambiance."},
81
+ {"role": "user",
82
+ "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "A man jogging on a football field"'},
83
+ {"role": "assistant",
84
+ "content": "A determined man in athletic attire, including a blue long-sleeve shirt, black shorts, and blue socks, jogs around a snow-covered soccer field, showcasing his solitary exercise in a quiet, overcast setting. His long dreadlocks, focused expression, and the serene winter backdrop highlight his dedication to fitness. As he moves, his attire, consisting of a blue sports sweatshirt, black athletic pants, gloves, and sneakers, grips the snowy ground. He is seen running past a chain-link fence enclosing the playground area, with a basketball hoop and children's slide, suggesting a moment of solitary exercise amidst the empty field."},
85
+ {"role": "user",
86
+ "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : " A woman is dancing, HD footage, close-up"'},
87
+ {"role": "assistant",
88
+ "content": "A young woman with her hair in an updo and wearing a teal hoodie stands against a light backdrop, initially looking over her shoulder with a contemplative expression. She then confidently makes a subtle dance move, suggesting rhythm and movement. Next, she appears poised and focused, looking directly at the camera. Her expression shifts to one of introspection as she gazes downward slightly. Finally, she dances with confidence, her left hand over her heart, symbolizing a poignant moment, all while dressed in the same teal hoodie against a plain, light-colored background."},
89
+ {"role": "user",
90
+ "content": f'Create an imaginative video descriptive caption or modify an earlier caption in ENGLISH for the user input: "{text}"'},
91
+ ],
92
+ model="glm-4-0520",
93
+ temperature=0.01,
94
+ top_p=0.7,
95
+ stream=False,
96
+ max_tokens=200,
97
+ )
98
+ if response.choices:
99
+ return response.choices[0].message.content
100
+ return prompt
101
+
102
+
103
+ def infer(
104
+ prompt: str,
105
+ num_inference_steps: int,
106
+ guidance_scale: float,
107
+ progress=gr.Progress(track_tqdm=True),
108
+ ):
109
+ torch.cuda.empty_cache()
110
+
111
+ video = pipe(
112
+ prompt=prompt,
113
+ num_inference_steps=num_inference_steps,
114
+ guidance_scale=guidance_scale,
115
+ ).frames[0]
116
+
117
+ return video
118
+
119
+
120
+ def save_video(tensor):
121
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
122
+ video_path = f"./output/{timestamp}.mp4"
123
+ os.makedirs(os.path.dirname(video_path), exist_ok=True)
124
+ export_to_video_imageio(tensor[1:], video_path)
125
+ return video_path
126
+
127
+
128
+ def convert_to_gif(video_path):
129
+ clip = mp.VideoFileClip(video_path)
130
+ clip = clip.set_fps(8)
131
+ clip = clip.resize(height=240)
132
+ gif_path = video_path.replace('.mp4', '.gif')
133
+ clip.write_gif(gif_path, fps=8)
134
+ return gif_path
135
+
136
+
137
+ def delete_old_files():
138
+ while True:
139
+ now = datetime.now()
140
+ cutoff = now - timedelta(minutes=10)
141
+ output_dir = './output'
142
+ os.makedirs(output_dir, exist_ok=True)
143
+ for filename in os.listdir(output_dir):
144
+ file_path = os.path.join(output_dir, filename)
145
+ if os.path.isfile(file_path):
146
+ file_mtime = datetime.fromtimestamp(os.path.getmtime(file_path))
147
+ if file_mtime < cutoff:
148
+ os.remove(file_path)
149
+ time.sleep(600) # Sleep for 10 minutes
150
+
151
+
152
+ threading.Thread(target=delete_old_files, daemon=True).start()
153
+
154
# ---------------------------------------------------------------------------
# Gradio UI definition.
# Left column: prompt textbox, optional GLM-4 prompt enhancement, sampling
# parameters (steps / guidance scale) and the generate button.
# Right column: generated-video preview plus hidden download widgets that are
# revealed once a video exists. A static HTML table of demo videos follows.
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    # Page header: title, model-hub / repo / paper links, and usage disclaimer.
    gr.Markdown("""
    <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
       CogVideoX-5B Huggingface Space🤗
    </div>
    <div style="text-align: center;">
        <a href="https://huggingface.co/THUDM/CogVideoX-2B">🤗 2B Model Hub</a> |
        <a href="https://huggingface.co/THUDM/CogVideoX-5B">🤗 5B Model Hub</a> |
        <a href="https://github.com/THUDM/CogVideo">🌐 Github</a> |
        <a href="https://arxiv.org/pdf/2408.06072">📜 arxiv </a>
    </div>

    <div style="text-align: center; font-size: 15px; font-weight: bold; color: red; margin-bottom: 20px;">
    ⚠️ This demo is for academic research and experiential use only.
    Users should strictly adhere to local laws and ethics.
    </div>
    """)
    with gr.Row():
        with gr.Column():
            # Free-form text prompt; the enhance button below may overwrite it.
            prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=5)

            with gr.Row():
                gr.Markdown(
                    "✨Upon pressing the enhanced prompt button, we will use [GLM-4 Model](https://github.com/THUDM/GLM-4) to polish the prompt and overwrite the original one.")
                enhance_button = gr.Button("✨ Enhance Prompt(Optional)")

            with gr.Column():
                gr.Markdown("**Optional Parameters** (default values are recommended)<br>"
                            "Increasing the number of inference steps will produce more detailed videos, but it will slow down the process.<br>"
                            "50 steps are recommended for most cases.<br>"
                            "For the 5B model, 50 steps will take approximately 350 seconds.")
                with gr.Row():
                    # Diffusion sampling knobs forwarded to the pipeline.
                    num_inference_steps = gr.Number(label="Inference Steps", value=50)
                    guidance_scale = gr.Number(label="Guidance Scale", value=6.0)
                generate_button = gr.Button("🎬 Generate Video")

        with gr.Column():
            # Preview player; the download widgets start hidden and are shown
            # via gr.update(...) once generation finishes.
            video_output = gr.Video(label="CogVideoX Generate Video", width=720, height=480)
            with gr.Row():
                download_video_button = gr.File(label="📥 Download Video", visible=False)
                download_gif_button = gr.File(label="📥 Download GIF", visible=False)

    # Static showcase of pre-rendered sample videos (50 steps, 6.0 guidance).
    gr.Markdown("""
    <table border="0" style="width: 100%; text-align: left; margin-top: 20px;">
        <div style="text-align: center; font-size: 24px; font-weight: bold; margin-bottom: 20px;">
            Demo Videos with 50 Inference Steps and 6.0 Guidance Scale.
        </div>
        <tr>
          <td style="width: 25%; vertical-align: top; font-size: 1.2em;">
              <p>A detailed wooden toy ship with intricately carved masts and sails is seen gliding smoothly over a plush, blue carpet that mimics the waves of the sea. The ship's hull is painted a rich brown, with tiny windows. The carpet, soft and textured, provides a perfect backdrop, resembling an oceanic expanse. Surrounding the ship are various other toys and children's items, hinting at a playful environment. The scene captures the innocence and imagination of childhood, with the toy ship's journey symbolizing endless adventures in a whimsical, indoor setting.</p>
          </td>
          <td style="width: 25%; vertical-align: top;">
              <video src="https://github.com/user-attachments/assets/ea3af39a-3160-4999-90ec-2f7863c5b0e9" width="100%" controls autoplay></video>
          </td>
          <td style="width: 25%; vertical-align: top; font-size: 1.2em;">
              <p>The camera follows behind a white vintage SUV with a black roof rack as it speeds up a steep dirt road surrounded by pine trees on a steep mountain slope, dust kicks up from its tires, the sunlight shines on the SUV as it speeds along the dirt road, casting a warm glow over the scene. The dirt road curves gently into the distance, with no other cars or vehicles in sight. The trees on either side of the road are redwoods, with patches of greenery scattered throughout. The car is seen from the rear following the curve with ease, making it seem as if it is on a rugged drive through the rugged terrain. The dirt road itself is surrounded by steep hills and mountains, with a clear blue sky above with wispy clouds.</p>
          </td>
          <td style="width: 25%; vertical-align: top;">
              <video src="https://github.com/user-attachments/assets/9de41efd-d4d1-4095-aeda-246dd834e91d" width="100%" controls autoplay></video>
          </td>
        </tr>
        <tr>
          <td style="width: 25%; vertical-align: top; font-size: 1.2em;">
              <p>A street artist, clad in a worn-out denim jacket and a colorful bandana, stands before a vast concrete wall in the heart, holding a can of spray paint, spray-painting a colorful bird on a mottled wall.</p>
          </td>
          <td style="width: 25%; vertical-align: top;">
              <video src="https://github.com/user-attachments/assets/941d6661-6a8d-4a1b-b912-59606f0b2841" width="100%" controls autoplay></video>
          </td>
          <td style="width: 25%; vertical-align: top; font-size: 1.2em;">
              <p>In the haunting backdrop of a war-torn city, where ruins and crumbled walls tell a story of devastation, a poignant close-up frames a young girl. Her face is smudged with ash, a silent testament to the chaos around her. Her eyes glistening with a mix of sorrow and resilience, capturing the raw emotion of a world that has lost its innocence to the ravages of conflict.</p>
          </td>
          <td style="width: 25%; vertical-align: top;">
              <video src="https://github.com/user-attachments/assets/938529c4-91ae-4f60-b96b-3c3947fa63cb" width="100%" controls autoplay></video>
          </td>
        </tr>
    </table>
    """)
233
+ def generate(prompt, num_inference_steps, guidance_scale, model_choice, progress=gr.Progress(track_tqdm=True)):
234
+ tensor = infer(prompt, num_inference_steps, guidance_scale, progress=progress)
235
+ video_path = save_video(tensor)
236
+ video_update = gr.update(visible=True, value=video_path)
237
+ gif_path = convert_to_gif(video_path)
238
+ gif_update = gr.update(visible=True, value=gif_path)
239
+
240
+ return video_path, video_update, gif_update
241
+
242
+
243
+ def enhance_prompt_func(prompt):
244
+ return convert_prompt(prompt, retry_times=1)
245
+
246
+
247
    # Wire "Generate Video": the three UI inputs feed generate(); its outputs
    # fill the preview player and reveal the MP4/GIF download widgets.
    generate_button.click(
        generate,
        inputs=[prompt, num_inference_steps, guidance_scale],
        outputs=[video_output, download_video_button, download_gif_button]
    )

    # "Enhance Prompt" rewrites the prompt textbox in place.
    enhance_button.click(
        enhance_prompt_func,
        inputs=[prompt],
        outputs=[prompt]
    )
258
+
259
if __name__ == "__main__":
    # Serve the Blocks app; fixed port 7870 (the Space's configured app_port
    # is 7860 — presumably remapped by the host; verify against deployment).
    demo.launch(server_port=7870)
requirements.txt CHANGED
@@ -1,11 +1,10 @@
1
- gradio==4.41.0
2
  imageio-ffmpeg==0.5.1
3
  torch==2.2.0
4
- git+https://github.com/huggingface/diffusers.git@main#egg=diffusers
5
- transformers==4.42.0
6
  spaces==0.29.2
7
  moviepy==1.0.3
8
- openai==1.40.3
9
  git+https://github.com/huggingface/accelerate.git@main#egg=accelerate
10
- sentencepiece==0.2.0
11
- Pillow==9.5.0
 
1
+ gradio==4.42.0
2
  imageio-ffmpeg==0.5.1
3
  torch==2.2.0
4
+ diffusers==0.30.1
5
+ transformers==4.44.2
6
  spaces==0.29.2
7
  moviepy==1.0.3
8
+ openai==1.42.0
9
  git+https://github.com/huggingface/accelerate.git@main#egg=accelerate
10
+ sentencepiece==0.2.0