Linoy Tsaban committed
Commit 0b1c448 · Parent(s): e62a915
Update app.py

app.py CHANGED
@@ -64,9 +64,11 @@ def prep(config):
     model_key = "stabilityai/stable-diffusion-2-depth"
     toy_scheduler = DDIMScheduler.from_pretrained(model_key, subfolder="scheduler")
     toy_scheduler.set_timesteps(config["save_steps"])
+    print("config[save_steps]", config["save_steps"])
     timesteps_to_save, num_inference_steps = get_timesteps(toy_scheduler, num_inference_steps=config["save_steps"],
                                                            strength=1.0,
                                                            device=device)
+    print("YOOOO timesteps to save", timesteps_to_save)
 
     # seed_everything(config["seed"])
     if not config["frames"]: # original non demo setting
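A note on the call above: get_timesteps is defined elsewhere in the repo and is not part of this diff. With strength=1.0 it keeps the scheduler's full timestep sequence, so timesteps_to_save covers every saved inversion step. A plausible minimal sketch, modeled on the similarly named helper in diffusers' img2img pipelines (not the author's exact code), would be:

    def get_timesteps(scheduler, num_inference_steps, strength, device):
        # strength < 1.0 skips the earliest (noisiest) steps, as in img2img;
        # strength=1.0 keeps the full schedule.
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = scheduler.timesteps[t_start * scheduler.order:]
        return timesteps, num_inference_steps - t_start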
@@ -109,7 +111,7 @@ def preprocess_and_invert(input_video,
                           randomize_seed,
                           do_inversion,
                           # save_dir: str = "latents",
-                          steps
+                          steps,
                           n_timesteps = 50,
                           batch_size: int = 8,
                           n_frames: int = 40,
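The added comma above is not cosmetic: a parameter list with a missing separator is a SyntaxError, so the module would not even import before this change. A small illustration:

    # def f(steps
    #       n_timesteps=50):   # SyntaxError: invalid syntax
    def f(steps,
          n_timesteps=50):
        return steps, n_timesteps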
@@ -119,7 +121,7 @@ def preprocess_and_invert(input_video,
     sd_version = "2.1"
     height = 512
     weidth: int = 512
-
+    print("n timesteps", n_timesteps)
     if do_inversion or randomize_seed:
         preprocess_config = {}
         preprocess_config['H'] = height
@@ -128,7 +130,7 @@ def preprocess_and_invert(input_video,
         preprocess_config['sd_version'] = sd_version
         preprocess_config['steps'] = steps
         preprocess_config['batch_size'] = batch_size
-        preprocess_config['save_steps'] = n_timesteps
+        preprocess_config['save_steps'] = int(n_timesteps)
         preprocess_config['n_frames'] = n_frames
         preprocess_config['seed'] = seed
         preprocess_config['inversion_prompt'] = inversion_prompt
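On the int(...) cast introduced above: a Gradio slider value can arrive as a float, while save_steps is later handed to scheduler.set_timesteps and step-counting logic that expects an integer. A small self-contained illustration (values hypothetical):

    n_timesteps = 50.0                   # what a gr.Slider may deliver
    save_steps = int(n_timesteps)        # defensive coercion
    assert isinstance(save_steps, int)
    print(list(range(save_steps))[:3])   # now safe wherever an int is required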
@@ -141,6 +143,8 @@ def preprocess_and_invert(input_video,
     seed_everything(seed)
 
     frames, latents, total_inverted_latents, rgb_reconstruction = prep(preprocess_config)
+    print(total_inverted_latents.keys())
+    print(len(total_inverted_latents.keys()))
     frames = gr.State(value=frames)
     latents = gr.State(value=latents)
     inverted_latents = gr.State(value=total_inverted_latents)
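The gr.State wrappers above are what let the demo hold frames and inverted latents in per-session memory between the inversion event and the later editing event. A stripped-down sketch of the pattern (names and values hypothetical, not the app's actual wiring):

    import gradio as gr

    def invert(x, cache):
        if cache is None:          # expensive work happens once per session
            cache = x * 2          # stand-in for computing inverted latents
        return cache, cache

    with gr.Blocks() as demo:
        cache = gr.State(value=None)   # survives across events for one session
        inp = gr.Number(value=3)
        out = gr.Number()
        gr.Button("Run").click(invert, inputs=[inp, cache], outputs=[out, cache])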
@@ -173,7 +177,7 @@ def edit_with_pnp(input_video,
 
     config["sd_version"] = "2.1"
     config["device"] = device
-    config["n_timesteps"] = n_timesteps
+    config["n_timesteps"] = int(n_timesteps)
     config["n_frames"] = n_frames
     config["batch_size"] = batch_size
     config["guidance_scale"] = gudiance_scale
@@ -194,6 +198,7 @@ def edit_with_pnp(input_video,
                               randomize_seed,
                               do_inversion,
                               steps,
+                              n_timesteps,
                               batch_size,
                               n_frames,
                               inversion_prompt)
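The added n_timesteps argument keeps this positional call in step with the widened preprocess_and_invert signature; with positional arguments, forgetting the new slot silently shifts every later argument by one. For illustration (hypothetical signature):

    def f(steps, n_timesteps=50, batch_size=8):
        return steps, n_timesteps, batch_size

    f(500, 50, 8)   # correct: each value lands in its intended slot
    f(500, 8)       # bug: the 8 meant for batch_size binds to n_timesteps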
@@ -272,7 +277,7 @@ with gr.Blocks(css="style.css") as demo:
                     randomize_seed = gr.Checkbox(label='Randomize seed', value=False)
                     gudiance_scale = gr.Slider(label='Guidance Scale', minimum=1, maximum=30,
                                                value=7.5, step=0.5, interactive=True)
-                    steps = gr.Slider(label='Inversion steps', minimum=
+                    steps = gr.Slider(label='Inversion steps', minimum=10, maximum=500,
                                       value=500, step=1, interactive=True)
 
                 with gr.Column(min_width=100):
@@ -282,7 +287,7 @@ with gr.Blocks(css="style.css") as demo:
                     n_frames = gr.Slider(label='Num frames', minimum=2, maximum=200,
                                          value=24, step=1, interactive=True)
                     n_timesteps = gr.Slider(label='Diffusion steps', minimum=25, maximum=100,
-                                            value=
+                                            value=50, step=25, interactive=True)
                     n_fps = gr.Slider(label='Frames per second', minimum=1, maximum=60,
                                       value=10, step=1, interactive=True)
 
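The default chosen above sits on the slider's step grid: with minimum=25 and step=25 the reachable values are 25, 50, 75, 100, so value=50 is selectable in the UI. A quick sanity check one could apply to defaults like this (helper hypothetical):

    def on_grid(value, minimum, maximum, step):
        # True when a default is reachable from `minimum` in whole steps
        return minimum <= value <= maximum and (value - minimum) % step == 0

    assert on_grid(50, 25, 100, 25)        # the default in this commit
    assert not on_grid(60, 25, 100, 25)    # would never be reachable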
@@ -351,7 +356,5 @@ with gr.Blocks(css="style.css") as demo:
         outputs=[output_video]
     )
 
-
-
 demo.queue()
 demo.launch()