Commit 4d461ba · 1 Parent(s): 8a46c43
Update app.py

app.py CHANGED
@@ -22,13 +22,13 @@ h1 {
 }
 """
 
-def infer(prompt, image_inp, seed_inp, ddim_steps):
+def infer(prompt, image_inp, seed_inp, ddim_steps,width,height):
     setup_seed(seed_inp)
     args.num_sampling_steps = ddim_steps
     ### first test the return type of the Image input
     print(prompt, seed_inp, ddim_steps, type(image_inp))
     img = cv2.imread(image_inp)
-    new_size = [
+    new_size = [height,width]
     # if(img.shape[0]==512 and img.shape[1]==512):
     #     args.image_size = [512,512]
     # elif(img.shape[0]==320 and img.shape[1]==512):
@@ -118,7 +118,9 @@ with gr.Blocks(css='style.css') as demo:
         # control_task = gr.Dropdown(label="Task", choices=["Text-2-video", "Image-2-video"], value="Text-2-video", multiselect=False, elem_id="controltask-in")
         ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
         seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=250, elem_id="seed-in")
-
+        with gr.Row():
+            width = gr.Slider(label='width',minimum=1,maximum=2000,value=512,step=1)
+            height = gr.Slider(label='height',minimum=1,maximum=2000,value=512,step=1)
         # ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
 
 
@@ -127,14 +129,14 @@ with gr.Blocks(css='style.css') as demo:
         clean_btn = gr.Button("Clean video")
 
         video_out = gr.Video(label="Video result", elem_id="video-output", width = 800)
-        inputs = [prompt,image_inp, seed_inp, ddim_steps]
+        inputs = [prompt,image_inp, seed_inp, ddim_steps,width,height]
         outputs = [video_out]
         ex = gr.Examples(
-            examples = [["./The_picture_shows_the_beauty_of_the_sea_.jpg","A video of the beauty of the sea",123,250],
-                        ["./The_picture_shows_the_beauty_of_the_sea.png","A video of the beauty of the sea",123,250],
-                        ["./Close-up_essence_is_poured_from_bottleKodak_Vision.png","A video of close-up essence is poured from bottleKodak Vision",123,250]],
+            examples = [["./The_picture_shows_the_beauty_of_the_sea_.jpg","A video of the beauty of the sea",123,250,512,512],
+                        ["./The_picture_shows_the_beauty_of_the_sea.png","A video of the beauty of the sea",123,250,512,512],
+                        ["./Close-up_essence_is_poured_from_bottleKodak_Vision.png","A video of close-up essence is poured from bottleKodak Vision",123,250,512,512]],
             fn = infer,
-            inputs = [image_inp, prompt, seed_inp, ddim_steps],
+            inputs = [image_inp, prompt, seed_inp, ddim_steps,width,height],
             outputs=[video_out],
             cache_examples=False
 
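For context, a stripped-down, self-contained sketch of how the new sliders could be wired to `infer` in a Gradio Blocks app. The prompt and image widgets, the run button name, and the sampling itself are assumptions here, since they sit outside the changed hunks:

```python
import gradio as gr

def infer(prompt, image_inp, seed_inp, ddim_steps, width, height):
    # Stand-in for the Space's real sampler: just echo the inputs.
    print(prompt, seed_inp, ddim_steps, width, height, type(image_inp))
    return None  # the real app would return a video path for gr.Video

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")                   # assumed widget (outside the diff)
    image_inp = gr.Image(type="filepath", label="Image")  # filepath type matches cv2.imread(image_inp)
    ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
    seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=250)
    with gr.Row():  # the row added in this commit
        width = gr.Slider(label='width', minimum=1, maximum=2000, value=512, step=1)
        height = gr.Slider(label='height', minimum=1, maximum=2000, value=512, step=1)
    video_out = gr.Video(label="Video result")
    run_btn = gr.Button("Run")  # assumed name; the submit button is outside the diff
    run_btn.click(fn=infer,
                  inputs=[prompt, image_inp, seed_inp, ddim_steps, width, height],
                  outputs=[video_out])

if __name__ == "__main__":
    demo.launch()
```

Gradio passes these components to `fn` positionally, so the `inputs` list has to line up with `infer`'s parameter order.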
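The first hunk only assigns `new_size = [height,width]`; whatever consumes it happens outside the changed lines. A minimal sketch of one plausible downstream use, assuming the conditioning image is resized with OpenCV (the helper name and the resize call are assumptions, not part of this commit); note that `cv2.resize` takes its target size as `(width, height)`, the reverse order of `new_size`:

```python
import cv2

def resize_conditioning_image(image_path, width, height):
    # Hypothetical helper, not in the Space: load the example image and
    # resize it to the user-selected resolution before sampling.
    img = cv2.imread(image_path)   # BGR HxWxC array, as in cv2.imread(image_inp) above
    new_size = [height, width]     # same [rows, cols] ordering as the committed code
    # cv2.resize expects (width, height), i.e. new_size reversed
    return cv2.resize(img, (new_size[1], new_size[0]))
```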