Zhouyan248 committed
Commit f333013 · 1 Parent(s): df005dc

Update app.py

Files changed (1)
  1. app.py +13 -8
app.py CHANGED
@@ -22,12 +22,16 @@ h1 {
 }
 """
 
-def infer(prompt, image_inp, seed_inp, sampling_steps,width,height):
+def infer(prompt, image_inp, seed_inp, sampling_steps,width,height,infer_type):
     setup_seed(seed_inp)
     args.num_sampling_steps = sampling_steps
     img = cv2.imread(image_inp)
     new_size = [height,width]
     args.image_size = new_size
+    if infer_type == 'ddpm':
+        args.sample_method = 'ddpm'
+    elif infer_type == 'ddim':
+        args.sample_method = 'ddim'
     vae, model, text_encoder, diffusion = model_i2v_fun(args)
     vae.to(device)
     model.to(device)
@@ -106,8 +110,10 @@ with gr.Blocks(css='style.css') as demo:
 
     with gr.Row():
        # control_task = gr.Dropdown(label="Task", choices=["Text-2-video", "Image-2-video"], value="Text-2-video", multiselect=False, elem_id="controltask-in")
-       sampling_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=250, step=1)
+       sampling_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=100, step=1)
        seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=250, elem_id="seed-in")
+    with gr.Row():
+       infer_type = gr.Dropdown(['ddpm','ddim'], label='infer_type',value='ddim')
     with gr.Row():
        width = gr.Slider(label='width',minimum=1,maximum=2000,value=512,step=1)
        height = gr.Slider(label='height',minimum=1,maximum=2000,value=320,step=1)
@@ -119,14 +125,14 @@ with gr.Blocks(css='style.css') as demo:
     # clean_btn = gr.Button("Clean video")
 
     video_out = gr.Video(label="Video result", elem_id="video-output", width = 800)
-    inputs = [prompt,image_inp, seed_inp, sampling_steps,width,height]
+    inputs = [prompt,image_inp, seed_inp, sampling_steps,width,height,infer_type]
     outputs = [video_out]
     ex = gr.Examples(
-        examples = [["./input/i2v/The_picture_shows_the_beauty_of_the_sea_and_at_the_same.png","A video of the beauty of the sea",123,250,560,240],
-                    ["./input/i2v/The_picture_shows_the_beauty_of_the_sea.png","A video of the beauty of the sea",14717,250,560,240],
-                    ["./input/i2v/Close-up_essence_is_poured_from_bottleKodak_Vision.png","A video of close-up essence is poured from bottleKodak Vision",178135313,250,560,240]],
+        examples = [["./input/i2v/The_picture_shows_the_beauty_of_the_sea_and_at_the_same.png","A video of the beauty of the sea",123,250,560,240,'ddim'],
+                    ["./input/i2v/The_picture_shows_the_beauty_of_the_sea.png","A video of the beauty of the sea",14717,250,560,240,'ddim'],
+                    ["./input/i2v/Close-up_essence_is_poured_from_bottleKodak_Vision.png","A video of close-up essence is poured from bottleKodak Vision",178135313,250,560,240,'ddim']],
         fn = infer,
-        inputs = [image_inp, prompt, seed_inp, sampling_steps,width,height],
+        inputs = [image_inp, prompt, seed_inp, sampling_steps,width,height,infer_type],
         outputs=[video_out],
         cache_examples=False
 
@@ -134,7 +140,6 @@ with gr.Blocks(css='style.css') as demo:
     )
     ex.dataset.headers = [""]
 
-    # control_task.change(change_task_options, inputs=[control_task], outputs=[canny_opt, hough_opt, normal_opt], queue=False)
     # clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
     submit_btn.click(infer, inputs, outputs)
     # share_button.click(None, [], [], _js=share_js)
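
For context, the sketch below shows how the pieces touched by this commit fit together: the new `infer_type` dropdown is appended as the last element of the click handler's input list, and `infer` maps its value onto `args.sample_method`. This is only a minimal sketch under assumptions, not the repository's app.py: the `Args` class is a hypothetical stand-in for the app's real `args` object, `setup_seed`, `model_i2v_fun`, and the actual video synthesis are omitted, and a Textbox stands in for the video output.

# Minimal sketch of the infer_type -> args.sample_method wiring (assumptions noted above).
import gradio as gr

class Args:
    # Hypothetical stand-in for the app's args; defaults mirror values visible in the diff.
    num_sampling_steps = 100
    sample_method = 'ddim'
    image_size = [320, 512]

args = Args()

def infer(prompt, image_inp, seed_inp, sampling_steps, width, height, infer_type):
    args.num_sampling_steps = sampling_steps
    args.image_size = [height, width]
    # The commit routes the dropdown choice straight onto args.sample_method.
    args.sample_method = 'ddpm' if infer_type == 'ddpm' else 'ddim'
    return f"would sample with {args.sample_method} for {args.num_sampling_steps} steps at {width}x{height}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    image_inp = gr.Image(type="filepath", label="Input image")
    sampling_steps = gr.Slider(minimum=50, maximum=300, value=100, step=1, label="Steps")
    seed_inp = gr.Slider(minimum=0, maximum=2147483647, value=250, step=1, label="Seed")
    infer_type = gr.Dropdown(['ddpm', 'ddim'], value='ddim', label='infer_type')
    width = gr.Slider(minimum=1, maximum=2000, value=512, step=1, label='width')
    height = gr.Slider(minimum=1, maximum=2000, value=320, step=1, label='height')
    result = gr.Textbox(label="Result")
    # Same argument order as the commit's inputs list, with infer_type last.
    gr.Button("Generate").click(
        infer,
        inputs=[prompt, image_inp, seed_inp, sampling_steps, width, height, infer_type],
        outputs=[result],
    )

if __name__ == "__main__":
    demo.launch()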