yerang committed on
Commit
66095e8
·
verified ·
1 Parent(s): 32a4f9e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -51
app.py CHANGED
@@ -63,11 +63,11 @@ args = tyro.cli(ArgumentConfig)
63
  inference_cfg = partial_fields(InferenceConfig, args.__dict__) # use attribute of args to initial InferenceConfig
64
  crop_cfg = partial_fields(CropConfig, args.__dict__) # use attribute of args to initial CropConfig
65
 
66
- gradio_pipeline = GradioPipeline(
67
- inference_cfg=inference_cfg,
68
- crop_cfg=crop_cfg,
69
- args=args
70
- )
71
 
72
  @spaces.GPU(duration=120)
73
  def gpu_wrapped_execute_video(*args, **kwargs):
@@ -191,58 +191,77 @@ def run_end_to_end(image_path, text, voice, input_video, flag_relative, flag_do_
191
 
192
 
193
 
 
 
 
 
 
 
194
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
195
- with gr.Tabs():
196
- # 첫 번째 탭: Text to LipSync
197
- with gr.Tab("Text to LipSync"):
198
- gr.Markdown("# Text to LipSync")
199
- with gr.Row():
200
- script_txt = gr.Text()
201
- voice = gr.Audio(label="사용자 음성", type="filepath")
202
- input_video = gr.Video()
203
-
204
- with gr.Row():
205
- image_input = gr.Image(type="filepath") # 여기서 image_input을 정의합니다.
206
- output_video.render()
207
- #crop_output_video.render()
208
- output_video_concat.render()
209
-
210
- with gr.Row():
211
- flag_relative_input = gr.Checkbox(value=True, label="relative motion")
212
- flag_do_crop_input = gr.Checkbox(value=True, label="do crop")
213
- flag_remap_input = gr.Checkbox(value=True, label="paste-back")
214
- flag_crop_driving_video_input = gr.Checkbox(value=False, label="do crop (driving video)")
215
- male = gr.Checkbox(value=False, label="male")
216
- #animal = gr.Checkbox(value=False, label="animal") # animal 체크박스 추가
217
-
218
- with gr.Row():
219
- generate_speech = gr.Button("🚀 Generate Speech", variant="primary")
220
-
221
- generate_speech.click(
222
- fn=run_end_to_end,
223
  inputs=[
224
- image_input,
225
- script_txt,
226
- voice,
227
- input_video,
228
- flag_relative_input,
229
- flag_do_crop_input,
230
- flag_remap_input,
231
- flag_crop_driving_video_input,
232
- male,
233
- #animal # 추가된 animal 입력
234
  ],
235
- outputs=[output_video, output_video_concat]
236
  )
237
 
238
- # # 두 번째 탭: FLUX 이미지 생성
239
- # with gr.Tab("FLUX 이미지 생성"):
240
- # flux_tab(image_input) # FLUX 이미지 생성을 위한 별도의 탭
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
241
 
242
- # # 세 번째 탭: Flux 개발용 탭
243
- # with gr.Tab("FLUX Dev"):
244
- # flux_demo = create_flux_tab() # Flux 개발용 탭 생성
245
- # #flux_demo.render() # 해당 UI를 별도의 탭에만 렌더링
246
 
247
 
248
 
 
63
  inference_cfg = partial_fields(InferenceConfig, args.__dict__) # use attribute of args to initial InferenceConfig
64
  crop_cfg = partial_fields(CropConfig, args.__dict__) # use attribute of args to initial CropConfig
65
 
66
+ # gradio_pipeline = GradioPipeline(
67
+ # inference_cfg=inference_cfg,
68
+ # crop_cfg=crop_cfg,
69
+ # args=args
70
+ # )
71
 
72
  @spaces.GPU(duration=120)
73
  def gpu_wrapped_execute_video(*args, **kwargs):
 
191
 
192
 
193
 
194
+
195
+ ###### 테스트중 ######
196
+
197
+
198
+ stf_pipeline = STFPipeline()
199
+
200
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
201
+ stf_button = gr.Button("stf test", variant="primary")
202
+ stf_button.click(
203
+ fn=gpu_wrapped_stf_pipeline_execute,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  inputs=[
205
+ audio_path
 
 
 
 
 
 
 
 
 
206
  ],
207
+ outputs=[driving_video_path]
208
  )
209
 
210
+ ###### 테스트중 ######
211
+
212
+
213
+ # with gr.Blocks(theme=gr.themes.Soft()) as demo:
214
+ # with gr.Tabs():
215
+ # # 첫 번째 탭: Text to LipSync
216
+ # with gr.Tab("Text to LipSync"):
217
+ # gr.Markdown("# Text to LipSync")
218
+ # with gr.Row():
219
+ # script_txt = gr.Text()
220
+ # voice = gr.Audio(label="사용자 음성", type="filepath")
221
+ # input_video = gr.Video()
222
+
223
+ # with gr.Row():
224
+ # image_input = gr.Image(type="filepath") # 여기서 image_input을 정의합니다.
225
+ # output_video.render()
226
+ # #crop_output_video.render()
227
+ # output_video_concat.render()
228
+
229
+ # with gr.Row():
230
+ # flag_relative_input = gr.Checkbox(value=True, label="relative motion")
231
+ # flag_do_crop_input = gr.Checkbox(value=True, label="do crop")
232
+ # flag_remap_input = gr.Checkbox(value=True, label="paste-back")
233
+ # flag_crop_driving_video_input = gr.Checkbox(value=False, label="do crop (driving video)")
234
+ # male = gr.Checkbox(value=False, label="male")
235
+ # #animal = gr.Checkbox(value=False, label="animal") # animal 체크박스 추가
236
+
237
+ # with gr.Row():
238
+ # generate_speech = gr.Button("🚀 Generate Speech", variant="primary")
239
+
240
+ # generate_speech.click(
241
+ # fn=run_end_to_end,
242
+ # inputs=[
243
+ # image_input,
244
+ # script_txt,
245
+ # voice,
246
+ # input_video,
247
+ # flag_relative_input,
248
+ # flag_do_crop_input,
249
+ # flag_remap_input,
250
+ # flag_crop_driving_video_input,
251
+ # male,
252
+ # #animal # 추가된 animal 입력
253
+ # ],
254
+ # outputs=[output_video, output_video_concat]
255
+ # )
256
+
257
+ # # # 두 번째 탭: FLUX 이미지 생성
258
+ # # with gr.Tab("FLUX 이미지 생성"):
259
+ # # flux_tab(image_input) # FLUX 이미지 생성을 위한 별도의 탭
260
 
261
+ # # # 세 번째 탭: Flux 개발용 탭
262
+ # # with gr.Tab("FLUX Dev"):
263
+ # # flux_demo = create_flux_tab() # Flux 개발용 탭 생성
264
+ # # #flux_demo.render() # 해당 UI를 별도의 탭에만 렌더링
265
 
266
 
267