Update app.py
app.py
CHANGED
@@ -51,163 +51,173 @@ print("LD_LIBRARY_PATH:", os.environ['LD_LIBRARY_PATH'])
 
 from stf_utils import STFPipeline
 
-
-def partial_fields(target_class, kwargs):
-    return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})
-
-# set tyro theme
-tyro.extras.set_accent_color("bright_cyan")
-args = tyro.cli(ArgumentConfig)
-
-# specify configs for inference
-inference_cfg = partial_fields(InferenceConfig, args.__dict__)  # use attribute of args to initial InferenceConfig
-crop_cfg = partial_fields(CropConfig, args.__dict__)  # use attribute of args to initial CropConfig
-
-# gradio_pipeline = GradioPipeline(
-#     inference_cfg=inference_cfg,
-#     crop_cfg=crop_cfg,
-#     args=args
-# )
-
-@spaces.GPU(duration=120)
-def gpu_wrapped_execute_video(*args, **kwargs):
-    return gradio_pipeline.execute_video(*args, **kwargs)
-
-@spaces.GPU(duration=120)
-def gpu_wrapped_execute_image(*args, **kwargs):
-    return gradio_pipeline.execute_image(*args, **kwargs)
-
-@spaces.GPU(duration=120)
-def gpu_wrapped_stf_pipeline_execute(audio_path):
-    return stf_pipeline.execute(audio_path)
-
-@spaces.GPU(duration=120)
-def gpu_wrapped_elevenlabs_pipeline_generate_voice(text, voice):
-    return elevenlabs_pipeline.generate_voice(text, voice)
-
-
-def is_square_video(video_path):
-    video = cv2.VideoCapture(video_path)
-
-    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-    video.release()
-    if width != height:
-        raise gr.Error("Error: the video does not have a square aspect ratio. We currently only support square videos")
-
-    return gr.update(visible=True)
-
-
-# assets
-title_md = "assets/gradio_title.md"
-example_portrait_dir = "assets/examples/source"
-example_video_dir = "assets/examples/driving"
-data_examples = [
-    [osp.join(example_portrait_dir, "s9.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
-    [osp.join(example_portrait_dir, "s6.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
-    [osp.join(example_portrait_dir, "s10.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
-    [osp.join(example_portrait_dir, "s5.jpg"), osp.join(example_video_dir, "d18.mp4"), True, True, True, True],
-    [osp.join(example_portrait_dir, "s7.jpg"), osp.join(example_video_dir, "d19.mp4"), True, True, True, True],
-    [osp.join(example_portrait_dir, "s22.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
-]
-#################### interface logic ####################
-
-
-
-# Define components first
-eye_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target eyes-open ratio")
-lip_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target lip-open ratio")
-retargeting_input_image = gr.Image(type="filepath")
-output_image = gr.Image(type="numpy")
-output_image_paste_back = gr.Image(type="numpy")
-output_video = gr.Video()
-output_video_concat = gr.Video()
-
-
-
-def run_end_to_end(image_path, text, voice, input_video, flag_relative, flag_do_crop, flag_remap, flag_crop_driving_video, male): #, animal):
-
-    # # use a different pipeline depending on whether "animal" is checked
-    # if animal:
-    #     gradio_pipeline = GradioPipelineAnimal(
-    #         inference_cfg=inference_cfg,
-    #         crop_cfg=crop_cfg,
-    #         args=args
-    #     )
-    # else:
-    #     gradio_pipeline = GradioPipeline(
-    #         inference_cfg=inference_cfg,
-    #         crop_cfg=crop_cfg,
-    #         args=args
-    #     )
-
-
-    if input_video is None:
-
-        if not male:
-            stf_pipeline = STFPipeline()
-        else:
-            stf_pipeline = STFPipeline(template_video_path="/home/user/app/stf/TEMP/Cam2_2309071202_0012_Natural_Looped.mp4",
-                                       config_path="/home/user/app/stf/TEMP/front_config_v3.json",
-                                       checkpoint_path="/home/user/app/stf/TEMP/0157.pth",
-                                       )
-
-    if input_video is None:
-        #audio_path = elevenlabs_pipeline.generate_voice(text, voice)
-        audio_path = gpu_wrapped_elevenlabs_pipeline_generate_voice(text, voice)
-        #driving_video_path = stf_pipeline.execute(audio_path)
-        driving_video_path = gpu_wrapped_stf_pipeline_execute(audio_path)
-    else:
-        driving_video_path = input_video
-        os.makedirs("animations", exist_ok=True)
-        audio_path = osp.join("animations", Path(input_video).stem + ".wav")
-        extract_audio(driving_video_path, audio_path)
-
-
-    #output_path, crop_output_path = gradio_pipeline.execute_video(
-    output_path, crop_output_path = gpu_wrapped_execute_video(
-        input_image_path=image_path,
-        input_video_path=driving_video_path,
-        # input_driving_video_pickle_path=None,
-        flag_do_crop_input=flag_do_crop,
-        flag_remap_input=flag_remap,
-        flag_relative_input=flag_relative,
-        # driving_multiplier=1.0,
-        # flag_stitching=False,
-        # flag_crop_driving_video_input=flag_crop_driving_video,
-        # scale=2.3,
-        # vx_ratio=0.0,
-        # vy_ratio=-0.125,
-        # scale_crop_driving_video=2.2,
-        # vx_ratio_crop_driving_video=0.0,
-        # vy_ratio_crop_driving_video=-0.1,
-        # tab_selection=None,
-        audio_path=audio_path
-    )
-
-    return output_path, crop_output_path
-
-
-
-###### testing in progress ######
-
-stf_pipeline = STFPipeline()
-
-stf_button = gr.Button("stf test", variant="primary")
-stf_button.click(
-    fn=gpu_wrapped_stf_pipeline_execute,
-    inputs=[
-        audio_path
-    ],
-    outputs=[driving_video_path]
-)
-
-###### testing in progress ######
+stf_pipeline = STFPipeline()
+
+@spaces.GPU(duration=120)
+def gpu_wrapped_stf_pipeline_execute(audio_path):
+    return stf_pipeline.execute(audio_path)
+
+
+# def partial_fields(target_class, kwargs):
+#     return target_class(**{k: v for k, v in kwargs.items() if hasattr(target_class, k)})
+
+# # set tyro theme
+# tyro.extras.set_accent_color("bright_cyan")
+# args = tyro.cli(ArgumentConfig)
+
+# # specify configs for inference
+# inference_cfg = partial_fields(InferenceConfig, args.__dict__)  # use attribute of args to initial InferenceConfig
+# crop_cfg = partial_fields(CropConfig, args.__dict__)  # use attribute of args to initial CropConfig
+
+# # gradio_pipeline = GradioPipeline(
+# #     inference_cfg=inference_cfg,
+# #     crop_cfg=crop_cfg,
+# #     args=args
+# # )
+
+# @spaces.GPU(duration=120)
+# def gpu_wrapped_execute_video(*args, **kwargs):
+#     return gradio_pipeline.execute_video(*args, **kwargs)
+
+# @spaces.GPU(duration=120)
+# def gpu_wrapped_execute_image(*args, **kwargs):
+#     return gradio_pipeline.execute_image(*args, **kwargs)
+
+# @spaces.GPU(duration=120)
+# def gpu_wrapped_stf_pipeline_execute(audio_path):
+#     return stf_pipeline.execute(audio_path)
+
+# @spaces.GPU(duration=120)
+# def gpu_wrapped_elevenlabs_pipeline_generate_voice(text, voice):
+#     return elevenlabs_pipeline.generate_voice(text, voice)
+
+
+# def is_square_video(video_path):
+#     video = cv2.VideoCapture(video_path)
+
+#     width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
+#     height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+#     video.release()
+#     if width != height:
+#         raise gr.Error("Error: the video does not have a square aspect ratio. We currently only support square videos")
+
+#     return gr.update(visible=True)
+
+
+# # assets
+# title_md = "assets/gradio_title.md"
+# example_portrait_dir = "assets/examples/source"
+# example_video_dir = "assets/examples/driving"
+# data_examples = [
+#     [osp.join(example_portrait_dir, "s9.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
+#     [osp.join(example_portrait_dir, "s6.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
+#     [osp.join(example_portrait_dir, "s10.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
+#     [osp.join(example_portrait_dir, "s5.jpg"), osp.join(example_video_dir, "d18.mp4"), True, True, True, True],
+#     [osp.join(example_portrait_dir, "s7.jpg"), osp.join(example_video_dir, "d19.mp4"), True, True, True, True],
+#     [osp.join(example_portrait_dir, "s22.jpg"), osp.join(example_video_dir, "d0.mp4"), True, True, True, True],
+# ]
+# #################### interface logic ####################
+
+
+
+# # Define components first
+# eye_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target eyes-open ratio")
+# lip_retargeting_slider = gr.Slider(minimum=0, maximum=0.8, step=0.01, label="target lip-open ratio")
+# retargeting_input_image = gr.Image(type="filepath")
+# output_image = gr.Image(type="numpy")
+# output_image_paste_back = gr.Image(type="numpy")
+# output_video = gr.Video()
+# output_video_concat = gr.Video()
+
+
+
+# def run_end_to_end(image_path, text, voice, input_video, flag_relative, flag_do_crop, flag_remap, flag_crop_driving_video, male): #, animal):
+
+#     # # use a different pipeline depending on whether "animal" is checked
+#     # if animal:
+#     #     gradio_pipeline = GradioPipelineAnimal(
+#     #         inference_cfg=inference_cfg,
+#     #         crop_cfg=crop_cfg,
+#     #         args=args
+#     #     )
+#     # else:
+#     #     gradio_pipeline = GradioPipeline(
+#     #         inference_cfg=inference_cfg,
+#     #         crop_cfg=crop_cfg,
+#     #         args=args
+#     #     )
+
+
+#     if input_video is None:
+
+#         if not male:
+#             stf_pipeline = STFPipeline()
+#         else:
+#             stf_pipeline = STFPipeline(template_video_path="/home/user/app/stf/TEMP/Cam2_2309071202_0012_Natural_Looped.mp4",
+#                                        config_path="/home/user/app/stf/TEMP/front_config_v3.json",
+#                                        checkpoint_path="/home/user/app/stf/TEMP/0157.pth",
+#                                        )
+
+#     if input_video is None:
+#         #audio_path = elevenlabs_pipeline.generate_voice(text, voice)
+#         audio_path = gpu_wrapped_elevenlabs_pipeline_generate_voice(text, voice)
+#         #driving_video_path = stf_pipeline.execute(audio_path)
+#         driving_video_path = gpu_wrapped_stf_pipeline_execute(audio_path)
+#     else:
+#         driving_video_path = input_video
+#         os.makedirs("animations", exist_ok=True)
+#         audio_path = osp.join("animations", Path(input_video).stem + ".wav")
+#         extract_audio(driving_video_path, audio_path)
+
+
+#     #output_path, crop_output_path = gradio_pipeline.execute_video(
+#     output_path, crop_output_path = gpu_wrapped_execute_video(
+#         input_image_path=image_path,
+#         input_video_path=driving_video_path,
+#         # input_driving_video_pickle_path=None,
+#         flag_do_crop_input=flag_do_crop,
+#         flag_remap_input=flag_remap,
+#         flag_relative_input=flag_relative,
+#         # driving_multiplier=1.0,
+#         # flag_stitching=False,
+#         # flag_crop_driving_video_input=flag_crop_driving_video,
+#         # scale=2.3,
+#         # vx_ratio=0.0,
+#         # vy_ratio=-0.125,
+#         # scale_crop_driving_video=2.2,
+#         # vx_ratio_crop_driving_video=0.0,
+#         # vy_ratio_crop_driving_video=-0.1,
+#         # tab_selection=None,
+#         audio_path=audio_path
+#     )
+
+#     return output_path, crop_output_path
+
+
+
+# ###### testing in progress ######
+
+# stf_pipeline = STFPipeline()
+
+# with gr.Blocks(theme=gr.themes.Soft()) as demo:
+#     stf_button = gr.Button("stf test", variant="primary")
+#     stf_button.click(
+#         fn=gpu_wrapped_stf_pipeline_execute,
+#         inputs=[
+#             audio_path
+#         ],
+#         outputs=[driving_video_path]
+#     )
+
+# ###### testing in progress ######
 
 
 # with gr.Blocks(theme=gr.themes.Soft()) as demo:
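The net effect of this commit is to comment out the old module-level demo code and keep only one live path: `STFPipeline` is instantiated once at import time, and its `execute` call is wrapped with `@spaces.GPU(duration=120)` so that ZeroGPU attaches a GPU only while that function runs. Below is a minimal sketch of how that wrapper could be wired into the `gr.Blocks` demo that the commented-out test block hints at. It assumes `STFPipeline.execute(audio_path)` returns the path of the generated driving video; the component names `audio_input` and `driving_video_output` are hypothetical stand-ins, since the original `audio_path`/`driving_video_path` components are not defined anywhere in this hunk.

```python
import gradio as gr
import spaces  # Hugging Face ZeroGPU helper, available on ZeroGPU Spaces

from stf_utils import STFPipeline

# Build the pipeline once at import time (CPU side); a GPU is only
# attached for the duration of the decorated call below.
stf_pipeline = STFPipeline()


@spaces.GPU(duration=120)  # request a GPU for up to 120 s per call
def gpu_wrapped_stf_pipeline_execute(audio_path):
    # Assumption: execute() takes an audio file path and returns the
    # file path of the generated driving video.
    return stf_pipeline.execute(audio_path)


with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Hypothetical components mirroring the commented-out test block.
    audio_input = gr.Audio(type="filepath", label="driving audio")
    driving_video_output = gr.Video(label="driving video")
    stf_button = gr.Button("stf test", variant="primary")
    stf_button.click(
        fn=gpu_wrapped_stf_pipeline_execute,
        inputs=[audio_input],
        outputs=[driving_video_output],
    )

demo.launch()
```

One design point worth noting: on ZeroGPU Spaces the heavyweight object must be constructed at module scope and only the inference call decorated, because the GPU exists solely inside `@spaces.GPU` functions; this is exactly the restructuring the commit performs.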