Harisreedhar committed on
Commit cf144f1 • 1 Parent(s): 85ce599

Update app.py

Files changed (1): app.py (+47 -4)
app.py CHANGED

@@ -13,6 +13,7 @@ import onnxruntime
 import numpy as np
 import gradio as gr
 from tqdm import tqdm
+import concurrent.futures
 from moviepy.editor import VideoFileClip

 from nsfw_detector import get_nsfw_detector

@@ -262,17 +263,59 @@ def process(
     torch.cuda.empty_cache()

     split_preds = split_list_by_lengths(preds, num_faces_per_frame)
+    del preds
     split_aimgs = split_list_by_lengths(aimgs, num_faces_per_frame)
+    del aimgs
     split_matrs = split_list_by_lengths(matrs, num_faces_per_frame)
+    del matrs

     yield "### \n ⌛ Post-processing...", *ui_before()
-    for idx, frame_img in tqdm(enumerate(image_sequence), total=len(image_sequence), desc="Post-Processing"):
+    def process_frame(frame_idx, frame_img, split_preds, split_aimgs, split_matrs, enable_laplacian_blend, crop_top, crop_bott, crop_left, crop_right):
         whole_img_path = frame_img
         whole_img = cv2.imread(whole_img_path)
-        for p, a, m in zip(split_preds[idx], split_aimgs[idx], split_matrs[idx]):
-            whole_img = paste_to_whole(p, a, m, whole_img, laplacian_blend=enable_laplacian_blend, crop_mask=(crop_top,crop_bott,crop_left,crop_right))
+        for p, a, m in zip(split_preds[frame_idx], split_aimgs[frame_idx], split_matrs[frame_idx]):
+            whole_img = paste_to_whole(p, a, m, whole_img, laplacian_blend=enable_laplacian_blend, crop_mask=(crop_top, crop_bott, crop_left, crop_right))
         cv2.imwrite(whole_img_path, whole_img)

+    def optimize_processing(image_sequence, split_preds, split_aimgs, split_matrs, enable_laplacian_blend, crop_top, crop_bott, crop_left, crop_right):
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            futures = []
+            for idx, frame_img in enumerate(image_sequence):
+                future = executor.submit(
+                    process_frame,
+                    idx,
+                    frame_img,
+                    split_preds,
+                    split_aimgs,
+                    split_matrs,
+                    enable_laplacian_blend,
+                    crop_top,
+                    crop_bott,
+                    crop_left,
+                    crop_right
+                )
+                futures.append(future)
+
+            for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures), desc="Post-Processing"):
+                try:
+                    result = future.result()
+                except Exception as e:
+                    print(f"An error occurred: {e}")
+
+    # Usage:
+    optimize_processing(
+        image_sequence,
+        split_preds,
+        split_aimgs,
+        split_matrs,
+        enable_laplacian_blend,
+        crop_top,
+        crop_bott,
+        crop_left,
+        crop_right
+    )
+
+

 ## ------------------------------ IMAGE ------------------------------

@@ -621,7 +664,7 @@ with gr.Blocks(css=css) as interface:

         with gr.Group():
             input_type = gr.Radio(
-                ["Image", "Video"],
+                ["Image", "Video", "Directory"],
                 label="Target Type",
                 value="Video",
             )
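
For readers unfamiliar with the pattern the new post-processing code adopts, here is a minimal, self-contained sketch of the concurrent.futures submit()/as_completed() idiom it uses. The process_frame stub and the frame file names below are illustrative placeholders, not code from app.py:

import concurrent.futures
from tqdm import tqdm

def process_frame(idx, path):
    # Placeholder for the per-frame work (read image, blend faces, write back).
    return idx, path

# Hypothetical frame paths standing in for image_sequence.
frame_paths = [f"frame_{i:04d}.png" for i in range(100)]

with concurrent.futures.ThreadPoolExecutor() as executor:
    # submit() schedules one task per frame and returns a Future immediately.
    futures = [executor.submit(process_frame, i, p) for i, p in enumerate(frame_paths)]

    # as_completed() yields futures in the order they finish, not the order
    # they were submitted, so the progress bar advances as frames complete.
    for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
        try:
            future.result()  # re-raises any exception from the worker thread
        except Exception as e:
            print(f"An error occurred: {e}")

Calling future.result() re-raises exceptions from the worker thread, which is why the commit wraps it in try/except. A thread pool (rather than a process pool) is a reasonable fit here because the per-frame work is dominated by reading and writing image files, and threads share split_preds, split_aimgs and split_matrs without copying them.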