fffiloni committed
Commit 68c6b17 · verified · 1 Parent(s): b048b56

Update app.py

Files changed (1): app.py +13 -10
app.py CHANGED
@@ -83,9 +83,16 @@ def preprocess_video_in(video_path):
 
     # Release the video capture object
    cap.release()
+
+    # scan all the JPEG frame names in this directory
+    scanned_frames = [
+        p for p in os.listdir(output_dir)
+        if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]
+    ]
+    scanned_frames.sort(key=lambda p: int(os.path.splitext(p)[0]))
 
     # 'image' is the first frame extracted from video_in
-    return first_frame, gr.State([]), gr.State([]), first_frame, first_frame, output_dir, None, None, gr.update(open=False)
+    return first_frame, gr.State([]), gr.State([]), first_frame, first_frame, output_dir, scanned_frames, None, None, gr.update(open=False)
 
 def get_point(point_type, tracking_points, trackings_input_label, first_frame_path, evt: gr.SelectData):
     print(f"You selected {evt.value} at {evt.index} from {evt.target}")
@@ -220,7 +227,7 @@ def load_model(checkpoint):
 
 
 
-def sam_process(input_first_frame_image, checkpoint, tracking_points, trackings_input_label, video_frames_dir, progress=gr.Progress(track_tqdm=True)):
+def sam_process(input_first_frame_image, checkpoint, tracking_points, trackings_input_label, video_frames_dir, scanned_frames, progress=gr.Progress(track_tqdm=True)):
     # 1. We need to preprocess the video and store frames in the right directory
     # — Remember to use a unique ID for the folder
 
@@ -233,12 +240,7 @@ def sam_process(input_first_frame_image, checkpoint, tracking_points, trackings_
     video_dir = video_frames_dir
 
     # scan all the JPEG frame names in this directory
-    frame_names = [
-        p for p in os.listdir(video_dir)
-        if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]
-    ]
-    frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
-
+    frame_names = scanned_frames
 
     inference_state = predictor.init_state(video_path=video_dir)
 
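frame_names is kept even though predictor.init_state(video_path=video_dir) re-reads the directory on its own: as in the SAM 2 example notebooks, the sorted name list is handy for opening single frames later, e.g. for drawing mask overlays. A sketch of that assumed usage (load_frame is illustrative, not a function in app.py):

import os
from PIL import Image

def load_frame(video_dir, frame_names, frame_idx):
    # frame_names arrives pre-sorted from preprocess_video_in
    return Image.open(os.path.join(video_dir, frame_names[frame_idx]))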
 
@@ -357,6 +359,7 @@ with gr.Blocks() as demo:
     tracking_points = gr.State([])
     trackings_input_label = gr.State([])
     video_frames_dir = gr.State()
+    scanned_frames = gr.State()
     stored_inference_state = gr.State()
     stored_frame_names = gr.State()
     with gr.Column():
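The new scanned_frames = gr.State() is what carries the list from preprocess_video_in to sam_process: a handler writes into a State by listing it among its outputs, and a later handler reads it back by listing it among its inputs. A self-contained sketch of that round trip (component and function names here are illustrative, not the app's):

import gradio as gr

def produce(text):
    items = text.split()                        # do the work once...
    return f"cached {len(items)} items", items  # ...and store it in State

def consume(items):
    return ", ".join(items)                     # a later event reuses it

with gr.Blocks() as demo:
    cached = gr.State()  # holds any Python object for the session
    inp = gr.Textbox(label="input")
    status = gr.Textbox(label="status")
    out = gr.Textbox(label="output")
    inp.submit(fn=produce, inputs=[inp], outputs=[status, cached])
    gr.Button("use cache").click(fn=consume, inputs=[cached], outputs=[out])

demo.launch()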
@@ -412,7 +415,7 @@ with gr.Blocks() as demo:
     video_in.upload(
         fn = preprocess_video_in,
         inputs = [video_in],
-        outputs = [first_frame_path, tracking_points, trackings_input_label, input_first_frame_image, points_map, video_frames_dir, stored_inference_state, stored_frame_names, video_in_drawer],
+        outputs = [first_frame_path, tracking_points, trackings_input_label, input_first_frame_image, points_map, video_frames_dir, scanned_frames, stored_inference_state, stored_frame_names, video_in_drawer],
         queue = False
     )
 
@@ -425,7 +428,7 @@ with gr.Blocks() as demo:
 
     submit_btn.click(
         fn = sam_process,
-        inputs = [input_first_frame_image, checkpoint, tracking_points, trackings_input_label, video_frames_dir],
+        inputs = [input_first_frame_image, checkpoint, tracking_points, trackings_input_label, video_frames_dir, scanned_frames],
         outputs = [output_result, stored_frame_names, stored_inference_state]
     )
 
 
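Design note: after this commit the JPEG directory is scanned exactly once, at upload time, instead of on every click of submit_btn, and sam_process keeps the local alias frame_names = scanned_frames so the rest of its body is unchanged. The cost is one more piece of session state to thread through: the position of scanned_frames in the upload outputs must match its position in preprocess_video_in's return tuple, and its position in the click inputs must match sam_process's parameter order.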