Krokodilpirat committed
Commit 4423b71 · verified · 1 Parent(s): 46294a9

Update app.py

Files changed (1):
  1. app.py (+7 -6)
app.py CHANGED
@@ -11,6 +11,7 @@ from utils.dc_utils import read_video_frames, save_video
 from huggingface_hub import hf_hub_download
 
 # Examples for the Gradio Demo
+# The additional parameters (stitch, grayscale, blur) were added here with default values.
 examples = [
     ['assets/example_videos/davis_rollercoaster.mp4', -1, -1, 1280, False, False, 0],
     ['assets/example_videos/Tokyo-Walk_rgb.mp4', -1, -1, 1280, False, False, 0],
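Review note: each example row lines up positionally with the inputs list passed to gr.Examples further down ([input_video, max_len, target_fps, max_res, stitch_option, grayscale_option, blur_slider]), so the three values appended to every row are defaults for the new stitch, grayscale, and blur controls.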
@@ -62,7 +63,7 @@ def infer_video_depth(
     stitch: bool = False,
     grayscale: bool = False,
     blur: float = 0.0,
-    *,  # From here on, keyword-only parameters follow:
+    *,  # Keyword-only parameters follow from here:
     output_dir: str = './outputs',
     input_size: int = 518,
 ):
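Review note: the bare * in the signature makes output_dir and input_size keyword-only, so callers cannot set them positionally. A minimal standalone sketch of the behavior, with generic names rather than the full app.py signature:

def process(video_path, blur=0.0, *, output_dir='./outputs', input_size=518):
    # Parameters after the bare * must be passed by keyword.
    return video_path, blur, output_dir, input_size

process('clip.mp4', 0.5, input_size=256)   # OK: keyword-only argument passed by name
# process('clip.mp4', 0.5, './out', 256)   # TypeError: too many positional arguments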
@@ -96,11 +97,10 @@ def infer_video_depth(
             depth_vis = np.stack([depth_norm] * 3, axis=-1)
         else:
             cmap = cm.get_cmap("inferno")
-            # cmap returns RGBA; we use only the first 3 channels and scale to 255
             depth_vis = (cmap(depth_norm / 255.0)[..., :3] * 255).astype(np.uint8)
         # Apply Gaussian blur if requested (blur factor > 0)
         if blur > 0:
-            kernel_size = int(blur * 20) * 2 + 1  # ensures odd kernel size
+            kernel_size = int(blur * 20) * 2 + 1  # ensures an odd kernel size
             depth_vis = cv2.GaussianBlur(depth_vis, (kernel_size, kernel_size), 0)
         # Concatenate side-by-side: RGB frame on the left, processed depth on the right
         stitched = cv2.hconcat([rgb_frame, depth_vis])
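Review note on the kernel_size line: cv2.GaussianBlur rejects even kernel sizes, and int(blur * 20) * 2 + 1 is odd for any blur value, mapping the [0, 1] blur slider to odd kernels between 1 and 41. A minimal standalone sketch (not from the commit) that checks this:

import cv2
import numpy as np

def odd_kernel(blur: float) -> int:
    # 2 * n + 1 is odd for every integer n, so the result is always
    # a valid GaussianBlur kernel size.
    return int(blur * 20) * 2 + 1

frame = np.zeros((64, 64, 3), dtype=np.uint8)
for blur in (0.05, 0.5, 1.0):
    k = odd_kernel(blur)
    assert k % 2 == 1
    cv2.GaussianBlur(frame, (k, k), 0)  # accepts (k, k) because k is odd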
@@ -123,8 +123,8 @@ def construct_demo():
 
     with gr.Row(equal_height=True):
         with gr.Column(scale=1):
-            # Use source="upload" here for the file-upload field
-            input_video = gr.Video(label="Input Video", source="upload")
+            # Here we use the Video component without the 'source' parameter.
+            input_video = gr.Video(label="Input Video")
         with gr.Column(scale=2):
             with gr.Row(equal_height=True):
                 processed_video = gr.Video(label="Preprocessed Video", interactive=False, autoplay=True, loop=True, show_share_button=True, scale=5)
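Review note: dropping source="upload" tracks the Gradio 4.x API change that removed the source keyword from gr.Video. If upload-only input is still wanted, newer Gradio versions take a sources list instead; a hedged one-line sketch, assuming Gradio 4.x or later:

# Assumption: Gradio 4.x+, where gr.Video accepts a sources list
# instead of the removed source keyword.
input_video = gr.Video(label="Input Video", sources=["upload"])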
@@ -149,7 +149,8 @@ def construct_demo():
         inputs=[input_video, max_len, target_fps, max_res, stitch_option, grayscale_option, blur_slider],
         outputs=[processed_video, depth_vis_video, stitched_video],
         fn=infer_video_depth,
-        cache_examples="lazy",
+        cache_examples=True,
+        cache_mode="lazy",
     )
 
     generate_btn.click(
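Review note: newer Gradio releases split the old cache_examples="lazy" shorthand into a boolean cache_examples plus a separate cache_mode, which is what this change adopts; the lazy-caching behavior itself is unchanged. A hedged compatibility sketch, assuming the split landed with Gradio 5:

import gradio as gr

# Choose caching kwargs by installed Gradio major version (assumption:
# cache_mode exists from Gradio 5 onward).
major = int(gr.__version__.split(".")[0])
cache_kwargs = (
    {"cache_examples": True, "cache_mode": "lazy"} if major >= 5
    else {"cache_examples": "lazy"}
)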
 