ostapagon committed
Commit ab602e8 · 1 Parent(s): 82b4450

Add some notes

Files changed (2):
  1. app.py (+1, −0)
  2. demo/mast3r_demo.py (+8, −9)
app.py CHANGED

@@ -30,6 +30,7 @@ if __name__ == '__main__':
             <li>MASt3R is used to obtain the initial point cloud and camera parameters.</li>
             <li>3DGS is then trained on the results from MASt3R to refine the 3D scene representation.</li>
         </ol>
+        <p style="font-size: 16px;">Note: After a page reload, any generated MASt3R datasets in the 3DGS tab will be deleted.</p>
         <p style="font-size: 16px;">For a full version of this pipeline, please visit the repository at:</p>
         <a href="https://github.com/nerlfield/wild-gaussian-splatting" target="_blank" style="font-size: 16px; text-decoration: none;">nerlfield/wild-gaussian-splatting</a>
     </div>
demo/mast3r_demo.py CHANGED

@@ -274,7 +274,7 @@ def mast3r_demo_tab():
         <div style="padding: 10px; border-radius: 5px; margin-bottom: 10px;">
             <h3>Instructions for MASt3R Demo</h3>
             <ul style="text-align: left; color: #333;">
-                <li>Upload images. It is recommended to use no more than 10-12 images to avoid exceeding the 3-minute runtime limit for zeroGPU dynamic resources.</li>
+                <li>Upload images. It is recommended to use no more than 7-10 images to avoid exceeding the 3-minute runtime limit for zeroGPU dynamic resources.</li>
                 <li>Press the "Run" button to start the process.</li>
                 <li>Once the stage is finished and the point cloud with cameras is visible below, switch to the 3DGS tab and follow the instructions there.</li>
             </ul>
@@ -312,34 +312,33 @@ def mast3r_demo_tab():
     turtle_images = [os.path.join(turtle_folder, file) for file in os.listdir(turtle_folder) if file.endswith('.jpg')]
     puma_images = [os.path.join(puma_folder, file) for file in os.listdir(puma_folder)[:12] if file.endswith('.jpg')]
 
-
     examples = gradio.Examples(
         examples=[
             [
-                puma_images[0],
+                turtle_images[0],
                 None,
                 1.5, 0.0, 0.2, True, True, False,
-                puma_images,
+                turtle_images,
             ]
         ],
         inputs=[snapshot, scene, min_conf_thr, matching_conf_thr, cam_size, as_pointcloud, shared_intrinsics, clean_depth, inputfiles],
         outputs=[scene, outmodel],
         fn=get_reconstructed_scene,
-        run_on_click=True,
+        run_on_click=False,
     )
     examples = gradio.Examples(
         examples=[
             [
-                turtle_images[0],
+                puma_images[0],
                 None,
                 1.5, 0.0, 0.2, True, True, False,
-                turtle_images,
+                puma_images,
             ]
         ],
         inputs=[snapshot, scene, min_conf_thr, matching_conf_thr, cam_size, as_pointcloud, shared_intrinsics, clean_depth, inputfiles],
         outputs=[scene, outmodel],
         fn=get_reconstructed_scene,
-        run_on_click=True,
+        run_on_click=False,
     )
     examples = gradio.Examples(
         examples=[
@@ -353,7 +352,7 @@ def mast3r_demo_tab():
         inputs=[snapshot, scene, min_conf_thr, matching_conf_thr, cam_size, as_pointcloud, shared_intrinsics, clean_depth, inputfiles],
         outputs=[scene, outmodel],
         fn=get_reconstructed_scene,
-        run_on_click=True,
+        run_on_click=False,
 
 
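
The main functional change in this file is flipping `run_on_click` from `True` to `False` on each `gradio.Examples` block, so clicking an example only pre-fills the inputs instead of immediately launching a reconstruction run (which matters under the zeroGPU 3-minute limit mentioned in the instructions). Below is a minimal, hedged sketch of that behavior in isolation; `describe_scene` and its inputs are hypothetical stand-ins for `get_reconstructed_scene` and the demo's real components, not code from this repository.

```python
import gradio as gr

# Hypothetical stand-in for get_reconstructed_scene: just echoes its inputs.
def describe_scene(min_conf_thr, cam_size, as_pointcloud):
    return f"min_conf_thr={min_conf_thr}, cam_size={cam_size}, pointcloud={as_pointcloud}"

with gr.Blocks() as demo:
    min_conf_thr = gr.Slider(0.0, 10.0, value=1.5, label="min_conf_thr")
    cam_size = gr.Slider(0.0, 1.0, value=0.2, label="cam_size")
    as_pointcloud = gr.Checkbox(value=True, label="as_pointcloud")
    out = gr.Textbox(label="result")
    run = gr.Button("Run")
    run.click(describe_scene, [min_conf_thr, cam_size, as_pointcloud], out)

    # With run_on_click=False, selecting an example only fills the inputs;
    # the user still has to press "Run" to trigger the (potentially slow) function.
    gr.Examples(
        examples=[[1.5, 0.2, True]],
        inputs=[min_conf_thr, cam_size, as_pointcloud],
        outputs=[out],
        fn=describe_scene,
        run_on_click=False,
    )

demo.launch()
```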