OlegCh committed
Commit f7da830 · verified · Parent: 5ca2c54

Update app.py

Files changed (1):
1. app.py +2 -91
app.py CHANGED
@@ -1,82 +1,4 @@
-import gradio as gr
-import cv2
-import numpy as np
-import os
-from PIL import Image
-import spaces
-import torch
-import torch.nn.functional as F
-from torchvision.transforms import Compose
-import tempfile
-from gradio_imageslider import ImageSlider
-
-from depth_anything.dpt import DepthAnything
-from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
-
-css = """
-#img-display-container {
-    max-height: 100vh;
-}
-#img-display-input {
-    max-height: 80vh;
-}
-#img-display-output {
-    max-height: 80vh;
-}
-"""
-DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
-encoder = 'vitl'  # can also be 'vits' or 'vitb'
-model = DepthAnything.from_pretrained(f"LiheYoung/depth_anything_{encoder}14").to(DEVICE).eval()
-
-title = "# Depth Anything"
-description = """Official demo for **Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data**.
-Please refer to our [paper](https://arxiv.org/abs/2401.10891), [project page](https://depth-anything.github.io), or [github](https://github.com/LiheYoung/Depth-Anything) for more details."""
-
-transform = Compose([
-    Resize(
-        width=518,
-        height=518,
-        resize_target=False,
-        keep_aspect_ratio=True,
-        ensure_multiple_of=14,
-        resize_method='lower_bound',
-        image_interpolation_method=cv2.INTER_CUBIC,
-    ),
-    NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    PrepareForNet(),
-])
-
-@spaces.GPU
-@torch.no_grad()
-def predict_depth(model, image):
-    return model(image)
-
-
-with gr.Blocks(css=css) as demo:
-    gr.Markdown(title)
-    gr.Markdown(description)
-    gr.Markdown("### Depth Prediction demo")
-    gr.Markdown("You can slide the output to compare the depth prediction with input image")
-
-    with gr.Row():
-        input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
-        depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
-    raw_file = gr.File(label="16-bit raw depth (can be considered as disparity)")
-    submit = gr.Button("Submit")
-
-    def on_submit(image):
-        original_image = image.copy()
-
-        h, w = image.shape[:2]
-
-        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
-        image = transform({'image': image})['image']
-        image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)
-
-        depth = predict_depth(model, image)
-        depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
-
-        raw_depth = Image.fromarray(depth.cpu().numpy().astype('uint16'))
+raw_depth = Image.fromarray(depth.cpu().numpy().astype('uint16'))
         tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
         raw_depth.save(tmp.name)
 
@@ -84,15 +6,4 @@ with gr.Blocks(css=css) as demo:
         depth = depth.cpu().numpy().astype(np.uint8)
         colored_depth = cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)[:, :, ::-1]
 
-        return [(original_image, colored_depth), tmp.name]
-
-    submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, raw_file])
-
-    example_files = os.listdir('examples')
-    example_files.sort()
-    example_files = [os.path.join('examples', filename) for filename in example_files]
-    examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[depth_image_slider, raw_file], fn=on_submit, cache_examples=True)
-
-
-if __name__ == '__main__':
-    demo.queue().launch()
+return [(original_image, colored_depth), tmp.name]
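The deleted block above is the complete Depth Anything inference pipeline of the demo. For reference, a minimal standalone sketch of that pipeline, reconstructed from the removed lines: it assumes the depth_anything package and the LiheYoung/depth_anything_vitl14 checkpoint used by the deleted code, and the input/output file paths are illustrative only.

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision.transforms import Compose

from depth_anything.dpt import DepthAnything
from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
model = DepthAnything.from_pretrained("LiheYoung/depth_anything_vitl14").to(DEVICE).eval()

# Same preprocessing as the deleted transform: scale so the shorter side is
# about 518 px, round both sides to multiples of 14, then ImageNet-normalize.
transform = Compose([
    Resize(width=518, height=518, resize_target=False, keep_aspect_ratio=True,
           ensure_multiple_of=14, resize_method='lower_bound',
           image_interpolation_method=cv2.INTER_CUBIC),
    NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    PrepareForNet(),
])

@torch.no_grad()
def infer_depth(bgr):
    # Returns relative inverse depth (disparity-like) at the input resolution.
    h, w = bgr.shape[:2]
    image = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) / 255.0
    image = transform({'image': image})['image']
    image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)
    depth = model(image)                              # (1, H', W') on the model grid
    depth = F.interpolate(depth[None], (h, w), mode='bilinear',
                          align_corners=False)[0, 0]  # back to input size
    return depth.cpu().numpy()

depth = infer_depth(cv2.imread('examples/demo.png'))            # hypothetical input
Image.fromarray(depth.astype('uint16')).save('raw_depth.png')   # 16-bit raw map
norm = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
colored = cv2.applyColorMap(norm.astype(np.uint8), cv2.COLORMAP_INFERNO)  # BGR
cv2.imwrite('colored_depth.png', colored)
# Reading the 16-bit raw map back (PIL decodes it as an integer image):
raw = np.array(Image.open('raw_depth.png'))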