OlegCh committed
Commit 65a3200 · verified · 1 Parent(s): f7da830

Update app.py

Files changed (1): app.py +111 -6
app.py CHANGED
@@ -1,9 +1,114 @@
-        raw_depth = Image.fromarray(depth.cpu().numpy().astype('uint16'))
-        tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
+import gradio as gr
+import cv2
+import numpy as np
+import os
+from PIL import Image
+import spaces
+import torch
+import torch.nn.functional as F
+from torchvision.transforms import Compose
+import tempfile
+from gradio_imageslider import ImageSlider
+
+from depth_anything.dpt import DepthAnything
+from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
+
+css = """
+#img-display-container {
+    max-height: 100vh;
+}
+#img-display-input {
+    max-height: 80vh;
+}
+#img-display-output {
+    max-height: 80vh;
+}
+"""
+DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
+encoder = 'vitl'  # can also be 'vits' or 'vitb'
+model = DepthAnything.from_pretrained(f"LiheYoung/depth_anything_{encoder}14").to(DEVICE).eval()
+
+title = "# Depth Anything"
+description = """Official demo for **Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data**.
+Please refer to our [paper](https://arxiv.org/abs/2401.10891), [project page](https://depth-anything.github.io), or [github](https://github.com/LiheYoung/Depth-Anything) for more details."""
+
+transform = Compose([
+    Resize(
+        width=518,
+        height=518,
+        resize_target=False,
+        keep_aspect_ratio=True,
+        ensure_multiple_of=14,
+        resize_method='lower_bound',
+        image_interpolation_method=cv2.INTER_CUBIC,
+    ),
+    NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+    PrepareForNet(),
+])
+
+@spaces.GPU
+@torch.no_grad()
+def predict_depth(model, image):
+    return model(image)
+
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(title)
+    gr.Markdown(description)
+    gr.Markdown("### Depth Prediction demo")
+    gr.Markdown("You can slide the output to compare the depth prediction with the input image")
+
+    with gr.Row():
+        input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
+        depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
+    raw_file = gr.File(label="Normalized 16-bit depth")
+    submit = gr.Button("Submit")
+
+    def on_submit(image):
+        original_image = image.copy()
+
+        h, w = image.shape[:2]
+
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
+        image = transform({'image': image})['image']
+        image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)
+
+        depth = predict_depth(model, image)
+        depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
+
+        # disp1 = depth.cpu().numpy()
+        # range1 = np.minimum(disp1.max() / (disp1.min() + 0.001), 100.0)  # clamp the farthest depth to 100x the nearest
+        # max1 = disp1.max()
+        # min1 = max1 / range1
+
+        # depth1 = 1 / np.maximum(disp1, min1)
+        # depth1 = (depth1 - depth1.min()) / (depth1.max() - depth1.min()) * 65535.0
+        # raw_depth = Image.fromarray(depth1.astype('uint16'))
+        # tmp = tempfile.NamedTemporaryFile(suffix=f'_{min1:.3f}_{max1:.3f}_.png', delete=False)
+        # raw_depth.save(tmp.name)
+
+        depth1 = (depth - depth.min()) / (depth.max() - depth.min()) * 65535.0
+        raw_depth = Image.fromarray(depth1.cpu().numpy().astype('uint16'))
+        tmp = tempfile.NamedTemporaryFile(suffix=f'_{depth.min():.6f}_{depth.max():.6f}_.png', delete=False)
         raw_depth.save(tmp.name)
 
-        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
-        depth = depth.cpu().numpy().astype(np.uint8)
-        colored_depth = cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)[:, :, ::-1]
+        depth2 = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
+        depth2 = depth2.cpu().numpy().astype(np.uint8)
+        colored_depth = cv2.applyColorMap(depth2, cv2.COLORMAP_INFERNO)[:, :, ::-1]
 
-        return [(original_image, colored_depth), tmp.name]
+        return [(original_image, colored_depth), tmp.name]
+
+    submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, raw_file])
+
+    example_files = os.listdir('examples')
+    example_files.sort()
+    example_files = [os.path.join('examples', filename) for filename in example_files]
+    examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[depth_image_slider, raw_file], fn=on_submit, cache_examples=True)
+
+
+if __name__ == '__main__':
+    demo.queue().launch()
+
+
+
+
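
The temporary file written by `on_submit` is a min-max normalized 16-bit PNG, with the pre-normalization `depth.min()` and `depth.max()` encoded in the filename suffix, so the normalization can be inverted downstream. A minimal sketch of that round trip, assuming a file produced by the code above (the `load_raw_depth` helper and its filename regex are illustrative, not part of the commit):

```python
import re

import numpy as np
from PIL import Image


def load_raw_depth(path):
    # Hypothetical consumer-side helper: invert the uint16 normalization
    # stored = (d - min) / (max - min) * 65535, using the min/max values
    # that on_submit bakes into the suffix '..._{min:.6f}_{max:.6f}_.png'.
    m = re.search(r'_(-?\d+\.\d{6})_(-?\d+\.\d{6})_\.png$', path)
    if m is None:
        raise ValueError(f'no min/max encoded in filename: {path}')
    dmin, dmax = float(m.group(1)), float(m.group(2))

    # 16-bit PNGs load as mode 'I;16'; cast to float before rescaling.
    stored = np.asarray(Image.open(path), dtype=np.float64)
    return stored / 65535.0 * (dmax - dmin) + dmin
```

Note that what comes back is the model's relative, disparity-like prediction, not metric depth; the commented-out block in the diff sketches the alternative the author experimented with, which clamps the disparity range to 100x of the nearest value and inverts it into depth before saving.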