Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -12,6 +12,22 @@ model_configs = {
|
|
12 |
'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}
|
13 |
}
|
14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
# Initialize model globally
|
16 |
def initialize_model():
|
17 |
encoder = 'vitl'
|
@@ -42,21 +58,21 @@ MODEL = initialize_model()
|
|
42 |
@spaces.GPU
|
43 |
def process_image(input_image):
|
44 |
"""
|
45 |
-
Process the input image and return depth maps
|
46 |
"""
|
47 |
if input_image is None:
|
48 |
-
return None, None
|
49 |
|
50 |
# Move model to GPU for processing
|
51 |
MODEL.to('cuda')
|
52 |
MODEL.eval()
|
53 |
|
54 |
-
# Convert from RGB to BGR
|
55 |
-
|
56 |
|
57 |
with torch.no_grad():
|
58 |
# Get depth map
|
59 |
-
depth = MODEL.infer_image(
|
60 |
|
61 |
# Normalize depth for visualization (0-255)
|
62 |
depth_normalized = ((depth - depth.min()) / (depth.max() - depth.min()) * 255).astype(np.uint8)
|
@@ -68,16 +84,19 @@ def process_image(input_image):
|
|
68 |
# Move model back to CPU after processing
|
69 |
MODEL.to('cpu')
|
70 |
|
71 |
-
|
|
|
|
|
|
|
72 |
|
73 |
@spaces.GPU
def gradio_interface(input_img):
    """Gradio entry point: run depth estimation on the uploaded image.

    Returns a 3-item list [original image, raw depth map, colored depth map].
    Any failure is printed and reported as None placeholders so the UI
    never crashes on a bad input.
    """
    # Start from the failure shape; overwrite on success.
    results = [input_img, None, None]
    try:
        depth_raw, depth_colored = process_image(input_img)
        results = [input_img, depth_raw, depth_colored]
    except Exception as e:
        # Best-effort UI boundary: report and fall through to placeholders.
        print(f"Error processing image: {str(e)}")
    return results
|
81 |
|
82 |
# Define interface
|
83 |
iface = gr.Interface(
|
@@ -86,10 +105,11 @@ iface = gr.Interface(
|
|
86 |
outputs=[
|
87 |
gr.Image(label="Original Image"),
|
88 |
gr.Image(label="Raw Depth Map"),
|
89 |
-
gr.Image(label="Colored Depth Map")
|
|
|
90 |
],
|
91 |
-
title="Depth Estimation",
|
92 |
-
description="Upload an image to generate its depth map.",
|
93 |
examples=["image.jpg"]
|
94 |
)
|
95 |
|
|
|
12 |
'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}
|
13 |
}
|
14 |
|
15 |
+
def get_image_intensity(img, gamma_correction=1.0):
    """Return the gamma-corrected intensity map of an RGB image.

    The intensity is the Value channel of the HSV representation
    (per-pixel max of R, G, B for 8-bit input). Gamma correction is
    applied in the normalized [0, 1] domain; the result is returned
    as an 8-bit grayscale image replicated into RGB shape.

    Args:
        img: RGB image array (presumably uint8 — TODO confirm at callers).
        gamma_correction: exponent applied to normalized intensity;
            1.0 (the default) leaves values unchanged.
    """
    # HSV's V channel carries the per-pixel intensity.
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    value = hsv[:, :, 2].astype(np.float32) / 255.0
    # Gamma-correct in [0, 1], then rescale back to the 8-bit range.
    corrected = (value ** gamma_correction * 255.0).clip(0, 255).astype(np.uint8)
    # Replicate the single channel so downstream consumers get RGB shape.
    return cv2.cvtColor(corrected, cv2.COLOR_GRAY2RGB)
|
30 |
+
|
31 |
# Initialize model globally
|
32 |
def initialize_model():
|
33 |
encoder = 'vitl'
|
|
|
58 |
@spaces.GPU
|
59 |
def process_image(input_image):
|
60 |
"""
|
61 |
+
Process the input image and return depth maps and intensity map
|
62 |
"""
|
63 |
if input_image is None:
|
64 |
+
return None, None, None
|
65 |
|
66 |
# Move model to GPU for processing
|
67 |
MODEL.to('cuda')
|
68 |
MODEL.eval()
|
69 |
|
70 |
+
# Convert from RGB to BGR for depth processing
|
71 |
+
input_bgr = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)
|
72 |
|
73 |
with torch.no_grad():
|
74 |
# Get depth map
|
75 |
+
depth = MODEL.infer_image(input_bgr)
|
76 |
|
77 |
# Normalize depth for visualization (0-255)
|
78 |
depth_normalized = ((depth - depth.min()) / (depth.max() - depth.min()) * 255).astype(np.uint8)
|
|
|
84 |
# Move model back to CPU after processing
|
85 |
MODEL.to('cpu')
|
86 |
|
87 |
+
# Get intensity map
|
88 |
+
intensity_map = get_image_intensity(np.array(input_image), gamma_correction=1.0)
|
89 |
+
|
90 |
+
return depth_normalized, depth_colormap, intensity_map
|
91 |
|
92 |
@spaces.GPU
def gradio_interface(input_img):
    """Gradio entry point: run depth + intensity estimation on the upload.

    Returns a 4-item list [original image, raw depth map, colored depth
    map, intensity map]. Any failure is printed and reported as None
    placeholders so the UI never crashes on a bad input.
    """
    # Start from the failure shape; overwrite on success.
    results = [input_img, None, None, None]
    try:
        depth_raw, depth_colored, intensity = process_image(input_img)
        results = [input_img, depth_raw, depth_colored, intensity]
    except Exception as e:
        # Best-effort UI boundary: report and fall through to placeholders.
        print(f"Error processing image: {str(e)}")
    return results
|
100 |
|
101 |
# Define interface
|
102 |
iface = gr.Interface(
|
|
|
105 |
outputs=[
|
106 |
gr.Image(label="Original Image"),
|
107 |
gr.Image(label="Raw Depth Map"),
|
108 |
+
gr.Image(label="Colored Depth Map"),
|
109 |
+
gr.Image(label="Intensity Map")
|
110 |
],
|
111 |
+
title="Depth and Intensity Estimation",
|
112 |
+
description="Upload an image to generate its depth map and intensity map.",
|
113 |
examples=["image.jpg"]
|
114 |
)
|
115 |
|