update to new version

- app.py +27 -91
- assets/imgs/AdobeStock_286429091.jpeg +2 -2
- assets/imgs/AdobeStock_331358641.jpeg +2 -2
- assets/imgs/i4.png +2 -2
- requirements.txt +11 -16

app.py
CHANGED
@@ -3,24 +3,23 @@ import os
 try:
     import perspective2d
 except:
-    os.system(f"pip install git+https://github.com/jinlinyi/PerspectiveFields.git@
+    os.system(f"pip install git+https://github.com/jinlinyi/PerspectiveFields.git@v1.0.0")
 
 
 import gradio as gr
 import cv2
 import copy
+import numpy as np
+import os.path as osp
+from datetime import datetime
+
 import torch
 from PIL import Image, ImageDraw
 from glob import glob
-import numpy as np
-import os.path as osp
-from detectron2.config import get_cfg
-from detectron2.data.detection_utils import read_image
-from perspective2d.utils.predictor import VisualizationDemo
-import perspective2d.modeling  # noqa
-from perspective2d.config import get_perspective2d_cfg_defaults
-from perspective2d.utils import draw_from_r_p_f_cx_cy
 
+from perspective2d import PerspectiveFields
+from perspective2d.utils import draw_perspective_fields, draw_from_r_p_f_cx_cy
+from perspective2d.perspectivefields import model_zoo
 
 
 
@@ -32,25 +31,6 @@ description = ""
 article = ""
 
 
-
-
-def setup_cfg(args):
-    cfgs = []
-    configs = args['config_file'].split('#')
-    weights_id = args['opts'].index('MODEL.WEIGHTS') + 1
-    weights = args['opts'][weights_id].split('#')
-    for i, conf in enumerate(configs):
-        if len(conf) != 0:
-            tmp_opts = copy.deepcopy(args['opts'])
-            tmp_opts[weights_id] = weights[i]
-            cfg = get_cfg()
-            get_perspective2d_cfg_defaults(cfg)
-            cfg.merge_from_file(conf)
-            cfg.merge_from_list(tmp_opts)
-            cfg.freeze()
-            cfgs.append(cfg)
-    return cfgs
-
 def resize_fix_aspect_ratio(img, field, target_width=None, target_height=None):
     height = img.shape[0]
     width = img.shape[1]
@@ -80,36 +60,26 @@ def resize_fix_aspect_ratio(img, field, target_width=None, target_height=None):
     return img, field
 
 
-def inference(img, model_type):
+def inference(img_rgb, model_type):
     if model_type is None:
         return None, ""
-
-
-
-    # img = read_image(image_path, format="BGR")
-    img = img[..., ::-1] # rgb->bgr
-    pred = demo.run_on_image(img)
+    pf_model = PerspectiveFields(model_type).eval().cuda()
+    pred = pf_model.inference(img_bgr=img_rgb[...,::-1])
+    img_h = img_rgb.shape[0]
     field = {
         'up': pred['pred_gravity_original'].cpu().detach(),
         'lati': pred['pred_latitude_original'].cpu().detach(),
     }
-
+    img_rgb, field = resize_fix_aspect_ratio(img_rgb, field, 640)
    if not model_zoo[model_type]['param']:
-        pred_vis =
-
-
-
-
-        )
+        pred_vis = draw_perspective_fields(
+            img_rgb,
+            field['up'],
+            torch.deg2rad(field['lati']),
+            color=(0,1,0),
+        )
         param = "Not Implemented"
     else:
-        if 'pred_general_vfov' not in pred.keys():
-            pred['pred_general_vfov'] = pred['pred_vfov']
-        if 'pred_rel_cx' not in pred.keys():
-            pred['pred_rel_cx'] = torch.FloatTensor([0])
-        if 'pred_rel_cy' not in pred.keys():
-            pred['pred_rel_cy'] = torch.FloatTensor([0])
-
         r_p_f_rad = np.radians(
             [
                 pred['pred_roll'].cpu().item(),
@@ -121,15 +91,21 @@ def inference(img, model_type):
             pred['pred_rel_cx'].cpu().item(),
             pred['pred_rel_cy'].cpu().item(),
         ]
-        param = f"roll {pred['pred_roll'].cpu().item() :.2f}\npitch {pred['pred_pitch'].cpu().item() :.2f}\
+        param = f"roll {pred['pred_roll'].cpu().item() :.2f}\npitch {pred['pred_pitch'].cpu().item() :.2f}\nvertical fov {pred['pred_general_vfov'].cpu().item() :.2f}\nfocal_length {pred['pred_rel_focal'].cpu().item()*img_h :.2f}\n"
         param += f"principal point {pred['pred_rel_cx'].cpu().item() :.2f} {pred['pred_rel_cy'].cpu().item() :.2f}"
         pred_vis = draw_from_r_p_f_cx_cy(
-            img,
+            img_rgb,
             *r_p_f_rad,
             *cx_cy,
             'rad',
             up_color=(0,1,0),
         )
+    print(f"""time {datetime.now().strftime("%H:%M:%S")}
+img.shape {img_rgb.shape}
+model_type {model_type}
+param {param}
+"""
+    )
     return Image.fromarray(pred_vis), param
 
 examples = []
@@ -138,46 +114,6 @@ for img_name in glob('assets/imgs/*.*g'):
 print(examples)
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
-model_zoo = {
-
-    'NEW:Paramnet-360Cities-edina-centered': {
-        'weights': ['https://www.dropbox.com/s/z2dja70bgy007su/paramnet_360cities_edina_rpf.pth'],
-        'opts': ['MODEL.WEIGHTS', 'models/paramnet_360cities_edina_rpf.pth', 'MODEL.DEVICE', device,],
-        'config_file': 'models/paramnet_360cities_edina_rpf.yaml',
-        'param': True,
-    },
-
-    'NEW:Paramnet-360Cities-edina-uncentered': {
-        'weights': ['https://www.dropbox.com/s/nt29e1pi83mm1va/paramnet_360cities_edina_rpfpp.pth'],
-        'opts': ['MODEL.WEIGHTS', 'models/paramnet_360cities_edina_rpfpp.pth', 'MODEL.DEVICE', device,],
-        'config_file': 'models/paramnet_360cities_edina_rpfpp.yaml',
-        'param': True,
-    },
-
-    'PersNet-360Cities': {
-        'weights': ['https://www.dropbox.com/s/czqrepqe7x70b7y/cvpr2023.pth'],
-        'opts': ['MODEL.WEIGHTS', 'models/cvpr2023.pth', 'MODEL.DEVICE', device,],
-        'config_file': 'models/cvpr2023.yaml',
-        'param': False,
-    },
-    'PersNet_Paramnet-GSV-uncentered': {
-        'weights': ['https://www.dropbox.com/s/ufdadxigewakzlz/paramnet_gsv_rpfpp.pth'],
-        'opts': ['MODEL.WEIGHTS', 'models/paramnet_gsv_rpfpp.pth', 'MODEL.DEVICE', device,],
-        'config_file': 'models/paramnet_gsv_rpfpp.yaml',
-        'param': True,
-    },
-    # trained on GSV dataset, predicts Perspective Fields + camera parameters (roll, pitch, fov), assuming centered principal point
-    'PersNet_Paramnet-GSV-centered': {
-        'weights': ['https://www.dropbox.com/s/g6xwbgnkggapyeu/paramnet_gsv_rpf.pth'],
-        'opts': ['MODEL.WEIGHTS', 'models/paramnet_gsv_rpf.pth', 'MODEL.DEVICE', device,],
-        'config_file': 'models/paramnet_gsv_rpf.yaml',
-        'param': True,
-    },
-}
-for model_id in model_zoo:
-    html = model_zoo[model_id]['weights'][0]
-    if not os.path.exists(os.path.join('models', html.split('/')[-1])):
-        os.system(f"wget -P models/ {html}")
 
 info = """Select model\n"""
 gr.Interface(
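
For reference, a minimal standalone sketch of the perspective2d v1.0.0 API this commit migrates to, distilled from the new inference() above. The constructor chain, the inference(img_bgr=...) call, the pred keys, and draw_perspective_fields all appear verbatim in the diff; the zoo key below and the CPU fallback are assumptions (the real names live in perspective2d.perspectivefields.model_zoo, which the updated app.py imports).

    import cv2
    import torch
    from PIL import Image
    from perspective2d import PerspectiveFields
    from perspective2d.utils import draw_perspective_fields

    model_type = 'Paramnet-360Cities-edina-centered'  # assumed zoo key; inspect model_zoo for the exact names
    pf_model = PerspectiveFields(model_type).eval()
    if torch.cuda.is_available():  # app.py assumes CUDA and calls .cuda() unconditionally
        pf_model = pf_model.cuda()

    img_bgr = cv2.imread('assets/imgs/i4.png')  # OpenCV loads images as BGR
    pred = pf_model.inference(img_bgr=img_bgr)

    # Per-pixel up vectors and latitude (in degrees), as unpacked in app.py.
    up = pred['pred_gravity_original'].cpu().detach()
    lati = pred['pred_latitude_original'].cpu().detach()

    # Overlay the Perspective Fields on the RGB image, as the non-param branch does.
    vis = draw_perspective_fields(img_bgr[..., ::-1], up, torch.deg2rad(lati), color=(0, 1, 0))
    Image.fromarray(vis).save('vis.png')
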
assets/imgs/AdobeStock_286429091.jpeg
CHANGED (binary image tracked with Git LFS; pointer updated)

assets/imgs/AdobeStock_331358641.jpeg
CHANGED (binary image tracked with Git LFS; pointer updated)

assets/imgs/i4.png
CHANGED (binary image tracked with Git LFS; pointer updated)
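
These images are the demo's example inputs: the glob visible in the last app.py hunk ('assets/imgs/*.*g') matches the .jpeg and .png assets here. A sketch of that collection step; the loop body sits outside the diff context, so the appended row shape is a guess:

    from glob import glob

    examples = []
    for img_name in glob('assets/imgs/*.*g'):  # matches .jpg, .jpeg, .png
        examples.append([img_name])  # hypothetical row shape; the actual loop body is not shown in the diff
    print(examples)
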
requirements.txt
CHANGED
@@ -3,22 +3,17 @@
 
 torch==1.11.0+cu113
 torchvision==0.12.0+cu113
-mmcv==2.0.0rc4
 albumentations==1.3.0
-fvcore==0.1.5.post20221221
 gradio==3.29.0
-
-
-
-
-
-
-opencv_python_headless==4.7.0.72
-Pillow==9.5.0
-pycocotools==2.0.6
+albumentations
+matplotlib
+numpy
+omegaconf
+opencv-contrib-python
+pillow
 pyequilib==0.3.0
-
-scipy
-setuptools
-timm
-
+scikit-learn
+scipy
+setuptools
+timm
+yacs
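
Note that the pruned requirements.txt no longer carries the detectron2-era pins (mmcv, fvcore, pycocotools) and still does not list perspective2d itself; the app instead relies on the runtime bootstrap this commit adds at the top of app.py. A sketch of that pattern, with ImportError narrowed and an explicit re-import added for clarity (app.py itself uses a bare except and no re-import):

    import os

    try:
        import perspective2d
    except ImportError:
        # Same pinned install the diff adds; re-import so the module
        # is actually available in this process after installation.
        os.system("pip install git+https://github.com/jinlinyi/PerspectiveFields.git@v1.0.0")
        import perspective2d
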