JerryFan011018 committed on
Commit 9d081dc · verified · 1 Parent(s): 15bdcbb

Upload app.py

Files changed (1)
  app.py +210 -0
app.py ADDED
@@ -0,0 +1,210 @@
+ # Copyright (c) 2023-2024, Zexin He
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+ from PIL import Image
+ import numpy as np
+ import gradio as gr
+
+
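+ # Fail fast with a user-visible Gradio error when Generate is clicked without an input image.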
+ def assert_input_image(input_image):
+     if input_image is None:
+         raise gr.Error("No image selected or uploaded!")
+
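+ # Each generation request gets its own temporary directory for intermediate
+ # files (raw/processed images and the rendered video), carried via gr.State.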
+ def prepare_working_dir():
+     import tempfile
+     working_dir = tempfile.TemporaryDirectory()
+     return working_dir
+
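+ # Build the background-removal / recentering preprocessor once and share it
+ # as a module-level global.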
+ def init_preprocessor():
+     from openlrm.utils.preprocess import Preprocessor
+     global preprocessor
+     preprocessor = Preprocessor()
+
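+ # Save the uploaded array to disk, optionally remove the background and
+ # recenter the object, and return the path of the processed image.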
+ def preprocess_fn(image_in: np.ndarray, remove_bg: bool, recenter: bool, working_dir):
+     image_raw = os.path.join(working_dir.name, "raw.png")
+     with Image.fromarray(image_in) as img:
+         img.save(image_raw)
+     image_out = os.path.join(working_dir.name, "rembg.png")
+     success = preprocessor.preprocess(image_path=image_raw, save_path=image_out, rmbg=remove_bg, recenter=recenter)
+     assert success, "Preprocessing failed in preprocess_fn!"
+     return image_out
+
+
+ def demo_openlrm(infer_impl):
+
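+     # Run the inferrer on the processed image and return the rendered video;
+     # mesh export is turned off (export_mesh=False), so only the video is produced.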
+     def core_fn(image: str, source_cam_dist: float, working_dir):
+         dump_video_path = os.path.join(working_dir.name, "output.mp4")
+         dump_mesh_path = os.path.join(working_dir.name, "output.ply")
+         infer_impl(
+             image_path=image,
+             source_cam_dist=source_cam_dist,
+             export_video=True,
+             export_mesh=False,
+             dump_video_path=dump_video_path,
+             dump_mesh_path=dump_mesh_path,
+         )
+         return dump_video_path
+
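+     # Used by gr.Examples to pre-compute cached outputs: preprocess and render
+     # with default settings, reusing Gradio's example cache folder as working_dir.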
+     def example_fn(image: np.ndarray):
+         from gradio.utils import get_cache_folder
+         working_dir = get_cache_folder()
+         image = preprocess_fn(
+             image_in=image,
+             remove_bg=True,
+             recenter=True,
+             working_dir=working_dir,
+         )
+         video = core_fn(
+             image=image,
+             source_cam_dist=2.0,
+             working_dir=working_dir,
+         )
+         return image, video
+
+
+     _TITLE = '''OpenLRM: Open-Source Large Reconstruction Models'''
+
+     _DESCRIPTION = '''
+     <div>
+         <a style="display:inline-block" href='https://github.com/3DTopia/OpenLRM'><img src='https://img.shields.io/github/stars/3DTopia/OpenLRM?style=social'/></a>
+         <a style="display:inline-block; margin-left: .5em" href="https://huggingface.co/zxhezexin"><img src='https://img.shields.io/badge/Model-Weights-blue'/></a>
+     </div>
+     OpenLRM is an open-source implementation of Large Reconstruction Models.
+
+     <strong>Image-to-3D in 10 seconds on an A100!</strong>
+
+     <strong>Disclaimer:</strong> This demo uses the `openlrm-mix-base-1.1` model with a 288x288 rendering resolution for a quick demonstration.
+     '''
+
+     with gr.Blocks(analytics_enabled=False) as demo:
+
+         # HEADERS
+         with gr.Row():
+             with gr.Column(scale=1):
+                 gr.Markdown('# ' + _TITLE)
+         with gr.Row():
+             gr.Markdown(_DESCRIPTION)
+
+         # DISPLAY
+         with gr.Row():
+
+             with gr.Column(variant='panel', scale=1):
+                 with gr.Tabs(elem_id="openlrm_input_image"):
+                     with gr.TabItem('Input Image'):
+                         with gr.Row():
+                             input_image = gr.Image(label="Input Image", image_mode="RGBA", width="auto", sources="upload", type="numpy", elem_id="content_image")
+
+             with gr.Column(variant='panel', scale=1):
+                 with gr.Tabs(elem_id="openlrm_processed_image"):
+                     with gr.TabItem('Processed Image'):
+                         with gr.Row():
+                             processed_image = gr.Image(label="Processed Image", image_mode="RGBA", type="filepath", elem_id="processed_image", width="auto", interactive=False)
+
+             with gr.Column(variant='panel', scale=1):
+                 with gr.Tabs(elem_id="openlrm_render_video"):
+                     with gr.TabItem('Rendered Video'):
+                         with gr.Row():
+                             output_video = gr.Video(label="Rendered Video", format="mp4", width="auto", autoplay=True)
+
+         # SETTING
+         with gr.Row():
+             with gr.Column(variant='panel', scale=1):
+                 with gr.Tabs(elem_id="openlrm_attrs"):
+                     with gr.TabItem('Settings'):
+                         with gr.Column(variant='panel'):
+                             gr.Markdown(
+                                 """
+                                 <strong>Best Practice</strong>:
+                                 Centered objects of a reasonable size work best. Try adjusting the source camera distance.
+                                 """
+                             )
+                             checkbox_rembg = gr.Checkbox(True, label='Remove background')
+                             checkbox_recenter = gr.Checkbox(True, label='Recenter the object')
+                             slider_cam_dist = gr.Slider(1.0, 3.5, value=2.0, step=0.1, label="Source Camera Distance")
+                             submit = gr.Button('Generate', elem_id="openlrm_generate", variant='primary')
+
+         # EXAMPLES
+         with gr.Row():
+             examples = [
+                 ['assets/sample_input/owl.png'],
+                 ['assets/sample_input/building.png'],
+                 ['assets/sample_input/mailbox.png'],
+                 ['assets/sample_input/fire.png'],
+                 ['assets/sample_input/girl.png'],
+                 ['assets/sample_input/lamp.png'],
+                 ['assets/sample_input/hydrant.png'],
+                 ['assets/sample_input/hotdogs.png'],
+                 ['assets/sample_input/traffic.png'],
+                 ['assets/sample_input/ceramic.png'],
+             ]
+             gr.Examples(
+                 examples=examples,
+                 inputs=[input_image],
+                 outputs=[processed_image, output_video],
+                 fn=example_fn,
+                 cache_examples=bool(os.getenv('SPACE_ID')),
+                 examples_per_page=20,
+             )
+
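+         # Event chain: validate the input, create a working directory, preprocess,
+         # then render. Each .success() step runs only if the previous one succeeded.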
+         working_dir = gr.State()
+         submit.click(
+             fn=assert_input_image,
+             inputs=[input_image],
+             queue=False,
+         ).success(
+             fn=prepare_working_dir,
+             outputs=[working_dir],
+             queue=False,
+         ).success(
+             fn=preprocess_fn,
+             inputs=[input_image, checkbox_rembg, checkbox_recenter, working_dir],
+             outputs=[processed_image],
+         ).success(
+             fn=core_fn,
+             inputs=[processed_image, slider_cam_dist, working_dir],
+             outputs=[output_video],
+         )
+
+     demo.queue()
+     demo.launch()
+
+
+ def launch_gradio_app():
+
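+     # App-level configuration consumed by the OpenLRM runner (model name, infer
+     # config, runner type) is passed through environment variables.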
+     os.environ.update({
+         "APP_ENABLED": "1",
+         "APP_MODEL_NAME": "zxhezexin/openlrm-mix-base-1.1",
+         "APP_INFER": "./configs/infer-gradio.yaml",
+         "APP_TYPE": "infer.lrm",
+         "NUMBA_THREADING_LAYER": 'omp',
+     })
+
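+     # Look up the inferrer class registered under APP_TYPE and launch the demo
+     # with its single-image inference entry point (infer_single).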
+     from openlrm.runners import REGISTRY_RUNNERS
+     from openlrm.runners.infer.base_inferrer import Inferrer
+     InferrerClass: type[Inferrer] = REGISTRY_RUNNERS[os.getenv("APP_TYPE")]
+     with InferrerClass() as inferrer:
+         init_preprocessor()
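+         # Outside a Hugging Face Space (no SPACE_ID set), wrap the demo builder
+         # with no_proxy, presumably so local proxy settings do not interfere.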
+         if not bool(os.getenv('SPACE_ID')):
+             from openlrm.utils.proxy import no_proxy
+             demo = no_proxy(demo_openlrm)
+         else:
+             demo = demo_openlrm
+         demo(infer_impl=inferrer.infer_single)
+
+
+ if __name__ == '__main__':
+
+     launch_gradio_app()