Spaces · Running on L40S

Upload folder using huggingface_hub
app.py
CHANGED
@@ -34,20 +34,80 @@ from PIL import Image
 from einops import rearrange
 import pandas as pd
 
+# import sys
+# import spaces
+# import subprocess
+# from huggingface_hub import snapshot_download
+
+# def install_cuda_toolkit():
+#     # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
+#     CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
+#     CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
+#     subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
+#     subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
+#     subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
+
+#     os.environ["CUDA_HOME"] = "/usr/local/cuda"
+#     os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ["PATH"])
+#     os.environ["LD_LIBRARY_PATH"] = "%s/lib:%s" % (
+#         os.environ["CUDA_HOME"],
+#         "" if "LD_LIBRARY_PATH" not in os.environ else os.environ["LD_LIBRARY_PATH"],
+#     )
+#     # Fix: arch_list[-1] += '+PTX'; IndexError: list index out of range
+#     os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"
+
+# def install_requirements():
+#     subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/NVlabs/nvdiffrast"])
+#     subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/facebookresearch/pytorch3d@stable"])
+
+# def download_models():
+#     os.makedirs("weights", exist_ok=True)
+#     os.makedirs("weights/hunyuanDiT", exist_ok=True)
+#     os.makedirs("third_party/weights/DUSt3R_ViTLarge_BaseDecoder_512_dpt", exist_ok=True)
+#     try:
+#         snapshot_download(
+#             repo_id="tencent/Hunyuan3D-1",
+#             local_dir="./weights",
+#             resume_download=True
+#         )
+#         print("Successfully downloaded Hunyuan3D-1 model")
+#     except Exception as e:
+#         print(f"Error downloading Hunyuan3D-1: {e}")
+#     try:
+#         snapshot_download(
+#             repo_id="Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled",
+#             local_dir="./weights/hunyuanDiT",
+#             resume_download=True
+#         )
+#         print("Successfully downloaded HunyuanDiT model")
+#     except Exception as e:
+#         print(f"Error downloading HunyuanDiT: {e}")
+#     try:
+#         snapshot_download(
+#             repo_id="naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt",
+#             local_dir="./third_party/weights/DUSt3R_ViTLarge_BaseDecoder_512_dpt",
+#             resume_download=True
+#         )
+#         print("Successfully downloaded DUSt3R model")
+#     except Exception as e:
+#         print(f"Error downloading DUSt3R: {e}")
+# install_cuda_toolkit()
+# install_requirements()
+# download_models()
+
 from infer import seed_everything, save_gif
 from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
 from third_party.check import check_bake_available
 
 try:
     from third_party.mesh_baker import MeshBaker
+    assert check_bake_available()
     BAKE_AVAILEBLE = True
 except Exception as err:
     print(err)
     print("import baking related fail, run without baking")
-    check_bake_available()
     BAKE_AVAILEBLE = False
 
-
 warnings.simplefilter('ignore', category=UserWarning)
 warnings.simplefilter('ignore', category=FutureWarning)
 warnings.simplefilter('ignore', category=DeprecationWarning)
@@ -57,17 +117,15 @@ parser.add_argument("--use_lite", default=False, action="store_true")
 parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
 parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
 parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
-parser.add_argument("--save_memory", default=False
+parser.add_argument("--save_memory", default=False)
 parser.add_argument("--device", default="cuda:0", type=str)
 args = parser.parse_args()
 
+
 ################################################################
 # initial setting
 ################################################################
 
-CONST_PORT = 8080
-CONST_MAX_QUEUE = 1
-CONST_SERVER = '0.0.0.0'
 
 CONST_HEADER = '''
 <h2><a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'><b>Tencent Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation</b></a></h2>
@@ -127,36 +185,36 @@ worker_v23 = Views2Mesh(
 )
 worker_gif = GifRenderer(args.device)
 
-
 if BAKE_AVAILEBLE:
     worker_baker = MeshBaker()
 
 
 ### functional modules
 
-def
+def gen_save_folder(max_size=30):
     os.makedirs('./outputs/app_output', exist_ok=True)
     exists = set(int(_) for _ in os.listdir('./outputs/app_output') if not _.startswith("."))
-    if len(exists) ==
-
-
-
+    if len(exists) == max_size:
+        shutil.rmtree(f"./outputs/app_output/0")
+        cur_id = 0
+    else:
+        cur_id = min(set(range(max_size)) - exists)
+    if os.path.exists(f"./outputs/app_output/{(cur_id + 1) % max_size}"):
+        shutil.rmtree(f"./outputs/app_output/{(cur_id + 1) % max_size}")
     save_folder = f'./outputs/app_output/{cur_id}'
     os.makedirs(save_folder, exist_ok=True)
+    print(f"mkdir {save_folder} suceess !!!")
+    return save_folder
 
+@space.GPU(duration=120)
+def stage_0_t2i(text, seed, step, save_folder):
     dst = save_folder + '/img.png'
-
-    if not text:
-        if image is None:
-            return dst, save_folder
-        raise gr.Error("Upload image or provide text ...")
-        image.save(dst)
-        return dst, save_folder
-
     image = worker_t2i(text, seed, step)
     image.save(dst)
-
-
+    img_nobg = worker_xbg(image, force=True)
+    dst = save_folder + '/img_nobg.png'
+    img_nobg.save(dst)
+    return dst
 
 def stage_1_xbg(image, save_folder, force_remove):
     if isinstance(image, str):
@@ -165,7 +223,8 @@ def stage_1_xbg(image, save_folder, force_remove):
     rgba = worker_xbg(image, force=force_remove)
     rgba.save(dst)
     return dst
-
+
+# @space.GPU
 def stage_2_i2v(image, seed, step, save_folder):
     if isinstance(image, str):
         image = Image.open(image)
@@ -180,6 +239,7 @@ def stage_2_i2v(image, seed, step, save_folder):
     show_img = Image.fromarray(show_img)
     return views_img, cond_img, show_img
 
+# @space.GPU
 def stage_3_v23(
     views_pil,
     cond_pil,
@@ -202,32 +262,32 @@ def stage_3_v23(
     obj_dst = save_folder + '/mesh_vertex_colors.obj' # gradio just only can show vertex shading
     return obj_dst, glb_dst
 
-
+# @space.GPU
+def stage_3p_baking(save_folder, color, bake, force, front, others, align_times):
     if color == "texture" and bake:
-        obj_dst = worker_baker(save_folder)
+        obj_dst = worker_baker(save_folder, force, front, others, align_times)
         glb_dst = obj_dst.replace(".obj", ".glb")
         return glb_dst
     else:
         return None
 
+# @space.GPU
 def stage_4_gif(save_folder, color, bake, render):
     if not render: return None
-
-
-
-        obj_dst = save_folder + '/view_0/bake/mesh.obj'
-    elif os.path.exists(save_folder + '/mesh.obj'):
-        obj_dst = save_folder + '/mesh.obj'
-    else:
-        print(save_folder)
-        raise FileNotFoundError("mesh obj file not found")
+    baked_fld_list = sorted(glob(save_folder + '/view_*/bake/mesh.obj'))
+    obj_dst = baked_fld_list[-1] if len(baked_fld_list)>=1 else save_folder+'/mesh.obj'
+    assert os.path.exists(obj_dst), f"{obj_dst} file not found"
     gif_dst = obj_dst.replace(".obj", ".gif")
     worker_gif(obj_dst, gif_dst_path=gif_dst)
     return gif_dst
 
 
 def check_image_available(image):
-    if image
+    if image is None:
+        return "Please upload image", gr.update()
+    elif not hasattr(image, 'mode'):
+        return "Not support, please upload other image", gr.update()
+    elif image.mode == "RGBA":
         data = np.array(image)
         alpha_channel = data[:, :, 3]
         unique_alpha_values = np.unique(alpha_channel)
@@ -243,11 +303,29 @@ def check_image_available(image):
     else:
         raise Exception("Image Error")
 
-
-
-
-
-
+
+def update_mode(mode):
+    color_change = {
+        'Quick': gr.update(value='vertex'),
+        'Moderate': gr.update(value='texture'),
+        'Appearance': gr.update(value='texture')
+    }[mode]
+    bake_change = {
+        'Quick': gr.update(value=False, interactive=False, visible=False),
+        'Moderate': gr.update(value=False),
+        'Appearance': gr.update(value=BAKE_AVAILEBLE)
+    }[mode]
+    face_change = {
+        'Quick': gr.update(value=120000, maximum=300000),
+        'Moderate': gr.update(value=60000, maximum=300000),
+        'Appearance': gr.update(value=10000, maximum=60000)
+    }[mode]
+    render_change = {
+        'Quick': gr.update(value=False, interactive=False, visible=False),
+        'Moderate': gr.update(value=True),
+        'Appearance': gr.update(value=True)
+    }[mode]
+    return color_change, bake_change, face_change, render_change
 
 # ===============================================================
 # gradio display
@@ -267,35 +345,113 @@ with gr.Blocks() as demo:
 with gr.Column():
     text = gr.TextArea('一只黑白相间的熊猫在白色背景上居中坐着,呈现出卡通风格和可爱氛围。',
         lines=3, max_lines=20, label='Input text')
-    with gr.Row():
-        textgen_color = gr.Radio(choices=["vertex", "texture"], label="Color", value="texture")
-    with gr.Row():
-        textgen_render = gr.Checkbox(label="Do Rendering", value=True, interactive=True)
-        if BAKE_AVAILEBLE:
-            textgen_bake = gr.Checkbox(label="Do Baking", value=True, interactive=True)
-        else:
-            textgen_bake = gr.Checkbox(label="Do Baking", value=False, interactive=False)
-
-    textgen_color.change(
-        fn=update_bake_render,
-        inputs=textgen_color,
-        outputs=[textgen_bake, textgen_render]
-    )
 
-
-
-
-
-
-
-
-
-
+    textgen_mode = gr.Radio(
+        choices=['Quick', 'Moderate', 'Appearance'],
+        label="Simple settings",
+        value='Appearance',
+        interactive=True
+    )
+
+    with gr.Accordion("Custom settings", open=False):
+        textgen_color = gr.Radio(choices=["vertex", "texture"], label="Color", value="texture")
+
+        with gr.Row():
+            textgen_render = gr.Checkbox(
+                label="Do Rendering",
+                value=True,
+                interactive=True
+            )
+            textgen_bake = gr.Checkbox(
+                label="Do Baking",
+                value=True if BAKE_AVAILEBLE else False,
+                interactive=True if BAKE_AVAILEBLE else False
+            )
+
+        with gr.Row():
+            textgen_seed = gr.Number(value=0, label="T2I seed", precision=0, interactive=True)
+            textgen_SEED = gr.Number(value=0, label="Gen seed", precision=0, interactive=True)
+
+        textgen_step = gr.Slider(
+            value=25,
+            minimum=15,
+            maximum=50,
+            step=1,
+            label="T2I steps",
+            interactive=True
+        )
+        textgen_STEP = gr.Slider(
+            value=50,
+            minimum=20,
+            maximum=80,
+            step=1,
+            label="Gen steps",
+            interactive=True
+        )
+        textgen_max_faces =gr.Slider(
+            value=10000,
+            minimum=2000,
+            maximum=60000,
+            step=1000,
+            label="Face number limit",
+            interactive=True
+        )
+
+        with gr.Accordion("Baking Options", open=False):
+            textgen_force_bake = gr.Checkbox(
+                label="Force (Ignore the degree of matching)",
+                value=False,
+                interactive=True
+            )
+            textgen_front_baking = gr.Radio(
+                choices=['input image', 'multi-view front view', 'auto'],
+                label="Front view baking",
+                value='auto',
+                interactive=True,
+                visible=True
+            )
+            textgen_other_views = gr.CheckboxGroup(
+                choices=['60°', '120°', '180°', '240°', '300°'],
+                label="Other views Baking",
+                value=['180°'],
+                interactive=True,
+                visible=True
+            )
+            textgen_align_times =gr.Slider(
+                value=3,
+                minimum=1,
+                maximum=5,
+                step=1,
+                label="Number of alignment attempts per view",
+                interactive=True
+            )
+
 with gr.Row():
     textgen_submit = gr.Button("Generate", variant="primary")
 
 with gr.Row():
     gr.Examples(examples=example_ts, inputs=[text], label="Text examples", examples_per_page=10)
+
+
+textgen_mode.change(
+    fn=update_mode,
+    inputs=textgen_mode,
+    outputs=[textgen_color, textgen_bake, textgen_max_faces, textgen_render]
+)
+textgen_color.change(
+    fn=lambda x:[
+        gr.update(value=(x=='texture'), interactive=(x=='texture'), visible=(x=='texture')),
+        gr.update(value=(x=='texture'), interactive=(x=='texture'), visible=(x=='texture')),
+    ],
+    inputs=textgen_color,
+    outputs=[textgen_bake, textgen_render]
+)
+textgen_bake.change(
+    fn= lambda x:[gr.update(visible=x)]*4+[gr.update(value=10000, minimum=2000, maximum=60000 if x else 300000)],
+    inputs=textgen_bake,
+    outputs=[textgen_front_baking, textgen_other_views, textgen_align_times, textgen_force_bake, textgen_max_faces]
+)
+
 
 ### Image iutput region
 
@@ -305,39 +461,112 @@ with gr.Blocks() as demo:
     image_mode="RGBA", sources="upload", interactive=True)
 with gr.Row():
     alert_message = gr.Markdown("") # for warning
-with gr.Row():
-    imggen_color = gr.Radio(choices=["vertex", "texture"], label="Color", value="texture")
-with gr.Row():
-    imggen_removebg = gr.Checkbox(label="Remove Background", value=True, interactive=True)
-    imggen_render = gr.Checkbox(label="Do Rendering", value=True, interactive=True)
-    if BAKE_AVAILEBLE:
-        imggen_bake = gr.Checkbox(label="Do Baking", value=True, interactive=True)
-    else:
-        imggen_bake = gr.Checkbox(label="Do Baking", value=False, interactive=False)
 
+imggen_mode = gr.Radio(
+    choices=['Quick', 'Moderate', 'Appearance'],
+    label="Simple settings",
+    value='Appearance',
+    interactive=True
+)
+
+with gr.Accordion("Custom settings", open=False):
+    imggen_color = gr.Radio(choices=["vertex", "texture"], label="Color", value="texture")
+
+    with gr.Row():
+        imggen_removebg = gr.Checkbox(
+            label="Remove Background",
+            value=True,
+            interactive=True
+        )
+        imggen_render = gr.Checkbox(
+            label="Do Rendering",
+            value=True,
+            interactive=True
+        )
+        imggen_bake = gr.Checkbox(
+            label="Do Baking",
+            value=True if BAKE_AVAILEBLE else False,
+            interactive=True if BAKE_AVAILEBLE else False
+        )
+    imggen_SEED = gr.Number(value=0, label="Gen seed", precision=0, interactive=True)
+
+    imggen_STEP = gr.Slider(
+        value=50,
+        minimum=20,
+        maximum=80,
+        step=1,
+        label="Gen steps",
+        interactive=True
+    )
+    imggen_max_faces =gr.Slider(
+        value=10000,
+        minimum=2000,
+        maximum=60000,
+        step=1000,
+        label="Face number limit",
+        interactive=True
+    )
+
+    with gr.Accordion("Baking Options", open=False):
+        imggen_force_bake = gr.Checkbox(
+            label="Force (Ignore the degree of matching)",
+            value=False,
+            interactive=True
+        )
+        imggen_front_baking = gr.Radio(
+            choices=['input image', 'multi-view front view', 'auto'],
+            label="Front view baking",
+            value='auto',
+            interactive=True,
+            visible=True
+        )
+        imggen_other_views = gr.CheckboxGroup(
+            choices=['60°', '120°', '180°', '240°', '300°'],
+            label="Other views Baking",
+            value=['180°'],
+            interactive=True,
+            visible=True
+        )
+        imggen_align_times =gr.Slider(
+            value=3,
+            minimum=1,
+            maximum=5,
+            step=1,
+            label="Number of alignment attempts per view",
+            interactive=True
+        )
+
 input_image.change(
     fn=check_image_available,
     inputs=input_image,
     outputs=[alert_message, imggen_removebg]
 )
+
+imggen_mode.change(
+    fn=update_mode,
+    inputs=imggen_mode,
+    outputs=[imggen_color, imggen_bake, imggen_max_faces, imggen_render]
+)
+
 imggen_color.change(
-    fn=
+    fn=lambda x:[gr.update(value=(x=='texture'), interactive=(x=='texture'), visible=(x=='texture'))]*2,
     inputs=imggen_color,
     outputs=[imggen_bake, imggen_render]
 )
-
-
-
-
-
-
-
+
+imggen_bake.change(
+    fn= lambda x:[gr.update(visible=x)]*4+[gr.update(value=120000, minimum=2000, maximum=60000 if x else 300000)],
+    inputs=imggen_bake,
+    outputs=[imggen_front_baking, imggen_other_views, imggen_align_times, imggen_force_bake, imggen_max_faces]
+)
+
 with gr.Row():
     imggen_submit = gr.Button("Generate", variant="primary")
 
 with gr.Row():
     gr.Examples(examples=example_is, inputs=[input_image],
         label="Img examples", examples_per_page=10)
+
 
 gr.Markdown(CONST_NOTE)
 
@@ -359,33 +588,32 @@ with gr.Blocks() as demo:
     interactive=False
 )
 
-
-
-
-
-
-
-
-
-)
-result_gif = gr.Image(label="GIF", interactive=False)
+result_3dobj = gr.Model3D(
+    clear_color=[0.0, 0.0, 0.0, 0.0],
+    label="OBJ vertex color",
+    show_label=True,
+    visible=True,
+    camera_position=[90, 90, None],
+    interactive=False
+)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+result_3dglb_texture = gr.Model3D(
+    clear_color=[0.0, 0.0, 0.0, 0.0],
+    label="GLB texture color",
+    show_label=True,
+    visible=True,
+    camera_position=[90, 90, None],
+    interactive=False)
+
+result_3dglb_baked = gr.Model3D(
+    clear_color=[0.0, 0.0, 0.0, 0.0],
+    label="GLB baked color",
+    show_label=True,
+    visible=True,
+    camera_position=[90, 90, None],
+    interactive=False)
+
+result_gif = gr.Image(label="GIF", interactive=False)
 
 with gr.Row():
     gr.Markdown(
@@ -399,17 +627,23 @@ with gr.Blocks() as demo:
 # gradio running code
 #===============================================================
 
-none = gr.State(None)
 save_folder = gr.State()
 cond_image = gr.State()
 views_image = gr.State()
-text_image = gr.State()
-
 
+def handle_click(save_folder):
+    if save_folder is None:
+        save_folder = gen_save_folder()
+    return save_folder
+
 textgen_submit.click(
+    fn=handle_click,
+    inputs=[save_folder],
+    outputs=[save_folder]
+).success(
     fn=stage_0_t2i,
-    inputs=[text,
-    outputs=[rem_bg_image
+    inputs=[text, textgen_seed, textgen_step, save_folder],
+    outputs=[rem_bg_image],
 ).success(
     fn=stage_2_i2v,
     inputs=[rem_bg_image, textgen_SEED, textgen_STEP, save_folder],
@@ -420,7 +654,8 @@ with gr.Blocks() as demo:
     outputs=[result_3dobj, result_3dglb_texture],
 ).success(
     fn=stage_3p_baking,
-    inputs=[save_folder, textgen_color, textgen_bake
+    inputs=[save_folder, textgen_color, textgen_bake,
+            textgen_force_bake, textgen_front_baking, textgen_other_views, textgen_align_times],
     outputs=[result_3dglb_baked],
 ).success(
     fn=stage_4_gif,
@@ -430,12 +665,12 @@ with gr.Blocks() as demo:
 
 
 imggen_submit.click(
-    fn=
-    inputs=[
-    outputs=[
+    fn=handle_click,
+    inputs=[save_folder],
+    outputs=[save_folder]
 ).success(
     fn=stage_1_xbg,
-    inputs=[
+    inputs=[input_image, save_folder, imggen_removebg],
     outputs=[rem_bg_image],
 ).success(
     fn=stage_2_i2v,
@@ -447,7 +682,8 @@ with gr.Blocks() as demo:
     outputs=[result_3dobj, result_3dglb_texture],
 ).success(
     fn=stage_3p_baking,
-    inputs=[save_folder, imggen_color, imggen_bake
+    inputs=[save_folder, imggen_color, imggen_bake,
+            imggen_force_bake, imggen_front_baking, imggen_other_views, imggen_align_times],
     outputs=[result_3dglb_baked],
 ).success(
     fn=stage_4_gif,
@@ -458,7 +694,11 @@ with gr.Blocks() as demo:
 #===============================================================
 # start gradio server
 #===============================================================
+CONST_PORT = 8080
+CONST_MAX_QUEUE = 1
+CONST_SERVER = '0.0.0.0'
 
 demo.queue(max_size=CONST_MAX_QUEUE)
 demo.launch(server_name=CONST_SERVER, server_port=CONST_PORT)
-
+# demo.launch()
+
app_hg.py
CHANGED
@@ -700,6 +700,6 @@ with gr.Blocks() as demo:
 CONST_MAX_QUEUE = 1
 CONST_SERVER = '0.0.0.0'
 
-demo.queue()
+demo.queue(max_size=CONST_MAX_QUEUE)
 demo.launch()
 
main.py
CHANGED
@@ -195,14 +195,10 @@ if __name__ == "__main__":
     if args.do_render:
         if mesh_file_for_render and os.path.exists(mesh_file_for_render):
             mesh_file_for_render = mesh_file_for_render
-        elif os.path.exists(os.path.join(args.save_folder, 'view_1/bake/mesh.obj')):
-            mesh_file_for_render = os.path.join(args.save_folder, 'view_1/bake/mesh.obj')
-        elif os.path.exists(os.path.join(args.save_folder, 'view_0/bake/mesh.obj')):
-            mesh_file_for_render = os.path.join(args.save_folder, 'view_0/bake/mesh.obj')
-        elif os.path.exists(os.path.join(args.save_folder, 'mesh.obj')):
-            mesh_file_for_render = os.path.join(args.save_folder, 'mesh.obj')
         else:
-
+            baked_fld_list = sorted(glob(args.save_folder + '/view_*/bake/mesh.obj'))
+            mesh_file_for_render = baked_fld_list[-1] if len(baked_fld_list)>=1 else args.save_folder+'/mesh.obj'
+            assert os.path.exists(mesh_file_for_render), f"{mesh_file_for_render} file not found"
 
         print("Rendering 3d file:", mesh_file_for_render)
 