Da-pose to A-pose
- apps/avatarizer.py +25 -16
- lib/smplx/body_models.py +18 -0
apps/avatarizer.py
CHANGED
@@ -64,7 +64,7 @@ smpl_model = smplx.create(
 smpl_out_lst = []
 
 # obtain the pose params of T-pose, DA-pose, and the original pose
-for pose_type in ["t-pose", "da-pose", "pose"]:
+for pose_type in ["a-pose", "t-pose", "da-pose", "pose"]:
     smpl_out_lst.append(
         smpl_model(
             body_pose=smplx_param["body_pose"],
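A note on indexing: with "a-pose" prepended, smpl_out_lst is now ordered a-pose (0), t-pose (1), da-pose (2), original pose (3), which is why every hard-coded index below shifts up by one. A name-based lookup (hypothetical, not part of this commit) would make the later indexing self-documenting:

# Hypothetical refactor, not in this commit: look the outputs up by name so
# that prepending a new pose type cannot silently break downstream indices.
pose_types = ["a-pose", "t-pose", "da-pose", "pose"]
smpl_out = dict(zip(pose_types, smpl_out_lst))
smpl_verts = smpl_out["pose"].vertices.detach()[0]   # instead of smpl_out_lst[3]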
@@ -88,7 +88,7 @@ for pose_type in ["t-pose", "da-pose", "pose"]:
 # 3. ECON (w/o hands & over-streched faces) + SMPL-X (w/ hands & registered inpainting parts)
 # ------------------------------------------------------------------------------------------- #
 
-smpl_verts = smpl_out_lst[2].vertices.detach()[0]
+smpl_verts = smpl_out_lst[3].vertices.detach()[0]
 smpl_tree = cKDTree(smpl_verts.cpu().numpy())
 dist, idx = smpl_tree.query(econ_obj.vertices, k=5)
 
@@ -96,7 +96,7 @@ if not osp.exists(f"{prefix}_econ_da.obj") or not osp.exists(f"{prefix}_smpl_da.
 
     # t-pose for ECON
     econ_verts = torch.tensor(econ_obj.vertices).float()
-    rot_mat_t = smpl_out_lst[2].vertex_transformation.detach()[0][idx[:, 0]]
+    rot_mat_t = smpl_out_lst[3].vertex_transformation.detach()[0][idx[:, 0]]
     homo_coord = torch.ones_like(econ_verts)[..., :1]
    econ_cano_verts = torch.inverse(rot_mat_t) @ torch.cat([econ_verts, homo_coord],
                                                           dim=1).unsqueeze(-1)
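The canonicalization above unposes each ECON vertex with the inverse of the 4x4 transformation of its nearest SMPL-X vertex (idx[:, 0] from the k=5 query), in homogeneous coordinates. A minimal self-contained sketch of that round trip, with identity matrices standing in for the real vertex_transformation output:

import torch

# Unpose posed vertices v with per-vertex 4x4 transforms T, then repose them;
# the round trip reproduces v up to floating-point error.
N = 100
T = torch.eye(4).repeat(N, 1, 1)                      # stand-in transforms
v = torch.randn(N, 3)
homo = torch.ones(N, 1)
v_cano = (torch.inverse(T) @ torch.cat([v, homo], dim=1).unsqueeze(-1))[:, :3, 0]
v_back = (T @ torch.cat([v_cano, homo], dim=1).unsqueeze(-1))[:, :3, 0]
assert torch.allclose(v_back, v, atol=1e-5)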
@@ -104,13 +104,13 @@ if not osp.exists(f"{prefix}_econ_da.obj") or not osp.exists(f"{prefix}_smpl_da.
     econ_cano = trimesh.Trimesh(econ_cano_verts, econ_obj.faces)
 
     # da-pose for ECON
-    rot_mat_da = smpl_out_lst[1].vertex_transformation.detach()[0][idx[:, 0]]
+    rot_mat_da = smpl_out_lst[2].vertex_transformation.detach()[0][idx[:, 0]]
     econ_da_verts = rot_mat_da @ torch.cat([econ_cano_verts, homo_coord], dim=1).unsqueeze(-1)
     econ_da = trimesh.Trimesh(econ_da_verts[:, :3, 0].cpu(), econ_obj.faces)
 
     # da-pose for SMPL-X
     smpl_da = trimesh.Trimesh(
-        smpl_out_lst[1].vertices.detach()[0], smpl_model.faces, maintain_orders=True, process=False
+        smpl_out_lst[2].vertices.detach()[0], smpl_model.faces, maintain_orders=True, process=False
     )
     smpl_da.export(f"{prefix}_smpl_da.obj")
 
@@ -199,7 +199,7 @@ econ_posedirs = (
 econ_J_regressor /= econ_J_regressor.sum(dim=1, keepdims=True).clip(min=1e-10)
 econ_lbs_weights /= econ_lbs_weights.sum(dim=1, keepdims=True)
 
-rot_mat_da = smpl_out_lst[1].vertex_transformation.detach()[0][idx[:, 0]]
+rot_mat_da = smpl_out_lst[2].vertex_transformation.detach()[0][idx[:, 0]]
 econ_da_verts = torch.tensor(econ_da.vertices).float()
 econ_cano_verts = torch.inverse(rot_mat_da) @ torch.cat([
     econ_da_verts, torch.ones_like(econ_da_verts)[..., :1]
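The two row-normalizations above turn the resampled joint regressor and skinning weights into valid convex weights; the clip(min=1e-10) guards rows that (presumably via the k-NN resampling) received no support at all. A toy illustration:

import torch

# An all-zero row (a vertex with no regressor support) would divide by zero;
# the clip keeps it at zero instead of turning it into NaNs.
w = torch.tensor([[0.2, 0.6, 0.2], [0.0, 0.0, 0.0]])
w_norm = w / w.sum(dim=1, keepdim=True).clip(min=1e-10)
print(w_norm)   # rows: [0.2, 0.6, 0.2] and [0., 0., 0.]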
@@ -211,7 +211,7 @@ econ_cano_verts = econ_cano_verts[:, :3, 0].double()
 # use original pose to animate ECON reconstruction
 # ----------------------------------------------------
 
-new_pose = smpl_out_lst[2].full_pose
+new_pose = smpl_out_lst[3].full_pose
 # new_pose[:, :3] = 0.
 
 posed_econ_verts, _ = general_lbs(
@@ -222,7 +222,6 @@ posed_econ_verts, _ = general_lbs(
     parents=smpl_model.parents,
     lbs_weights=econ_lbs_weights
 )
-
 aligned_econ_verts = posed_econ_verts[0].detach().cpu().numpy()
 aligned_econ_verts += smplx_param["transl"].cpu().numpy()
 aligned_econ_verts *= smplx_param["scale"].cpu().numpy() * np.array([1.0, -1.0, -1.0])
@@ -322,14 +321,17 @@ texture_npy = uv_rasterizer.get_texture(
     torch.tensor(final_colors).unsqueeze(0).float() / 255.0,
 )
 
-Image.fromarray((texture_npy * 255.0).astype(np.uint8)).save(f"{cache_path}/texture.png")
+gray_texture = texture_npy.copy()
+gray_texture[texture_npy.sum(axis=2) == 0.0] = 0.5
+Image.fromarray((gray_texture * 255.0).astype(np.uint8)).save(f"{cache_path}/texture.png")
 
 # UV mask for TEXTure (https://readpaper.com/paper/4720151447010820097)
-texture_npy[texture_npy.sum(axis=2) == 0.0] = 1.0
-Image.fromarray((texture_npy * 255.0).astype(np.uint8)).save(f"{cache_path}/mask.png")
+white_texture = texture_npy.copy()
+white_texture[texture_npy.sum(axis=2) == 0.0] = 1.0
+Image.fromarray((white_texture * 255.0).astype(np.uint8)).save(f"{cache_path}/mask.png")
 
-# generate da-pose vertices
-new_pose = smpl_out_lst[1].full_pose
+# generate a-pose vertices
+new_pose = smpl_out_lst[0].full_pose
 new_pose[:, :3] = 0.
 
 posed_econ_verts, _ = general_lbs(
@@ -342,7 +344,14 @@ posed_econ_verts, _ = general_lbs(
 )
 
 # export mtl file
+with open(f"{cache_path}/material.mtl", 'w') as fp:
+    fp.write(f'newmtl mat0 \n')
+    fp.write(f'Ka 1.000000 1.000000 1.000000 \n')
+    fp.write(f'Kd 1.000000 1.000000 1.000000 \n')
+    fp.write(f'Ks 0.000000 0.000000 0.000000 \n')
+    fp.write(f'Tr 1.000000 \n')
+    fp.write(f'illum 1 \n')
+    fp.write(f'Ns 0.000000 \n')
+    fp.write(f'map_Kd texture.png \n')
+
 export_obj(posed_econ_verts[0].detach().cpu().numpy(), f_np, vt, ft, f"{cache_path}/mesh.obj")
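For reference, the material.mtl written above is a minimal flat-shaded material that binds texture.png as the diffuse map for mesh.obj; the resulting file reads:

newmtl mat0
Ka 1.000000 1.000000 1.000000
Kd 1.000000 1.000000 1.000000
Ks 0.000000 0.000000 0.000000
Tr 1.000000
illum 1
Ns 0.000000
map_Kd texture.png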
lib/smplx/body_models.py
CHANGED
@@ -1262,6 +1262,24 @@ class SMPLX(SMPLH):
 
         if pose_type == "t-pose":
             full_pose *= 0.0
+        elif pose_type == "a-pose":
+            body_pose = torch.zeros_like(body_pose).view(body_pose.shape[0], -1, 3)
+            body_pose[:, 15] = torch.tensor([0., 0., -45 * np.pi / 180.])
+            body_pose[:, 16] = torch.tensor([0., 0., 45 * np.pi / 180.])
+            body_pose = body_pose.view(body_pose.shape[0], -1)
+
+            full_pose = torch.cat(
+                [
+                    global_orient * 0.,
+                    body_pose,
+                    jaw_pose * 0.,
+                    leye_pose * 0.,
+                    reye_pose * 0.,
+                    left_hand_pose * 0.,
+                    right_hand_pose * 0.,
+                ],
+                dim=1,
+            )
         elif pose_type == "da-pose":
             body_pose = torch.zeros_like(body_pose).view(body_pose.shape[0], -1, 3)
             body_pose[:, 0] = torch.tensor([0., 0., 30 * np.pi / 180.])
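A minimal standalone sketch of the new a-pose branch, assuming the standard SMPL-X axis-angle layout (55 joints x 3 = 165 pose parameters). In body_pose, which excludes the pelvis, index 15 is the left shoulder and 16 the right shoulder, so rotating them by -45 and +45 degrees about z lowers the arms from a T into an A:

import numpy as np
import torch

# Standalone a-pose construction (assumption: standard SMPL-X layout with
# 21 body joints; indices 15 / 16 of body_pose are the two shoulders).
batch = 1
body_pose = torch.zeros(batch, 21, 3)
body_pose[:, 15] = torch.tensor([0., 0., -45 * np.pi / 180.])   # left shoulder
body_pose[:, 16] = torch.tensor([0., 0., 45 * np.pi / 180.])    # right shoulder

full_pose = torch.cat(
    [
        torch.zeros(batch, 3),            # global_orient, zeroed
        body_pose.view(batch, -1),        # 63 body parameters
        torch.zeros(batch, 3 * 3),        # jaw + two eyes, zeroed
        torch.zeros(batch, 2 * 15 * 3),   # both hands, zeroed
    ],
    dim=1,
)
assert full_pose.shape == (batch, 165)    # 55 joints x 3 axis-angle values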