syedMohib44 committed on
Commit
ad17ddf
·
1 Parent(s): 4ebc49e

removed cache

Files changed (49)
  1. .gitignore +3 -1
  2. {build/lib/hy3dgen → hy3dgen}/__init__.py +0 -0
  3. {build/lib/hy3dgen → hy3dgen}/rembg.py +0 -0
  4. {build/lib/hy3dgen → hy3dgen}/shapegen/__init__.py +0 -0
  5. {build/lib/hy3dgen → hy3dgen}/shapegen/models/__init__.py +0 -0
  6. {build/lib/hy3dgen → hy3dgen}/shapegen/models/conditioner.py +0 -0
  7. {build/lib/hy3dgen → hy3dgen}/shapegen/models/hunyuan3ddit.py +0 -0
  8. {build/lib/hy3dgen → hy3dgen}/shapegen/models/vae.py +0 -0
  9. {build/lib/hy3dgen → hy3dgen}/shapegen/pipelines.py +0 -0
  10. {build/lib/hy3dgen → hy3dgen}/shapegen/postprocessors.py +0 -0
  11. {build/lib/hy3dgen → hy3dgen}/shapegen/preprocessors.py +0 -0
  12. {build/lib/hy3dgen → hy3dgen}/shapegen/schedulers.py +0 -0
  13. {build/lib/hy3dgen → hy3dgen}/texgen/__init__.py +0 -0
  14. hy3dgen/texgen/custom_rasterizer/custom_rasterizer/__init__.py +32 -0
  15. hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_glb.py +248 -0
  16. hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_obj.py +76 -0
  17. hy3dgen/texgen/custom_rasterizer/custom_rasterizer/render.py +41 -0
  18. {build/lib/hy3dgen/texgen/differentiable_renderer → hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel}/__init__.py +0 -0
  19. hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/grid_neighbor.cpp +575 -0
  20. hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/rasterizer.cpp +139 -0
  21. hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/rasterizer.h +54 -0
  22. hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/rasterizer_gpu.cu +127 -0
  23. hy3dgen/texgen/custom_rasterizer/setup.py +26 -0
  24. {build/lib/hy3dgen/texgen/hunyuanpaint → hy3dgen/texgen/differentiable_renderer}/__init__.py +0 -0
  25. {build/lib/hy3dgen → hy3dgen}/texgen/differentiable_renderer/camera_utils.py +0 -0
  26. hy3dgen/texgen/differentiable_renderer/compile_mesh_painter.bat +3 -0
  27. hy3dgen/texgen/differentiable_renderer/mesh_processor.cpp +161 -0
  28. hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/PKG-INFO +7 -0
  29. hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/SOURCES.txt +7 -0
  30. hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/dependency_links.txt +1 -0
  31. hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/requires.txt +1 -0
  32. hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/top_level.txt +1 -0
  33. {build/lib/hy3dgen → hy3dgen}/texgen/differentiable_renderer/mesh_processor.py +0 -0
  34. {build/lib/hy3dgen → hy3dgen}/texgen/differentiable_renderer/mesh_render.py +0 -0
  35. {build/lib/hy3dgen → hy3dgen}/texgen/differentiable_renderer/mesh_utils.py +0 -0
  36. {build/lib/hy3dgen → hy3dgen}/texgen/differentiable_renderer/setup.py +0 -0
  37. {build/lib/hy3dgen/texgen/hunyuanpaint/unet → hy3dgen/texgen/hunyuanpaint}/__init__.py +0 -0
  38. {build/lib/hy3dgen → hy3dgen}/texgen/hunyuanpaint/pipeline.py +0 -0
  39. {build/lib/hy3dgen/texgen/utils → hy3dgen/texgen/hunyuanpaint/unet}/__init__.py +0 -0
  40. {build/lib/hy3dgen → hy3dgen}/texgen/hunyuanpaint/unet/modules.py +0 -0
  41. {build/lib/hy3dgen → hy3dgen}/texgen/pipelines.py +1 -1
  42. hy3dgen/texgen/utils/__init__.py +23 -0
  43. {build/lib/hy3dgen → hy3dgen}/texgen/utils/alignImg4Tex_utils.py +0 -0
  44. {build/lib/hy3dgen → hy3dgen}/texgen/utils/counter_utils.py +0 -0
  45. {build/lib/hy3dgen → hy3dgen}/texgen/utils/dehighlight_utils.py +0 -0
  46. {build/lib/hy3dgen → hy3dgen}/texgen/utils/multiview_utils.py +0 -0
  47. {build/lib/hy3dgen → hy3dgen}/texgen/utils/simplify_mesh_utils.py +0 -0
  48. {build/lib/hy3dgen → hy3dgen}/texgen/utils/uv_warp_utils.py +0 -0
  49. {build/lib/hy3dgen → hy3dgen}/text2image.py +0 -0
.gitignore CHANGED
@@ -7,6 +7,8 @@ __pycache__/
venv/
env/
.venv/
+
+ # Build
build/
dist/

@@ -27,4 +29,4 @@ dist/
.vscode/

# Hugging Face cache (optional)
- ~/.cache/huggingface/
+ /content/huggingface/
{build/lib/hy3dgen → hy3dgen}/__init__.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/rembg.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/__init__.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/models/__init__.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/models/conditioner.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/models/hunyuan3ddit.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/models/vae.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/pipelines.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/postprocessors.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/preprocessors.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/shapegen/schedulers.py RENAMED
File without changes
{build/lib/hy3dgen → hy3dgen}/texgen/__init__.py RENAMED
File without changes
hy3dgen/texgen/custom_rasterizer/custom_rasterizer/__init__.py ADDED
@@ -0,0 +1,32 @@
+ # Open Source Model Licensed under the Apache License Version 2.0
+ # and Other Licenses of the Third-Party Components therein:
+ # The below Model in this distribution may have been modified by THL A29 Limited
+ # ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
+
+ # Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+ # The below software and/or models in this distribution may have been
+ # modified by THL A29 Limited ("Tencent Modifications").
+ # All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+ # Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+ # except for the third-party components listed below.
+ # Hunyuan 3D does not impose any additional limitations beyond what is outlined
+ # in the repsective licenses of these third-party components.
+ # Users must comply with all terms and conditions of original licenses of these third-party
+ # components and must ensure that the usage of the third party components adheres to
+ # all relevant laws and regulations.
+
+ # For avoidance of doubts, Hunyuan 3D means the large language models and
+ # their software and algorithms, including trained model weights, parameters (including
+ # optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+ # fine-tuning enabling code and other elements of the foregoing made publicly available
+ # by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+ '''
+ from .hierarchy import BuildHierarchy, BuildHierarchyWithColor
+ from .io_obj import LoadObj, LoadObjWithTexture
+ from .render import rasterize, interpolate
+ '''
+ from .io_glb import *
+ from .io_obj import *
+ from .render import *
hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_glb.py ADDED
@@ -0,0 +1,248 @@
1
+ # Open Source Model Licensed under the Apache License Version 2.0
2
+ # and Other Licenses of the Third-Party Components therein:
3
+ # The below Model in this distribution may have been modified by THL A29 Limited
4
+ # ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
5
+
6
+ # Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
7
+ # The below software and/or models in this distribution may have been
8
+ # modified by THL A29 Limited ("Tencent Modifications").
9
+ # All Tencent Modifications are Copyright (C) THL A29 Limited.
10
+
11
+ # Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
12
+ # except for the third-party components listed below.
13
+ # Hunyuan 3D does not impose any additional limitations beyond what is outlined
14
+ # in the repsective licenses of these third-party components.
15
+ # Users must comply with all terms and conditions of original licenses of these third-party
16
+ # components and must ensure that the usage of the third party components adheres to
17
+ # all relevant laws and regulations.
18
+
19
+ # For avoidance of doubts, Hunyuan 3D means the large language models and
20
+ # their software and algorithms, including trained model weights, parameters (including
21
+ # optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
22
+ # fine-tuning enabling code and other elements of the foregoing made publicly available
23
+ # by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
24
+
25
+ import base64
26
+ import io
27
+ import os
28
+
29
+ import numpy as np
30
+ from PIL import Image as PILImage
31
+ from pygltflib import GLTF2
32
+ from scipy.spatial.transform import Rotation as R
33
+
34
+
35
+ # Function to extract buffer data
36
+ def get_buffer_data(gltf, buffer_view):
37
+ buffer = gltf.buffers[buffer_view.buffer]
38
+ buffer_data = gltf.get_data_from_buffer_uri(buffer.uri)
39
+ byte_offset = buffer_view.byteOffset if buffer_view.byteOffset else 0
40
+ byte_length = buffer_view.byteLength
41
+ return buffer_data[byte_offset:byte_offset + byte_length]
42
+
43
+
44
+ # Function to extract attribute data
45
+ def get_attribute_data(gltf, accessor_index):
46
+ accessor = gltf.accessors[accessor_index]
47
+ buffer_view = gltf.bufferViews[accessor.bufferView]
48
+ buffer_data = get_buffer_data(gltf, buffer_view)
49
+
50
+ comptype = {5120: np.int8, 5121: np.uint8, 5122: np.int16, 5123: np.uint16, 5125: np.uint32, 5126: np.float32}
51
+ dtype = comptype[accessor.componentType]
52
+
53
+ t2n = {'SCALAR': 1, 'VEC2': 2, 'VEC3': 3, 'VEC4': 4, 'MAT2': 4, 'MAT3': 9, 'MAT4': 16}
54
+ num_components = t2n[accessor.type]
55
+
56
+ # Calculate the correct slice of data
57
+ byte_offset = accessor.byteOffset if accessor.byteOffset else 0
58
+ byte_stride = buffer_view.byteStride if buffer_view.byteStride else num_components * np.dtype(dtype).itemsize
59
+ count = accessor.count
60
+
61
+ # Extract the attribute data
62
+ attribute_data = np.zeros((count, num_components), dtype=dtype)
63
+ for i in range(count):
64
+ start = byte_offset + i * byte_stride
65
+ end = start + num_components * np.dtype(dtype).itemsize
66
+ attribute_data[i] = np.frombuffer(buffer_data[start:end], dtype=dtype)
67
+
68
+ return attribute_data
69
+
70
+
71
+ # Function to extract image data
72
+ def get_image_data(gltf, image, folder):
73
+ if image.uri:
74
+ if image.uri.startswith('data:'):
75
+ # Data URI
76
+ header, encoded = image.uri.split(',', 1)
77
+ data = base64.b64decode(encoded)
78
+ else:
79
+ # External file
80
+ fn = image.uri
81
+ if not os.path.isabs(fn):
82
+ fn = folder + '/' + fn
83
+ with open(fn, 'rb') as f:
84
+ data = f.read()
85
+ else:
86
+ buffer_view = gltf.bufferViews[image.bufferView]
87
+ data = get_buffer_data(gltf, buffer_view)
88
+ return data
89
+
90
+
91
+ # Function to convert triangle strip to triangles
92
+ def convert_triangle_strip_to_triangles(indices):
93
+ triangles = []
94
+ for i in range(len(indices) - 2):
95
+ if i % 2 == 0:
96
+ triangles.append([indices[i], indices[i + 1], indices[i + 2]])
97
+ else:
98
+ triangles.append([indices[i], indices[i + 2], indices[i + 1]])
99
+ return np.array(triangles).reshape(-1, 3)
100
+
101
+
102
+ # Function to convert triangle fan to triangles
103
+ def convert_triangle_fan_to_triangles(indices):
104
+ triangles = []
105
+ for i in range(1, len(indices) - 1):
106
+ triangles.append([indices[0], indices[i], indices[i + 1]])
107
+ return np.array(triangles).reshape(-1, 3)
108
+
109
+
110
+ # Function to get the transformation matrix from a node
111
+ def get_node_transform(node):
112
+ if node.matrix:
113
+ return np.array(node.matrix).reshape(4, 4).T
114
+ else:
115
+ T = np.eye(4)
116
+ if node.translation:
117
+ T[:3, 3] = node.translation
118
+ if node.rotation:
119
+ R_mat = R.from_quat(node.rotation).as_matrix()
120
+ T[:3, :3] = R_mat
121
+ if node.scale:
122
+ S = np.diag(node.scale + [1])
123
+ T = T @ S
124
+ return T
125
+
126
+
127
+ def get_world_transform(gltf, node_index, parents, world_transforms):
128
+ if parents[node_index] == -2:
129
+ return world_transforms[node_index]
130
+
131
+ node = gltf.nodes[node_index]
132
+ if parents[node_index] == -1:
133
+ world_transforms[node_index] = get_node_transform(node)
134
+ parents[node_index] = -2
135
+ return world_transforms[node_index]
136
+
137
+ parent_index = parents[node_index]
138
+ parent_transform = get_world_transform(gltf, parent_index, parents, world_transforms)
139
+ world_transforms[node_index] = parent_transform @ get_node_transform(node)
140
+ parents[node_index] = -2
141
+ return world_transforms[node_index]
142
+
143
+
144
+ def LoadGlb(path):
145
+ # Load the GLB file using pygltflib
146
+ gltf = GLTF2().load(path)
147
+
148
+ primitives = []
149
+ images = {}
150
+ # Iterate through the meshes in the GLB file
151
+
152
+ world_transforms = [np.identity(4) for i in range(len(gltf.nodes))]
153
+ parents = [-1 for i in range(len(gltf.nodes))]
154
+ for node_index, node in enumerate(gltf.nodes):
155
+ for idx in node.children:
156
+ parents[idx] = node_index
157
+ # for i in range(len(gltf.nodes)):
158
+ # get_world_transform(gltf, i, parents, world_transform)
159
+
160
+ for node_index, node in enumerate(gltf.nodes):
161
+ if node.mesh is not None:
162
+ world_transform = get_world_transform(gltf, node_index, parents, world_transforms)
163
+ # Iterate through the primitives in the mesh
164
+ mesh = gltf.meshes[node.mesh]
165
+ for primitive in mesh.primitives:
166
+ # Access the attributes of the primitive
167
+ attributes = primitive.attributes.__dict__
168
+ mode = primitive.mode if primitive.mode is not None else 4 # Default to TRIANGLES
169
+ result = {}
170
+ if primitive.indices is not None:
171
+ indices = get_attribute_data(gltf, primitive.indices)
172
+ if mode == 4: # TRIANGLES
173
+ face_indices = indices.reshape(-1, 3)
174
+ elif mode == 5: # TRIANGLE_STRIP
175
+ face_indices = convert_triangle_strip_to_triangles(indices)
176
+ elif mode == 6: # TRIANGLE_FAN
177
+ face_indices = convert_triangle_fan_to_triangles(indices)
178
+ else:
179
+ continue
180
+ result['F'] = face_indices
181
+
182
+ # Extract vertex positions
183
+ if 'POSITION' in attributes and attributes['POSITION'] is not None:
184
+ positions = get_attribute_data(gltf, attributes['POSITION'])
185
+ # Apply the world transformation to the positions
186
+ positions_homogeneous = np.hstack([positions, np.ones((positions.shape[0], 1))])
187
+ transformed_positions = (world_transform @ positions_homogeneous.T).T[:, :3]
188
+ result['V'] = transformed_positions
189
+
190
+ # Extract vertex colors
191
+ if 'COLOR_0' in attributes and attributes['COLOR_0'] is not None:
192
+ colors = get_attribute_data(gltf, attributes['COLOR_0'])
193
+ if colors.shape[-1] > 3:
194
+ colors = colors[..., :3]
195
+ result['VC'] = colors
196
+
197
+ # Extract UVs
198
+ if 'TEXCOORD_0' in attributes and not attributes['TEXCOORD_0'] is None:
199
+ uvs = get_attribute_data(gltf, attributes['TEXCOORD_0'])
200
+ result['UV'] = uvs
201
+
202
+ if primitive.material is not None:
203
+ material = gltf.materials[primitive.material]
204
+ if material.pbrMetallicRoughness is not None and material.pbrMetallicRoughness.baseColorTexture is not None:
205
+ texture_index = material.pbrMetallicRoughness.baseColorTexture.index
206
+ texture = gltf.textures[texture_index]
207
+ image_index = texture.source
208
+ if not image_index in images:
209
+ image = gltf.images[image_index]
210
+ image_data = get_image_data(gltf, image, os.path.dirname(path))
211
+ pil_image = PILImage.open(io.BytesIO(image_data))
212
+ if pil_image.mode != 'RGB':
213
+ pil_image = pil_image.convert('RGB')
214
+ images[image_index] = pil_image
215
+ result['TEX'] = image_index
216
+ elif material.emissiveTexture is not None:
217
+ texture_index = material.emissiveTexture.index
218
+ texture = gltf.textures[texture_index]
219
+ image_index = texture.source
220
+ if not image_index in images:
221
+ image = gltf.images[image_index]
222
+ image_data = get_image_data(gltf, image, os.path.dirname(path))
223
+ pil_image = PILImage.open(io.BytesIO(image_data))
224
+ if pil_image.mode != 'RGB':
225
+ pil_image = pil_image.convert('RGB')
226
+ images[image_index] = pil_image
227
+ result['TEX'] = image_index
228
+ else:
229
+ if material.pbrMetallicRoughness is not None:
230
+ base_color = material.pbrMetallicRoughness.baseColorFactor
231
+ else:
232
+ base_color = np.array([0.8, 0.8, 0.8], dtype=np.float32)
233
+ result['MC'] = base_color
234
+
235
+ primitives.append(result)
236
+
237
+ return primitives, images
238
+
239
+
240
+ def RotatePrimitives(primitives, transform):
241
+ for i in range(len(primitives)):
242
+ if 'V' in primitives[i]:
243
+ primitives[i]['V'] = primitives[i]['V'] @ transform.T
244
+
245
+
246
+ if __name__ == '__main__':
247
+ path = 'data/test.glb'
248
+ LoadGlb(path)
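Note (not part of this commit): a minimal usage sketch for the GLB loader added above, assuming the custom_rasterizer package and its compiled custom_rasterizer_kernel extension have been installed via the bundled setup.py; the asset path and rotation matrix are illustrative only.

import numpy as np
from custom_rasterizer.io_glb import LoadGlb, RotatePrimitives

# LoadGlb returns one dict per primitive ('V', 'F', optional 'VC', 'UV', 'TEX'/'MC')
# plus a dict of decoded PIL textures keyed by glTF image index.
primitives, images = LoadGlb('assets/example.glb')  # hypothetical asset path
for prim in primitives:
    if 'V' in prim and 'F' in prim:
        print(prim['V'].shape, prim['F'].shape, 'texture:', prim.get('TEX', prim.get('MC')))

# Rotate all primitives 90 degrees around the Z axis, in place.
rot_z = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32)
RotatePrimitives(primitives, rot_z)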
hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_obj.py ADDED
@@ -0,0 +1,76 @@
+ # Open Source Model Licensed under the Apache License Version 2.0
+ # and Other Licenses of the Third-Party Components therein:
+ # The below Model in this distribution may have been modified by THL A29 Limited
+ # ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
+
+ # Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+ # The below software and/or models in this distribution may have been
+ # modified by THL A29 Limited ("Tencent Modifications").
+ # All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+ # Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+ # except for the third-party components listed below.
+ # Hunyuan 3D does not impose any additional limitations beyond what is outlined
+ # in the repsective licenses of these third-party components.
+ # Users must comply with all terms and conditions of original licenses of these third-party
+ # components and must ensure that the usage of the third party components adheres to
+ # all relevant laws and regulations.
+
+ # For avoidance of doubts, Hunyuan 3D means the large language models and
+ # their software and algorithms, including trained model weights, parameters (including
+ # optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+ # fine-tuning enabling code and other elements of the foregoing made publicly available
+ # by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+ import cv2
+ import numpy as np
+
+
+ def LoadObj(fn):
+     lines = [l.strip() for l in open(fn)]
+     vertices = []
+     faces = []
+     for l in lines:
+         words = [w for w in l.split(' ') if w != '']
+         if len(words) == 0:
+             continue
+         if words[0] == 'v':
+             v = [float(words[i]) for i in range(1, 4)]
+             vertices.append(v)
+         elif words[0] == 'f':
+             f = [int(words[i]) - 1 for i in range(1, 4)]
+             faces.append(f)
+
+     return np.array(vertices).astype('float32'), np.array(faces).astype('int32')
+
+
+ def LoadObjWithTexture(fn, tex_fn):
+     lines = [l.strip() for l in open(fn)]
+     vertices = []
+     vertex_textures = []
+     faces = []
+     face_textures = []
+     for l in lines:
+         words = [w for w in l.split(' ') if w != '']
+         if len(words) == 0:
+             continue
+         if words[0] == 'v':
+             v = [float(words[i]) for i in range(1, len(words))]
+             vertices.append(v)
+         elif words[0] == 'vt':
+             v = [float(words[i]) for i in range(1, len(words))]
+             vertex_textures.append(v)
+         elif words[0] == 'f':
+             f = []
+             ft = []
+             for i in range(1, len(words)):
+                 t = words[i].split('/')
+                 f.append(int(t[0]) - 1)
+                 ft.append(int(t[1]) - 1)
+             for i in range(2, len(f)):
+                 faces.append([f[0], f[i - 1], f[i]])
+                 face_textures.append([ft[0], ft[i - 1], ft[i]])
+
+     tex_image = cv2.cvtColor(cv2.imread(tex_fn), cv2.COLOR_BGR2RGB)
+     return np.array(vertices).astype('float32'), np.array(vertex_textures).astype('float32'), np.array(faces).astype(
+         'int32'), np.array(face_textures).astype('int32'), tex_image
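Note (not part of this commit): a short usage sketch for the OBJ loaders added above, under the same installation assumption; the file names are placeholders. LoadObj expects plain triangle faces ("f i j k"), while LoadObjWithTexture expects "f v/vt ..." entries and also returns the texture image read with OpenCV.

from custom_rasterizer.io_obj import LoadObj, LoadObjWithTexture

vertices, faces = LoadObj('mesh.obj')                     # (N, 3) float32, (M, 3) int32
v, vt, f, ft, tex = LoadObjWithTexture('mesh.obj', 'albedo.png')
print(v.shape, vt.shape, f.shape, ft.shape, tex.shape)    # tex is an H x W x 3 RGB array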
hy3dgen/texgen/custom_rasterizer/custom_rasterizer/render.py ADDED
@@ -0,0 +1,41 @@
+ # Open Source Model Licensed under the Apache License Version 2.0
+ # and Other Licenses of the Third-Party Components therein:
+ # The below Model in this distribution may have been modified by THL A29 Limited
+ # ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
+
+ # Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+ # The below software and/or models in this distribution may have been
+ # modified by THL A29 Limited ("Tencent Modifications").
+ # All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+ # Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+ # except for the third-party components listed below.
+ # Hunyuan 3D does not impose any additional limitations beyond what is outlined
+ # in the repsective licenses of these third-party components.
+ # Users must comply with all terms and conditions of original licenses of these third-party
+ # components and must ensure that the usage of the third party components adheres to
+ # all relevant laws and regulations.
+
+ # For avoidance of doubts, Hunyuan 3D means the large language models and
+ # their software and algorithms, including trained model weights, parameters (including
+ # optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+ # fine-tuning enabling code and other elements of the foregoing made publicly available
+ # by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+ import custom_rasterizer_kernel
+ import torch
+
+
+ def rasterize(pos, tri, resolution, clamp_depth=torch.zeros(0), use_depth_prior=0):
+     assert (pos.device == tri.device)
+     findices, barycentric = custom_rasterizer_kernel.rasterize_image(pos[0], tri, clamp_depth, resolution[1],
+                                                                      resolution[0], 1e-6, use_depth_prior)
+     return findices, barycentric
+
+
+ def interpolate(col, findices, barycentric, tri):
+     f = findices - 1 + (findices == 0)
+     vcol = col[0, tri.long()[f.long()]]
+     result = barycentric.view(*barycentric.shape, 1) * vcol
+     result = torch.sum(result, axis=-2)
+     return result.view(1, *result.shape)
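Note (not part of this commit): a sketch of how rasterize and interpolate compose, assuming the compiled custom_rasterizer_kernel extension and a CUDA device are available; all shapes and values are illustrative.

import torch
from custom_rasterizer import rasterize, interpolate

pos = torch.rand(1, 100, 4).cuda()                               # (1, V, 4) clip-space vertex positions
tri = torch.randint(0, 100, (50, 3), dtype=torch.int32).cuda()   # (F, 3) triangle indices
col = torch.rand(1, 100, 3).cuda()                               # (1, V, C) per-vertex attributes

findices, barycentric = rasterize(pos, tri, (512, 512))          # per-pixel face id (0 = background) and weights
image = interpolate(col, findices, barycentric, tri)             # (1, 512, 512, C) interpolated attributes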
{build/lib/hy3dgen/texgen/differentiable_renderer → hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel}/__init__.py RENAMED
File without changes
hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/grid_neighbor.cpp ADDED
@@ -0,0 +1,575 @@
1
+ #include "rasterizer.h"
2
+ #include <fstream>
3
+
4
+ inline int pos2key(float* p, int resolution) {
5
+ int x = (p[0] * 0.5 + 0.5) * resolution;
6
+ int y = (p[1] * 0.5 + 0.5) * resolution;
7
+ int z = (p[2] * 0.5 + 0.5) * resolution;
8
+ return (x * resolution + y) * resolution + z;
9
+ }
10
+
11
+ inline void key2pos(int key, int resolution, float* p) {
12
+ int x = key / resolution / resolution;
13
+ int y = key / resolution % resolution;
14
+ int z = key % resolution;
15
+ p[0] = ((x + 0.5) / resolution - 0.5) * 2;
16
+ p[1] = ((y + 0.5) / resolution - 0.5) * 2;
17
+ p[2] = ((z + 0.5) / resolution - 0.5) * 2;
18
+ }
19
+
20
+ inline void key2cornerpos(int key, int resolution, float* p) {
21
+ int x = key / resolution / resolution;
22
+ int y = key / resolution % resolution;
23
+ int z = key % resolution;
24
+ p[0] = ((x + 0.75) / resolution - 0.5) * 2;
25
+ p[1] = ((y + 0.25) / resolution - 0.5) * 2;
26
+ p[2] = ((z + 0.75) / resolution - 0.5) * 2;
27
+ }
28
+
29
+ inline float* pos_ptr(int l, int i, int j, torch::Tensor t) {
30
+ float* pdata = t.data_ptr<float>();
31
+ int height = t.size(1);
32
+ int width = t.size(2);
33
+ return &pdata[((l * height + i) * width + j) * 4];
34
+ }
35
+
36
+ struct Grid
37
+ {
38
+ std::vector<int> seq2oddcorner;
39
+ std::vector<int> seq2evencorner;
40
+ std::vector<int> seq2grid;
41
+ std::vector<int> seq2normal;
42
+ std::vector<int> seq2neighbor;
43
+ std::unordered_map<int, int> grid2seq;
44
+ std::vector<int> downsample_seq;
45
+ int num_origin_seq;
46
+ int resolution;
47
+ int stride;
48
+ };
49
+
50
+ inline void pos_from_seq(Grid& grid, int seq, float* p) {
51
+ auto k = grid.seq2grid[seq];
52
+ key2pos(k, grid.resolution, p);
53
+ }
54
+
55
+ inline int fetch_seq(Grid& grid, int l, int i, int j, torch::Tensor pdata) {
56
+ float* p = pos_ptr(l, i, j, pdata);
57
+ if (p[3] == 0)
58
+ return -1;
59
+ auto key = pos2key(p, grid.resolution);
60
+ int seq = grid.grid2seq[key];
61
+ return seq;
62
+ }
63
+
64
+ inline int fetch_last_seq(Grid& grid, int i, int j, torch::Tensor pdata) {
65
+ int num_layers = pdata.size(0);
66
+ int l = 0;
67
+ int idx = fetch_seq(grid, l, i, j, pdata);
68
+ while (l < num_layers - 1) {
69
+ l += 1;
70
+ int new_idx = fetch_seq(grid, l, i, j, pdata);
71
+ if (new_idx == -1)
72
+ break;
73
+ idx = new_idx;
74
+ }
75
+ return idx;
76
+ }
77
+
78
+ inline int fetch_nearest_seq(Grid& grid, int i, int j, int dim, float d, torch::Tensor pdata) {
79
+ float p[3];
80
+ float max_dist = 1e10;
81
+ int best_idx = -1;
82
+ int num_layers = pdata.size(0);
83
+ for (int l = 0; l < num_layers; ++l) {
84
+ int idx = fetch_seq(grid, l, i, j, pdata);
85
+ if (idx == -1)
86
+ break;
87
+ pos_from_seq(grid, idx, p);
88
+ float dist = std::abs(d - p[(dim + 2) % 3]);
89
+ if (dist < max_dist) {
90
+ max_dist = dist;
91
+ best_idx = idx;
92
+ }
93
+ }
94
+ return best_idx;
95
+ }
96
+
97
+ inline int fetch_nearest_seq_layer(Grid& grid, int i, int j, int dim, float d, torch::Tensor pdata) {
98
+ float p[3];
99
+ float max_dist = 1e10;
100
+ int best_layer = -1;
101
+ int num_layers = pdata.size(0);
102
+ for (int l = 0; l < num_layers; ++l) {
103
+ int idx = fetch_seq(grid, l, i, j, pdata);
104
+ if (idx == -1)
105
+ break;
106
+ pos_from_seq(grid, idx, p);
107
+ float dist = std::abs(d - p[(dim + 2) % 3]);
108
+ if (dist < max_dist) {
109
+ max_dist = dist;
110
+ best_layer = l;
111
+ }
112
+ }
113
+ return best_layer;
114
+ }
115
+
116
+ void FetchNeighbor(Grid& grid, int seq, float* pos, int dim, int boundary_info, std::vector<torch::Tensor>& view_layer_positions,
117
+ int* output_indices)
118
+ {
119
+ auto t = view_layer_positions[dim];
120
+ int height = t.size(1);
121
+ int width = t.size(2);
122
+ int top = 0;
123
+ int ci = 0;
124
+ int cj = 0;
125
+ if (dim == 0) {
126
+ ci = (pos[1]/2+0.5)*height;
127
+ cj = (pos[0]/2+0.5)*width;
128
+ }
129
+ else if (dim == 1) {
130
+ ci = (pos[1]/2+0.5)*height;
131
+ cj = (pos[2]/2+0.5)*width;
132
+ }
133
+ else {
134
+ ci = (-pos[2]/2+0.5)*height;
135
+ cj = (pos[0]/2+0.5)*width;
136
+ }
137
+ int stride = grid.stride;
138
+ for (int ni = ci + stride; ni >= ci - stride; ni -= stride) {
139
+ for (int nj = cj - stride; nj <= cj + stride; nj += stride) {
140
+ int idx = -1;
141
+ if (ni == ci && nj == cj)
142
+ idx = seq;
143
+ else if (!(ni < 0 || ni >= height || nj < 0 || nj >= width)) {
144
+ if (boundary_info == -1)
145
+ idx = fetch_seq(grid, 0, ni, nj, t);
146
+ else if (boundary_info == 1)
147
+ idx = fetch_last_seq(grid, ni, nj, t);
148
+ else
149
+ idx = fetch_nearest_seq(grid, ni, nj, dim, pos[(dim + 2) % 3], t);
150
+ }
151
+ output_indices[top] = idx;
152
+ top += 1;
153
+ }
154
+ }
155
+ }
156
+
157
+ void DownsampleGrid(Grid& src, Grid& tar)
158
+ {
159
+ src.downsample_seq.resize(src.seq2grid.size(), -1);
160
+ tar.resolution = src.resolution / 2;
161
+ tar.stride = src.stride * 2;
162
+ float pos[3];
163
+ std::vector<int> seq2normal_count;
164
+ for (int i = 0; i < src.seq2grid.size(); ++i) {
165
+ key2pos(src.seq2grid[i], src.resolution, pos);
166
+ int k = pos2key(pos, tar.resolution);
167
+ int s = seq2normal_count.size();
168
+ if (!tar.grid2seq.count(k)) {
169
+ tar.grid2seq[k] = tar.seq2grid.size();
170
+ tar.seq2grid.emplace_back(k);
171
+ seq2normal_count.emplace_back(0);
172
+ seq2normal_count.emplace_back(0);
173
+ seq2normal_count.emplace_back(0);
174
+ //tar.seq2normal.emplace_back(src.seq2normal[i]);
175
+ } else {
176
+ s = tar.grid2seq[k] * 3;
177
+ }
178
+ seq2normal_count[s + src.seq2normal[i]] += 1;
179
+ src.downsample_seq[i] = tar.grid2seq[k];
180
+ }
181
+ tar.seq2normal.resize(seq2normal_count.size() / 3);
182
+ for (int i = 0; i < seq2normal_count.size(); i += 3) {
183
+ int t = 0;
184
+ for (int j = 1; j < 3; ++j) {
185
+ if (seq2normal_count[i + j] > seq2normal_count[i + t])
186
+ t = j;
187
+ }
188
+ tar.seq2normal[i / 3] = t;
189
+ }
190
+ }
191
+
192
+ void NeighborGrid(Grid& grid, std::vector<torch::Tensor> view_layer_positions, int v)
193
+ {
194
+ grid.seq2evencorner.resize(grid.seq2grid.size(), 0);
195
+ grid.seq2oddcorner.resize(grid.seq2grid.size(), 0);
196
+ std::unordered_set<int> visited_seq;
197
+ for (int vd = 0; vd < 3; ++vd) {
198
+ auto t = view_layer_positions[vd];
199
+ auto t0 = view_layer_positions[v];
200
+ int height = t.size(1);
201
+ int width = t.size(2);
202
+ int num_layers = t.size(0);
203
+ int num_view_layers = t0.size(0);
204
+ for (int i = 0; i < height; ++i) {
205
+ for (int j = 0; j < width; ++j) {
206
+ for (int l = 0; l < num_layers; ++l) {
207
+ int seq = fetch_seq(grid, l, i, j, t);
208
+ if (seq == -1)
209
+ break;
210
+ int dim = grid.seq2normal[seq];
211
+ if (dim != v)
212
+ continue;
213
+
214
+ float pos[3];
215
+ pos_from_seq(grid, seq, pos);
216
+
217
+ int ci = 0;
218
+ int cj = 0;
219
+ if (dim == 0) {
220
+ ci = (pos[1]/2+0.5)*height;
221
+ cj = (pos[0]/2+0.5)*width;
222
+ }
223
+ else if (dim == 1) {
224
+ ci = (pos[1]/2+0.5)*height;
225
+ cj = (pos[2]/2+0.5)*width;
226
+ }
227
+ else {
228
+ ci = (-pos[2]/2+0.5)*height;
229
+ cj = (pos[0]/2+0.5)*width;
230
+ }
231
+
232
+ if ((ci % (grid.stride * 2) < grid.stride) && (cj % (grid.stride * 2) >= grid.stride))
233
+ grid.seq2evencorner[seq] = 1;
234
+
235
+ if ((ci % (grid.stride * 2) >= grid.stride) && (cj % (grid.stride * 2) < grid.stride))
236
+ grid.seq2oddcorner[seq] = 1;
237
+
238
+ bool is_boundary = false;
239
+ if (vd == v) {
240
+ if (l == 0 || l == num_layers - 1)
241
+ is_boundary = true;
242
+ else {
243
+ int seq_new = fetch_seq(grid, l + 1, i, j, t);
244
+ if (seq_new == -1)
245
+ is_boundary = true;
246
+ }
247
+ }
248
+ int boundary_info = 0;
249
+ if (is_boundary && (l == 0))
250
+ boundary_info = -1;
251
+ else if (is_boundary)
252
+ boundary_info = 1;
253
+ if (visited_seq.count(seq))
254
+ continue;
255
+ visited_seq.insert(seq);
256
+
257
+ FetchNeighbor(grid, seq, pos, dim, boundary_info, view_layer_positions, &grid.seq2neighbor[seq * 9]);
258
+ }
259
+ }
260
+ }
261
+ }
262
+ }
263
+
264
+ void PadGrid(Grid& src, Grid& tar, std::vector<torch::Tensor>& view_layer_positions) {
265
+ auto& downsample_seq = src.downsample_seq;
266
+ auto& seq2evencorner = src.seq2evencorner;
267
+ auto& seq2oddcorner = src.seq2oddcorner;
268
+ int indices[9];
269
+ std::vector<int> mapped_even_corners(tar.seq2grid.size(), 0);
270
+ std::vector<int> mapped_odd_corners(tar.seq2grid.size(), 0);
271
+ for (int i = 0; i < downsample_seq.size(); ++i) {
272
+ if (seq2evencorner[i] > 0) {
273
+ mapped_even_corners[downsample_seq[i]] = 1;
274
+ }
275
+ if (seq2oddcorner[i] > 0) {
276
+ mapped_odd_corners[downsample_seq[i]] = 1;
277
+ }
278
+ }
279
+ auto& tar_seq2normal = tar.seq2normal;
280
+ auto& tar_seq2grid = tar.seq2grid;
281
+ for (int i = 0; i < tar_seq2grid.size(); ++i) {
282
+ if (mapped_even_corners[i] == 1 && mapped_odd_corners[i] == 1)
283
+ continue;
284
+ auto k = tar_seq2grid[i];
285
+ float p[3];
286
+ key2cornerpos(k, tar.resolution, p);
287
+
288
+ int src_key = pos2key(p, src.resolution);
289
+ if (!src.grid2seq.count(src_key)) {
290
+ int seq = src.seq2grid.size();
291
+ src.grid2seq[src_key] = seq;
292
+ src.seq2evencorner.emplace_back((mapped_even_corners[i] == 0));
293
+ src.seq2oddcorner.emplace_back((mapped_odd_corners[i] == 0));
294
+ src.seq2grid.emplace_back(src_key);
295
+ src.seq2normal.emplace_back(tar_seq2normal[i]);
296
+ FetchNeighbor(src, seq, p, tar_seq2normal[i], 0, view_layer_positions, indices);
297
+ for (int j = 0; j < 9; ++j) {
298
+ src.seq2neighbor.emplace_back(indices[j]);
299
+ }
300
+ src.downsample_seq.emplace_back(i);
301
+ } else {
302
+ int seq = src.grid2seq[src_key];
303
+ if (mapped_even_corners[i] == 0)
304
+ src.seq2evencorner[seq] = 1;
305
+ if (mapped_odd_corners[i] == 0)
306
+ src.seq2oddcorner[seq] = 1;
307
+ }
308
+ }
309
+ }
310
+
311
+ std::vector<std::vector<torch::Tensor>> build_hierarchy(std::vector<torch::Tensor> view_layer_positions,
312
+ std::vector<torch::Tensor> view_layer_normals, int num_level, int resolution)
313
+ {
314
+ if (view_layer_positions.size() != 3 || num_level < 1) {
315
+ printf("Alert! We require 3 layers and at least 1 level! (%d %d)\n", view_layer_positions.size(), num_level);
316
+ return {{},{},{},{}};
317
+ }
318
+
319
+ std::vector<Grid> grids;
320
+ grids.resize(num_level);
321
+
322
+ std::vector<float> seq2pos;
323
+ auto& seq2grid = grids[0].seq2grid;
324
+ auto& seq2normal = grids[0].seq2normal;
325
+ auto& grid2seq = grids[0].grid2seq;
326
+ grids[0].resolution = resolution;
327
+ grids[0].stride = 1;
328
+
329
+ auto int64_options = torch::TensorOptions().dtype(torch::kInt64).requires_grad(false);
330
+ auto float_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);
331
+
332
+ for (int v = 0; v < 3; ++v) {
333
+ int num_layers = view_layer_positions[v].size(0);
334
+ int height = view_layer_positions[v].size(1);
335
+ int width = view_layer_positions[v].size(2);
336
+ float* data = view_layer_positions[v].data_ptr<float>();
337
+ float* data_normal = view_layer_normals[v].data_ptr<float>();
338
+ for (int l = 0; l < num_layers; ++l) {
339
+ for (int i = 0; i < height; ++i) {
340
+ for (int j = 0; j < width; ++j) {
341
+ float* p = &data[(i * width + j) * 4];
342
+ float* n = &data_normal[(i * width + j) * 3];
343
+ if (p[3] == 0)
344
+ continue;
345
+ auto k = pos2key(p, resolution);
346
+ if (!grid2seq.count(k)) {
347
+ int dim = 0;
348
+ for (int d = 0; d < 3; ++d) {
349
+ if (std::abs(n[d]) > std::abs(n[dim]))
350
+ dim = d;
351
+ }
352
+ dim = (dim + 1) % 3;
353
+ grid2seq[k] = seq2grid.size();
354
+ seq2grid.emplace_back(k);
355
+ seq2pos.push_back(p[0]);
356
+ seq2pos.push_back(p[1]);
357
+ seq2pos.push_back(p[2]);
358
+ seq2normal.emplace_back(dim);
359
+ }
360
+ }
361
+ }
362
+ data += (height * width * 4);
363
+ data_normal += (height * width * 3);
364
+ }
365
+ }
366
+
367
+ for (int i = 0; i < num_level - 1; ++i) {
368
+ DownsampleGrid(grids[i], grids[i + 1]);
369
+ }
370
+
371
+ for (int l = 0; l < num_level; ++l) {
372
+ grids[l].seq2neighbor.resize(grids[l].seq2grid.size() * 9, -1);
373
+ grids[l].num_origin_seq = grids[l].seq2grid.size();
374
+ for (int d = 0; d < 3; ++d) {
375
+ NeighborGrid(grids[l], view_layer_positions, d);
376
+ }
377
+ }
378
+
379
+ for (int i = num_level - 2; i >= 0; --i) {
380
+ PadGrid(grids[i], grids[i + 1], view_layer_positions);
381
+ }
382
+ for (int i = grids[0].num_origin_seq; i < grids[0].seq2grid.size(); ++i) {
383
+ int k = grids[0].seq2grid[i];
384
+ float p[3];
385
+ key2pos(k, grids[0].resolution, p);
386
+ seq2pos.push_back(p[0]);
387
+ seq2pos.push_back(p[1]);
388
+ seq2pos.push_back(p[2]);
389
+ }
390
+
391
+ std::vector<torch::Tensor> texture_positions(2);
392
+ std::vector<torch::Tensor> grid_neighbors(grids.size());
393
+ std::vector<torch::Tensor> grid_downsamples(grids.size() - 1);
394
+ std::vector<torch::Tensor> grid_evencorners(grids.size());
395
+ std::vector<torch::Tensor> grid_oddcorners(grids.size());
396
+
397
+
398
+ texture_positions[0] = torch::zeros({static_cast<int64_t>(seq2pos.size() / 3), static_cast<int64_t>(3)}, float_options);
399
+ texture_positions[1] = torch::zeros({static_cast<int64_t>(seq2pos.size() / 3)}, float_options);
400
+ float* positions_out_ptr = texture_positions[0].data_ptr<float>();
401
+ memcpy(positions_out_ptr, seq2pos.data(), sizeof(float) * seq2pos.size());
402
+ positions_out_ptr = texture_positions[1].data_ptr<float>();
403
+ for (int i = 0; i < grids[0].seq2grid.size(); ++i) {
404
+ positions_out_ptr[i] = (i < grids[0].num_origin_seq);
405
+ }
406
+
407
+ for (int i = 0; i < grids.size(); ++i) {
408
+ grid_neighbors[i] = torch::zeros({static_cast<int64_t>(grids[i].seq2grid.size()), static_cast<int64_t>(9)}, int64_options);
409
+ int64_t* nptr = grid_neighbors[i].data_ptr<int64_t>();
410
+ for (int j = 0; j < grids[i].seq2neighbor.size(); ++j) {
411
+ nptr[j] = grids[i].seq2neighbor[j];
412
+ }
413
+
414
+ grid_evencorners[i] = torch::zeros({static_cast<int64_t>(grids[i].seq2evencorner.size())}, int64_options);
415
+ grid_oddcorners[i] = torch::zeros({static_cast<int64_t>(grids[i].seq2oddcorner.size())}, int64_options);
416
+ int64_t* dptr = grid_evencorners[i].data_ptr<int64_t>();
417
+ for (int j = 0; j < grids[i].seq2evencorner.size(); ++j) {
418
+ dptr[j] = grids[i].seq2evencorner[j];
419
+ }
420
+ dptr = grid_oddcorners[i].data_ptr<int64_t>();
421
+ for (int j = 0; j < grids[i].seq2oddcorner.size(); ++j) {
422
+ dptr[j] = grids[i].seq2oddcorner[j];
423
+ }
424
+ if (i + 1 < grids.size()) {
425
+ grid_downsamples[i] = torch::zeros({static_cast<int64_t>(grids[i].downsample_seq.size())}, int64_options);
426
+ int64_t* dptr = grid_downsamples[i].data_ptr<int64_t>();
427
+ for (int j = 0; j < grids[i].downsample_seq.size(); ++j) {
428
+ dptr[j] = grids[i].downsample_seq[j];
429
+ }
430
+ }
431
+
432
+ }
433
+ return {texture_positions, grid_neighbors, grid_downsamples, grid_evencorners, grid_oddcorners};
434
+ }
435
+
436
+ std::vector<std::vector<torch::Tensor>> build_hierarchy_with_feat(
437
+ std::vector<torch::Tensor> view_layer_positions,
438
+ std::vector<torch::Tensor> view_layer_normals,
439
+ std::vector<torch::Tensor> view_layer_feats,
440
+ int num_level, int resolution)
441
+ {
442
+ if (view_layer_positions.size() != 3 || num_level < 1) {
443
+ printf("Alert! We require 3 layers and at least 1 level! (%d %d)\n", view_layer_positions.size(), num_level);
444
+ return {{},{},{},{}};
445
+ }
446
+
447
+ std::vector<Grid> grids;
448
+ grids.resize(num_level);
449
+
450
+ std::vector<float> seq2pos;
451
+ std::vector<float> seq2feat;
452
+ auto& seq2grid = grids[0].seq2grid;
453
+ auto& seq2normal = grids[0].seq2normal;
454
+ auto& grid2seq = grids[0].grid2seq;
455
+ grids[0].resolution = resolution;
456
+ grids[0].stride = 1;
457
+
458
+ auto int64_options = torch::TensorOptions().dtype(torch::kInt64).requires_grad(false);
459
+ auto float_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);
460
+
461
+ int feat_channel = 3;
462
+ for (int v = 0; v < 3; ++v) {
463
+ int num_layers = view_layer_positions[v].size(0);
464
+ int height = view_layer_positions[v].size(1);
465
+ int width = view_layer_positions[v].size(2);
466
+ float* data = view_layer_positions[v].data_ptr<float>();
467
+ float* data_normal = view_layer_normals[v].data_ptr<float>();
468
+ float* data_feat = view_layer_feats[v].data_ptr<float>();
469
+ feat_channel = view_layer_feats[v].size(3);
470
+ for (int l = 0; l < num_layers; ++l) {
471
+ for (int i = 0; i < height; ++i) {
472
+ for (int j = 0; j < width; ++j) {
473
+ float* p = &data[(i * width + j) * 4];
474
+ float* n = &data_normal[(i * width + j) * 3];
475
+ float* f = &data_feat[(i * width + j) * feat_channel];
476
+ if (p[3] == 0)
477
+ continue;
478
+ auto k = pos2key(p, resolution);
479
+ if (!grid2seq.count(k)) {
480
+ int dim = 0;
481
+ for (int d = 0; d < 3; ++d) {
482
+ if (std::abs(n[d]) > std::abs(n[dim]))
483
+ dim = d;
484
+ }
485
+ dim = (dim + 1) % 3;
486
+ grid2seq[k] = seq2grid.size();
487
+ seq2grid.emplace_back(k);
488
+ seq2pos.push_back(p[0]);
489
+ seq2pos.push_back(p[1]);
490
+ seq2pos.push_back(p[2]);
491
+ for (int c = 0; c < feat_channel; ++c) {
492
+ seq2feat.emplace_back(f[c]);
493
+ }
494
+ seq2normal.emplace_back(dim);
495
+ }
496
+ }
497
+ }
498
+ data += (height * width * 4);
499
+ data_normal += (height * width * 3);
500
+ data_feat += (height * width * feat_channel);
501
+ }
502
+ }
503
+
504
+ for (int i = 0; i < num_level - 1; ++i) {
505
+ DownsampleGrid(grids[i], grids[i + 1]);
506
+ }
507
+
508
+ for (int l = 0; l < num_level; ++l) {
509
+ grids[l].seq2neighbor.resize(grids[l].seq2grid.size() * 9, -1);
510
+ grids[l].num_origin_seq = grids[l].seq2grid.size();
511
+ for (int d = 0; d < 3; ++d) {
512
+ NeighborGrid(grids[l], view_layer_positions, d);
513
+ }
514
+ }
515
+
516
+ for (int i = num_level - 2; i >= 0; --i) {
517
+ PadGrid(grids[i], grids[i + 1], view_layer_positions);
518
+ }
519
+ for (int i = grids[0].num_origin_seq; i < grids[0].seq2grid.size(); ++i) {
520
+ int k = grids[0].seq2grid[i];
521
+ float p[3];
522
+ key2pos(k, grids[0].resolution, p);
523
+ seq2pos.push_back(p[0]);
524
+ seq2pos.push_back(p[1]);
525
+ seq2pos.push_back(p[2]);
526
+ for (int c = 0; c < feat_channel; ++c) {
527
+ seq2feat.emplace_back(0.5);
528
+ }
529
+ }
530
+
531
+ std::vector<torch::Tensor> texture_positions(2);
532
+ std::vector<torch::Tensor> texture_feats(1);
533
+ std::vector<torch::Tensor> grid_neighbors(grids.size());
534
+ std::vector<torch::Tensor> grid_downsamples(grids.size() - 1);
535
+ std::vector<torch::Tensor> grid_evencorners(grids.size());
536
+ std::vector<torch::Tensor> grid_oddcorners(grids.size());
537
+
538
+ texture_positions[0] = torch::zeros({static_cast<int64_t>(seq2pos.size() / 3), static_cast<int64_t>(3)}, float_options);
539
+ texture_positions[1] = torch::zeros({static_cast<int64_t>(seq2pos.size() / 3)}, float_options);
540
+ texture_feats[0] = torch::zeros({static_cast<int64_t>(seq2feat.size() / feat_channel), static_cast<int64_t>(feat_channel)}, float_options);
541
+ float* positions_out_ptr = texture_positions[0].data_ptr<float>();
542
+ memcpy(positions_out_ptr, seq2pos.data(), sizeof(float) * seq2pos.size());
543
+ positions_out_ptr = texture_positions[1].data_ptr<float>();
544
+ for (int i = 0; i < grids[0].seq2grid.size(); ++i) {
545
+ positions_out_ptr[i] = (i < grids[0].num_origin_seq);
546
+ }
547
+ float* feats_out_ptr = texture_feats[0].data_ptr<float>();
548
+ memcpy(feats_out_ptr, seq2feat.data(), sizeof(float) * seq2feat.size());
549
+
550
+ for (int i = 0; i < grids.size(); ++i) {
551
+ grid_neighbors[i] = torch::zeros({static_cast<int64_t>(grids[i].seq2grid.size()), static_cast<int64_t>(9)}, int64_options);
552
+ int64_t* nptr = grid_neighbors[i].data_ptr<int64_t>();
553
+ for (int j = 0; j < grids[i].seq2neighbor.size(); ++j) {
554
+ nptr[j] = grids[i].seq2neighbor[j];
555
+ }
556
+ grid_evencorners[i] = torch::zeros({static_cast<int64_t>(grids[i].seq2evencorner.size())}, int64_options);
557
+ grid_oddcorners[i] = torch::zeros({static_cast<int64_t>(grids[i].seq2oddcorner.size())}, int64_options);
558
+ int64_t* dptr = grid_evencorners[i].data_ptr<int64_t>();
559
+ for (int j = 0; j < grids[i].seq2evencorner.size(); ++j) {
560
+ dptr[j] = grids[i].seq2evencorner[j];
561
+ }
562
+ dptr = grid_oddcorners[i].data_ptr<int64_t>();
563
+ for (int j = 0; j < grids[i].seq2oddcorner.size(); ++j) {
564
+ dptr[j] = grids[i].seq2oddcorner[j];
565
+ }
566
+ if (i + 1 < grids.size()) {
567
+ grid_downsamples[i] = torch::zeros({static_cast<int64_t>(grids[i].downsample_seq.size())}, int64_options);
568
+ int64_t* dptr = grid_downsamples[i].data_ptr<int64_t>();
569
+ for (int j = 0; j < grids[i].downsample_seq.size(); ++j) {
570
+ dptr[j] = grids[i].downsample_seq[j];
571
+ }
572
+ }
573
+ }
574
+ return {texture_positions, texture_feats, grid_neighbors, grid_downsamples, grid_evencorners, grid_oddcorners};
575
+ }
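Note (not part of this commit): a hedged sketch of calling build_hierarchy through the pybind11 binding declared in rasterizer.cpp below. Each of the three orthogonal views expects a float32 (num_layers, H, W, 4) position map (w == 0 marks empty pixels) and a (num_layers, H, W, 3) normal map; the layer count, resolution and zero-filled tensors here are placeholders for real rendered layer maps.

import torch
import custom_rasterizer_kernel as crk

H = W = 256
positions = [torch.zeros(2, H, W, 4) for _ in range(3)]   # three views, two depth-peeled layers each
normals = [torch.zeros(2, H, W, 3) for _ in range(3)]

tex_pos, neighbors, downsamples, even_corners, odd_corners = crk.build_hierarchy(
    positions, normals, 4, 256)                            # 4 pyramid levels, base grid resolution 256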
hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/rasterizer.cpp ADDED
@@ -0,0 +1,139 @@
1
+ #include "rasterizer.h"
2
+
3
+ void rasterizeTriangleCPU(int idx, float* vt0, float* vt1, float* vt2, int width, int height, INT64* zbuffer, float* d, float occlusion_truncation) {
4
+ float x_min = std::min(vt0[0], std::min(vt1[0],vt2[0]));
5
+ float x_max = std::max(vt0[0], std::max(vt1[0],vt2[0]));
6
+ float y_min = std::min(vt0[1], std::min(vt1[1],vt2[1]));
7
+ float y_max = std::max(vt0[1], std::max(vt1[1],vt2[1]));
8
+
9
+ for (int px = x_min; px < x_max + 1; ++px) {
10
+ if (px < 0 || px >= width)
11
+ continue;
12
+ for (int py = y_min; py < y_max + 1; ++py) {
13
+ if (py < 0 || py >= height)
14
+ continue;
15
+ float vt[2] = {px + 0.5f, py + 0.5f};
16
+ float baryCentricCoordinate[3];
17
+ calculateBarycentricCoordinate(vt0, vt1, vt2, vt, baryCentricCoordinate);
18
+ if (isBarycentricCoordInBounds(baryCentricCoordinate)) {
19
+ int pixel = py * width + px;
20
+ if (zbuffer == 0) {
21
+ zbuffer[pixel] = (INT64)(idx + 1);
22
+ continue;
23
+ }
24
+
25
+ float depth = baryCentricCoordinate[0] * vt0[2] + baryCentricCoordinate[1] * vt1[2] + baryCentricCoordinate[2] * vt2[2];
26
+ float depth_thres = 0;
27
+ if (d) {
28
+ depth_thres = d[pixel] * 0.49999f + 0.5f + occlusion_truncation;
29
+ }
30
+
31
+ int z_quantize = depth * (2<<17);
32
+ INT64 token = (INT64)z_quantize * MAXINT + (INT64)(idx + 1);
33
+ if (depth < depth_thres)
34
+ continue;
35
+ zbuffer[pixel] = std::min(zbuffer[pixel], token);
36
+ }
37
+ }
38
+ }
39
+ }
40
+
41
+ void barycentricFromImgcoordCPU(float* V, int* F, int* findices, INT64* zbuffer, int width, int height, int num_vertices, int num_faces,
42
+ float* barycentric_map, int pix)
43
+ {
44
+ INT64 f = zbuffer[pix] % MAXINT;
45
+ if (f == (MAXINT-1)) {
46
+ findices[pix] = 0;
47
+ barycentric_map[pix * 3] = 0;
48
+ barycentric_map[pix * 3 + 1] = 0;
49
+ barycentric_map[pix * 3 + 2] = 0;
50
+ return;
51
+ }
52
+ findices[pix] = f;
53
+ f -= 1;
54
+ float barycentric[3] = {0, 0, 0};
55
+ if (f >= 0) {
56
+ float vt[2] = {float(pix % width) + 0.5f, float(pix / width) + 0.5f};
57
+ float* vt0_ptr = V + (F[f * 3] * 4);
58
+ float* vt1_ptr = V + (F[f * 3 + 1] * 4);
59
+ float* vt2_ptr = V + (F[f * 3 + 2] * 4);
60
+
61
+ float vt0[2] = {(vt0_ptr[0] / vt0_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt0_ptr[1] / vt0_ptr[3]) * (height - 1) + 0.5f};
62
+ float vt1[2] = {(vt1_ptr[0] / vt1_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt1_ptr[1] / vt1_ptr[3]) * (height - 1) + 0.5f};
63
+ float vt2[2] = {(vt2_ptr[0] / vt2_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt2_ptr[1] / vt2_ptr[3]) * (height - 1) + 0.5f};
64
+
65
+ calculateBarycentricCoordinate(vt0, vt1, vt2, vt, barycentric);
66
+
67
+ barycentric[0] = barycentric[0] / vt0_ptr[3];
68
+ barycentric[1] = barycentric[1] / vt1_ptr[3];
69
+ barycentric[2] = barycentric[2] / vt2_ptr[3];
70
+ float w = 1.0f / (barycentric[0] + barycentric[1] + barycentric[2]);
71
+ barycentric[0] *= w;
72
+ barycentric[1] *= w;
73
+ barycentric[2] *= w;
74
+
75
+ }
76
+ barycentric_map[pix * 3] = barycentric[0];
77
+ barycentric_map[pix * 3 + 1] = barycentric[1];
78
+ barycentric_map[pix * 3 + 2] = barycentric[2];
79
+ }
80
+
81
+ void rasterizeImagecoordsKernelCPU(float* V, int* F, float* d, INT64* zbuffer, float occlusion_trunc, int width, int height, int num_vertices, int num_faces, int f)
82
+ {
83
+ float* vt0_ptr = V + (F[f * 3] * 4);
84
+ float* vt1_ptr = V + (F[f * 3 + 1] * 4);
85
+ float* vt2_ptr = V + (F[f * 3 + 2] * 4);
86
+
87
+ float vt0[3] = {(vt0_ptr[0] / vt0_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt0_ptr[1] / vt0_ptr[3]) * (height - 1) + 0.5f, vt0_ptr[2] / vt0_ptr[3] * 0.49999f + 0.5f};
88
+ float vt1[3] = {(vt1_ptr[0] / vt1_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt1_ptr[1] / vt1_ptr[3]) * (height - 1) + 0.5f, vt1_ptr[2] / vt1_ptr[3] * 0.49999f + 0.5f};
89
+ float vt2[3] = {(vt2_ptr[0] / vt2_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt2_ptr[1] / vt2_ptr[3]) * (height - 1) + 0.5f, vt2_ptr[2] / vt2_ptr[3] * 0.49999f + 0.5f};
90
+
91
+ rasterizeTriangleCPU(f, vt0, vt1, vt2, width, height, zbuffer, d, occlusion_trunc);
92
+ }
93
+
94
+ std::vector<torch::Tensor> rasterize_image_cpu(torch::Tensor V, torch::Tensor F, torch::Tensor D,
95
+ int width, int height, float occlusion_truncation, int use_depth_prior)
96
+ {
97
+ int num_faces = F.size(0);
98
+ int num_vertices = V.size(0);
99
+ auto options = torch::TensorOptions().dtype(torch::kInt32).requires_grad(false);
100
+ auto INT64_options = torch::TensorOptions().dtype(torch::kInt64).requires_grad(false);
101
+ auto findices = torch::zeros({height, width}, options);
102
+ INT64 maxint = (INT64)MAXINT * (INT64)MAXINT + (MAXINT - 1);
103
+ auto z_min = torch::ones({height, width}, INT64_options) * (int64_t)maxint;
104
+
105
+ if (!use_depth_prior) {
106
+ for (int i = 0; i < num_faces; ++i) {
107
+ rasterizeImagecoordsKernelCPU(V.data_ptr<float>(), F.data_ptr<int>(), 0,
108
+ (INT64*)z_min.data_ptr<int64_t>(), occlusion_truncation, width, height, num_vertices, num_faces, i);
109
+ }
110
+ } else {
111
+ for (int i = 0; i < num_faces; ++i)
112
+ rasterizeImagecoordsKernelCPU(V.data_ptr<float>(), F.data_ptr<int>(), D.data_ptr<float>(),
113
+ (INT64*)z_min.data_ptr<int64_t>(), occlusion_truncation, width, height, num_vertices, num_faces, i);
114
+ }
115
+
116
+ auto float_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);
117
+ auto barycentric = torch::zeros({height, width, 3}, float_options);
118
+ for (int i = 0; i < width * height; ++i)
119
+ barycentricFromImgcoordCPU(V.data_ptr<float>(), F.data_ptr<int>(),
120
+ findices.data_ptr<int>(), (INT64*)z_min.data_ptr<int64_t>(), width, height, num_vertices, num_faces, barycentric.data_ptr<float>(), i);
121
+
122
+ return {findices, barycentric};
123
+ }
124
+
125
+ std::vector<torch::Tensor> rasterize_image(torch::Tensor V, torch::Tensor F, torch::Tensor D,
126
+ int width, int height, float occlusion_truncation, int use_depth_prior)
127
+ {
128
+ int device_id = V.get_device();
129
+ if (device_id == -1)
130
+ return rasterize_image_cpu(V, F, D, width, height, occlusion_truncation, use_depth_prior);
131
+ else
132
+ return rasterize_image_gpu(V, F, D, width, height, occlusion_truncation, use_depth_prior);
133
+ }
134
+
135
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
136
+ m.def("rasterize_image", &rasterize_image, "Custom image rasterization");
137
+ m.def("build_hierarchy", &build_hierarchy, "Custom image rasterization");
138
+ m.def("build_hierarchy_with_feat", &build_hierarchy_with_feat, "Custom image rasterization");
139
+ }
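Note (not part of this commit): the rasterizer resolves visibility by packing a quantized depth and a one-based face id into a single 64-bit token per pixel (token = z_quantize * MAXINT + face_id + 1), so std::min / atomicMin keeps the nearest face and token % MAXINT recovers its id; the buffer is initialised to MAXINT * MAXINT + (MAXINT - 1), which decodes to the background sentinel. A small Python mirror of that arithmetic, for illustration only:

MAXINT = 2147483647

def pack(face_id, z_quantize):
    return z_quantize * MAXINT + (face_id + 1)

empty = MAXINT * MAXINT + (MAXINT - 1)
assert empty % MAXINT == MAXINT - 1          # sentinel value -> findices set to 0 (background)
assert pack(7, 1000) % MAXINT - 1 == 7       # face id survives the packing
assert pack(7, 1000) < pack(7, 2000)         # smaller quantized depth wins under min()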
hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/rasterizer.h ADDED
@@ -0,0 +1,54 @@
+ #ifndef RASTERIZER_H_
+ #define RASTERIZER_H_
+
+ #include <torch/extension.h>
+ #include <vector>
+ #include <ATen/ATen.h>
+ #include <ATen/cuda/CUDAContext.h> // For CUDA context
+ #include <cstdint>
+ #define INT64 uint64_t
+ #define MAXINT 2147483647
+
+ __host__ __device__ inline float calculateSignedArea2(float* a, float* b, float* c) {
+     return ((c[0] - a[0]) * (b[1] - a[1]) - (b[0] - a[0]) * (c[1] - a[1]));
+ }
+
+ __host__ __device__ inline void calculateBarycentricCoordinate(float* a, float* b, float* c, float* p,
+     float* barycentric)
+ {
+     float beta_tri = calculateSignedArea2(a, p, c);
+     float gamma_tri = calculateSignedArea2(a, b, p);
+     float area = calculateSignedArea2(a, b, c);
+     if (area == 0) {
+         barycentric[0] = -1.0;
+         barycentric[1] = -1.0;
+         barycentric[2] = -1.0;
+         return;
+     }
+     float tri_inv = 1.0 / area;
+     float beta = beta_tri * tri_inv;
+     float gamma = gamma_tri * tri_inv;
+     float alpha = 1.0 - beta - gamma;
+     barycentric[0] = alpha;
+     barycentric[1] = beta;
+     barycentric[2] = gamma;
+ }
+
+ __host__ __device__ inline bool isBarycentricCoordInBounds(float* barycentricCoord) {
+     return barycentricCoord[0] >= 0.0 && barycentricCoord[0] <= 1.0 &&
+            barycentricCoord[1] >= 0.0 && barycentricCoord[1] <= 1.0 &&
+            barycentricCoord[2] >= 0.0 && barycentricCoord[2] <= 1.0;
+ }
+
+ std::vector<torch::Tensor> rasterize_image_gpu(torch::Tensor V, torch::Tensor F, torch::Tensor D,
+     int width, int height, float occlusion_truncation, int use_depth_prior);
+
+ std::vector<std::vector<torch::Tensor>> build_hierarchy(std::vector<torch::Tensor> view_layer_positions, std::vector<torch::Tensor> view_layer_normals, int num_level, int resolution);
+
+ std::vector<std::vector<torch::Tensor>> build_hierarchy_with_feat(
+     std::vector<torch::Tensor> view_layer_positions,
+     std::vector<torch::Tensor> view_layer_normals,
+     std::vector<torch::Tensor> view_layer_feats,
+     int num_level, int resolution);
+
+ #endif
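Note (not part of this commit): a minimal NumPy mirror of the signed-area barycentric helpers declared above, handy for sanity-checking the C++/CUDA versions; the function names and the test triangle are illustrative.

import numpy as np

def signed_area2(a, b, c):
    return (c[0] - a[0]) * (b[1] - a[1]) - (b[0] - a[0]) * (c[1] - a[1])

def barycentric(a, b, c, p):
    area = signed_area2(a, b, c)
    if area == 0:
        return np.array([-1.0, -1.0, -1.0])      # degenerate triangle, matches the header's convention
    beta = signed_area2(a, p, c) / area
    gamma = signed_area2(a, b, p) / area
    return np.array([1.0 - beta - gamma, beta, gamma])

print(barycentric((0, 0), (1, 0), (0, 1), (1 / 3, 1 / 3)))   # centroid -> roughly [1/3, 1/3, 1/3]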
hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/rasterizer_gpu.cu ADDED
@@ -0,0 +1,127 @@
1
+ #include "rasterizer.h"
2
+
3
+ __device__ void rasterizeTriangleGPU(int idx, float* vt0, float* vt1, float* vt2, int width, int height, INT64* zbuffer, float* d, float occlusion_truncation) {
4
+ float x_min = std::min(vt0[0], std::min(vt1[0],vt2[0]));
5
+ float x_max = std::max(vt0[0], std::max(vt1[0],vt2[0]));
6
+ float y_min = std::min(vt0[1], std::min(vt1[1],vt2[1]));
7
+ float y_max = std::max(vt0[1], std::max(vt1[1],vt2[1]));
8
+
9
+ for (int px = x_min; px < x_max + 1; ++px) {
10
+ if (px < 0 || px >= width)
11
+ continue;
12
+ for (int py = y_min; py < y_max + 1; ++py) {
13
+ if (py < 0 || py >= height)
14
+ continue;
15
+ float vt[2] = {px + 0.5f, py + 0.5f};
16
+ float baryCentricCoordinate[3];
17
+ calculateBarycentricCoordinate(vt0, vt1, vt2, vt, baryCentricCoordinate);
18
+ if (isBarycentricCoordInBounds(baryCentricCoordinate)) {
19
+ int pixel = py * width + px;
20
+ if (zbuffer == 0) {
21
+ atomicExch(&zbuffer[pixel], (INT64)(idx + 1));
22
+ continue;
23
+ }
24
+ float depth = baryCentricCoordinate[0] * vt0[2] + baryCentricCoordinate[1] * vt1[2] + baryCentricCoordinate[2] * vt2[2];
25
+ float depth_thres = 0;
26
+ if (d) {
27
+ depth_thres = d[pixel] * 0.49999f + 0.5f + occlusion_truncation;
28
+ }
29
+
30
+ int z_quantize = depth * (2<<17);
31
+ INT64 token = (INT64)z_quantize * MAXINT + (INT64)(idx + 1);
32
+ if (depth < depth_thres)
33
+ continue;
34
+ atomicMin(&zbuffer[pixel], token);
35
+ }
36
+ }
37
+ }
38
+ }
39
+
40
+ __global__ void barycentricFromImgcoordGPU(float* V, int* F, int* findices, INT64* zbuffer, int width, int height, int num_vertices, int num_faces,
41
+ float* barycentric_map)
42
+ {
43
+ int pix = blockIdx.x * blockDim.x + threadIdx.x;
44
+ if (pix >= width * height)
45
+ return;
46
+ INT64 f = zbuffer[pix] % MAXINT;
47
+ if (f == (MAXINT-1)) {
48
+ findices[pix] = 0;
49
+ barycentric_map[pix * 3] = 0;
50
+ barycentric_map[pix * 3 + 1] = 0;
51
+ barycentric_map[pix * 3 + 2] = 0;
52
+ return;
53
+ }
54
+ findices[pix] = f;
55
+ f -= 1;
56
+ float barycentric[3] = {0, 0, 0};
57
+ if (f >= 0) {
58
+ float vt[2] = {float(pix % width) + 0.5f, float(pix / width) + 0.5f};
59
+ float* vt0_ptr = V + (F[f * 3] * 4);
60
+ float* vt1_ptr = V + (F[f * 3 + 1] * 4);
61
+ float* vt2_ptr = V + (F[f * 3 + 2] * 4);
62
+
63
+ float vt0[2] = {(vt0_ptr[0] / vt0_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt0_ptr[1] / vt0_ptr[3]) * (height - 1) + 0.5f};
64
+ float vt1[2] = {(vt1_ptr[0] / vt1_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt1_ptr[1] / vt1_ptr[3]) * (height - 1) + 0.5f};
65
+ float vt2[2] = {(vt2_ptr[0] / vt2_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt2_ptr[1] / vt2_ptr[3]) * (height - 1) + 0.5f};
66
+
67
+ calculateBarycentricCoordinate(vt0, vt1, vt2, vt, barycentric);
68
+
69
+ barycentric[0] = barycentric[0] / vt0_ptr[3];
70
+ barycentric[1] = barycentric[1] / vt1_ptr[3];
71
+ barycentric[2] = barycentric[2] / vt2_ptr[3];
72
+ float w = 1.0f / (barycentric[0] + barycentric[1] + barycentric[2]);
73
+ barycentric[0] *= w;
74
+ barycentric[1] *= w;
75
+ barycentric[2] *= w;
76
+
77
+ }
78
+ barycentric_map[pix * 3] = barycentric[0];
79
+ barycentric_map[pix * 3 + 1] = barycentric[1];
80
+ barycentric_map[pix * 3 + 2] = barycentric[2];
81
+ }
82
+
83
+ __global__ void rasterizeImagecoordsKernelGPU(float* V, int* F, float* d, INT64* zbuffer, float occlusion_trunc, int width, int height, int num_vertices, int num_faces)
84
+ {
85
+ int f = blockIdx.x * blockDim.x + threadIdx.x;
86
+ if (f >= num_faces)
87
+ return;
88
+
89
+ float* vt0_ptr = V + (F[f * 3] * 4);
90
+ float* vt1_ptr = V + (F[f * 3 + 1] * 4);
91
+ float* vt2_ptr = V + (F[f * 3 + 2] * 4);
92
+
93
+ float vt0[3] = {(vt0_ptr[0] / vt0_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt0_ptr[1] / vt0_ptr[3]) * (height - 1) + 0.5f, vt0_ptr[2] / vt0_ptr[3] * 0.49999f + 0.5f};
94
+ float vt1[3] = {(vt1_ptr[0] / vt1_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt1_ptr[1] / vt1_ptr[3]) * (height - 1) + 0.5f, vt1_ptr[2] / vt1_ptr[3] * 0.49999f + 0.5f};
95
+ float vt2[3] = {(vt2_ptr[0] / vt2_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt2_ptr[1] / vt2_ptr[3]) * (height - 1) + 0.5f, vt2_ptr[2] / vt2_ptr[3] * 0.49999f + 0.5f};
96
+
97
+ rasterizeTriangleGPU(f, vt0, vt1, vt2, width, height, zbuffer, d, occlusion_trunc);
98
+ }
99
+
100
+ std::vector<torch::Tensor> rasterize_image_gpu(torch::Tensor V, torch::Tensor F, torch::Tensor D,
101
+ int width, int height, float occlusion_truncation, int use_depth_prior)
102
+ {
103
+ int device_id = V.get_device();
104
+ cudaSetDevice(device_id);
105
+ int num_faces = F.size(0);
106
+ int num_vertices = V.size(0);
107
+ auto options = torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, device_id).requires_grad(false);
108
+ auto INT64_options = torch::TensorOptions().dtype(torch::kInt64).device(torch::kCUDA, device_id).requires_grad(false);
109
+ auto findices = torch::zeros({height, width}, options);
110
+ INT64 maxint = (INT64)MAXINT * (INT64)MAXINT + (MAXINT - 1);
111
+ auto z_min = torch::ones({height, width}, INT64_options) * (int64_t)maxint;
112
+
113
+ if (!use_depth_prior) {
114
+ rasterizeImagecoordsKernelGPU<<<(num_faces+255)/256,256,0,at::cuda::getCurrentCUDAStream()>>>(V.data_ptr<float>(), F.data_ptr<int>(), 0,
115
+ (INT64*)z_min.data_ptr<int64_t>(), occlusion_truncation, width, height, num_vertices, num_faces);
116
+ } else {
117
+ rasterizeImagecoordsKernelGPU<<<(num_faces+255)/256,256,0,at::cuda::getCurrentCUDAStream()>>>(V.data_ptr<float>(), F.data_ptr<int>(), D.data_ptr<float>(),
118
+ (INT64*)z_min.data_ptr<int64_t>(), occlusion_truncation, width, height, num_vertices, num_faces);
119
+ }
120
+
121
+ auto float_options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, device_id).requires_grad(false);
122
+ auto barycentric = torch::zeros({height, width, 3}, float_options);
123
+ barycentricFromImgcoordGPU<<<(width * height + 255)/256, 256>>>(V.data_ptr<float>(), F.data_ptr<int>(),
124
+ findices.data_ptr<int>(), (INT64*)z_min.data_ptr<int64_t>(), width, height, num_vertices, num_faces, barycentric.data_ptr<float>());
125
+
126
+ return {findices, barycentric};
127
+ }
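
The depth test in rasterizeTriangleGPU packs the quantized depth and the 1-based face index into a single 64-bit token so that one atomicMin resolves visibility per pixel; barycentricFromImgcoordGPU later recovers the face index with % MAXINT. A small Python sketch (not from the repository; constants copied from rasterizer.h) of that packing:

    MAXINT = 2147483647  # same constant as rasterizer.h

    def pack(depth, face_idx):
        z_quantize = int(depth * (2 << 17))      # quantize depth exactly as the kernel does
        return z_quantize * MAXINT + (face_idx + 1)

    def unpack(token):
        return token % MAXINT - 1                # -1 restores the 0-based face index

    near = pack(0.25, face_idx=7)
    far = pack(0.80, face_idx=3)
    winner = min(near, far)                      # atomicMin keeps the smallest token, i.e. the closest face
    print(unpack(winner))                        # 7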
hy3dgen/texgen/custom_rasterizer/setup.py ADDED
@@ -0,0 +1,26 @@
+ from setuptools import setup, find_packages
+ from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+
+ # build custom rasterizer
+ # build with `python setup.py install`
+ # nvcc is needed
+
+ custom_rasterizer_module = CUDAExtension('custom_rasterizer_kernel', [
+     'lib/custom_rasterizer_kernel/rasterizer.cpp',
+     'lib/custom_rasterizer_kernel/grid_neighbor.cpp',
+     'lib/custom_rasterizer_kernel/rasterizer_gpu.cu',
+ ])
+
+ setup(
+     packages=find_packages(),
+     version='0.1',
+     name='custom_rasterizer',
+     include_package_data=True,
+     package_dir={'': '.'},
+     ext_modules=[
+         custom_rasterizer_module,
+     ],
+     cmdclass={
+         'build_ext': BuildExtension
+     }
+ )
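
Assuming nvcc and PyTorch are available, the extension above is built from the custom_rasterizer directory and then imported under the name given to CUDAExtension; a brief sketch (not part of the commit):

    # run once from hy3dgen/texgen/custom_rasterizer (nvcc required):
    #   python setup.py install
    import custom_rasterizer_kernel  # should expose the bindings declared in rasterizer.cpp / rasterizer.h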
{build/lib/hy3dgen/texgen/hunyuanpaint β†’ hy3dgen/texgen/differentiable_renderer}/__init__.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/differentiable_renderer/camera_utils.py RENAMED
File without changes
hy3dgen/texgen/differentiable_renderer/compile_mesh_painter.bat ADDED
@@ -0,0 +1,3 @@
+ FOR /F "tokens=*" %%i IN ('python -m pybind11 --includes') DO SET PYINCLUDES=%%i
+ echo %PYINCLUDES%
+ g++ -O3 -Wall -shared -std=c++11 -fPIC %PYINCLUDES% mesh_processor.cpp -o mesh_processor.pyd -lpython3.12
hy3dgen/texgen/differentiable_renderer/mesh_processor.cpp ADDED
@@ -0,0 +1,161 @@
+ #include <vector>
+ #include <queue>
+ #include <cmath>
+ #include <algorithm>
+ #include <pybind11/pybind11.h>
+ #include <pybind11/numpy.h>
+ #include <pybind11/stl.h>
+
+ namespace py = pybind11;
+ using namespace std;
+
+ std::pair<py::array_t<float>,
+     py::array_t<uint8_t>> meshVerticeInpaint_smooth(py::array_t<float> texture,
+     py::array_t<uint8_t> mask,
+     py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
+     py::array_t<int> pos_idx, py::array_t<int> uv_idx) {
+     auto texture_buf = texture.request();
+     auto mask_buf = mask.request();
+     auto vtx_pos_buf = vtx_pos.request();
+     auto vtx_uv_buf = vtx_uv.request();
+     auto pos_idx_buf = pos_idx.request();
+     auto uv_idx_buf = uv_idx.request();
+
+     int texture_height = texture_buf.shape[0];
+     int texture_width = texture_buf.shape[1];
+     int texture_channel = texture_buf.shape[2];
+     float* texture_ptr = static_cast<float*>(texture_buf.ptr);
+     uint8_t* mask_ptr = static_cast<uint8_t*>(mask_buf.ptr);
+
+     int vtx_num = vtx_pos_buf.shape[0];
+     float* vtx_pos_ptr = static_cast<float*>(vtx_pos_buf.ptr);
+     float* vtx_uv_ptr = static_cast<float*>(vtx_uv_buf.ptr);
+     int* pos_idx_ptr = static_cast<int*>(pos_idx_buf.ptr);
+     int* uv_idx_ptr = static_cast<int*>(uv_idx_buf.ptr);
+
+     vector<float> vtx_mask(vtx_num, 0.0f);
+     vector<vector<float>> vtx_color(vtx_num, vector<float>(texture_channel, 0.0f));
+     vector<int> uncolored_vtxs;
+
+     vector<vector<int>> G(vtx_num);
+
+     for (int i = 0; i < uv_idx_buf.shape[0]; ++i) {
+         for (int k = 0; k < 3; ++k) {
+             int vtx_uv_idx = uv_idx_ptr[i * 3 + k];
+             int vtx_idx = pos_idx_ptr[i * 3 + k];
+             int uv_v = round(vtx_uv_ptr[vtx_uv_idx * 2] * (texture_width - 1));
+             int uv_u = round((1.0 - vtx_uv_ptr[vtx_uv_idx * 2 + 1]) * (texture_height - 1));
+
+             if (mask_ptr[uv_u * texture_width + uv_v] > 0) {
+                 vtx_mask[vtx_idx] = 1.0f;
+                 for (int c = 0; c < texture_channel; ++c) {
+                     vtx_color[vtx_idx][c] = texture_ptr[(uv_u * texture_width + uv_v) * texture_channel + c];
+                 }
+             }else{
+                 uncolored_vtxs.push_back(vtx_idx);
+             }
+
+             G[pos_idx_ptr[i * 3 + k]].push_back(pos_idx_ptr[i * 3 + (k + 1) % 3]);
+         }
+     }
+
+     int smooth_count = 2;
+     int last_uncolored_vtx_count = 0;
+     while (smooth_count>0) {
+         int uncolored_vtx_count = 0;
+
+         for (int vtx_idx : uncolored_vtxs) {
+
+             vector<float> sum_color(texture_channel, 0.0f);
+             float total_weight = 0.0f;
+
+             array<float, 3> vtx_0 = {vtx_pos_ptr[vtx_idx * 3],
+                 vtx_pos_ptr[vtx_idx * 3 + 1], vtx_pos_ptr[vtx_idx * 3 + 2]};
+             for (int connected_idx : G[vtx_idx]) {
+                 if (vtx_mask[connected_idx] > 0) {
+                     array<float, 3> vtx1 = {vtx_pos_ptr[connected_idx * 3],
+                         vtx_pos_ptr[connected_idx * 3 + 1], vtx_pos_ptr[connected_idx * 3 + 2]};
+                     float dist_weight = 1.0f / max(sqrt(pow(vtx_0[0] - vtx1[0], 2) + pow(vtx_0[1] - vtx1[1], 2) + \
+                         pow(vtx_0[2] - vtx1[2], 2)), 1E-4);
+                     dist_weight = dist_weight * dist_weight;
+                     for (int c = 0; c < texture_channel; ++c) {
+                         sum_color[c] += vtx_color[connected_idx][c] * dist_weight;
+                     }
+                     total_weight += dist_weight;
+                 }
+             }
+
+             if (total_weight > 0.0f) {
+                 for (int c = 0; c < texture_channel; ++c) {
+                     vtx_color[vtx_idx][c] = sum_color[c] / total_weight;
+                 }
+                 vtx_mask[vtx_idx] = 1.0f;
+             } else {
+                 uncolored_vtx_count++;
+             }
+
+         }
+
+         if(last_uncolored_vtx_count==uncolored_vtx_count){
+             smooth_count--;
+         }else{
+             smooth_count++;
+         }
+         last_uncolored_vtx_count = uncolored_vtx_count;
+     }
+
+     // Create new arrays for the output
+     py::array_t<float> new_texture(texture_buf.size);
+     py::array_t<uint8_t> new_mask(mask_buf.size);
+
+     auto new_texture_buf = new_texture.request();
+     auto new_mask_buf = new_mask.request();
+
+     float* new_texture_ptr = static_cast<float*>(new_texture_buf.ptr);
+     uint8_t* new_mask_ptr = static_cast<uint8_t*>(new_mask_buf.ptr);
+     // Copy original texture and mask to new arrays
+     std::copy(texture_ptr, texture_ptr + texture_buf.size, new_texture_ptr);
+     std::copy(mask_ptr, mask_ptr + mask_buf.size, new_mask_ptr);
+
+     for (int face_idx = 0; face_idx < uv_idx_buf.shape[0]; ++face_idx) {
+         for (int k = 0; k < 3; ++k) {
+             int vtx_uv_idx = uv_idx_ptr[face_idx * 3 + k];
+             int vtx_idx = pos_idx_ptr[face_idx * 3 + k];
+
+             if (vtx_mask[vtx_idx] == 1.0f) {
+                 int uv_v = round(vtx_uv_ptr[vtx_uv_idx * 2] * (texture_width - 1));
+                 int uv_u = round((1.0 - vtx_uv_ptr[vtx_uv_idx * 2 + 1]) * (texture_height - 1));
+
+                 for (int c = 0; c < texture_channel; ++c) {
+                     new_texture_ptr[(uv_u * texture_width + uv_v) * texture_channel + c] = vtx_color[vtx_idx][c];
+                 }
+                 new_mask_ptr[uv_u * texture_width + uv_v] = 255;
+             }
+         }
+     }
+
+     // Reshape the new arrays to match the original texture and mask shapes
+     new_texture.resize({texture_height, texture_width, 3});
+     new_mask.resize({texture_height, texture_width});
+     return std::make_pair(new_texture, new_mask);
+ }
+
+
+ std::pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeInpaint(py::array_t<float> texture,
+     py::array_t<uint8_t> mask,
+     py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
+     py::array_t<int> pos_idx, py::array_t<int> uv_idx, const std::string& method = "smooth") {
+     if (method == "smooth") {
+         return meshVerticeInpaint_smooth(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
+     } else {
+         throw std::invalid_argument("Invalid method. Use 'smooth' or 'forward'.");
+     }
+ }
+
+ PYBIND11_MODULE(mesh_processor, m) {
+     m.def("meshVerticeInpaint", &meshVerticeInpaint, "A function to process mesh",
+         py::arg("texture"), py::arg("mask"),
+         py::arg("vtx_pos"), py::arg("vtx_uv"),
+         py::arg("pos_idx"), py::arg("uv_idx"),
+         py::arg("method") = "smooth");
+ }
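
A hedged usage sketch (not part of the commit) of the meshVerticeInpaint binding defined above; the array shapes are illustrative and follow the argument names in the PYBIND11_MODULE block:

    import numpy as np
    import mesh_processor  # compiled from mesh_processor.cpp, e.g. via compile_mesh_painter.bat

    H, W = 512, 512
    texture = np.zeros((H, W, 3), dtype=np.float32)   # float texture, H x W x C
    mask = np.zeros((H, W), dtype=np.uint8)           # 0 = uncolored, >0 = already textured
    vtx_pos = np.zeros((100, 3), dtype=np.float32)    # vertex positions
    vtx_uv = np.zeros((100, 2), dtype=np.float32)     # per-vertex UVs in [0, 1]
    pos_idx = np.zeros((50, 3), dtype=np.int32)       # triangle vertex indices
    uv_idx = np.zeros((50, 3), dtype=np.int32)        # triangle UV indices

    new_texture, new_mask = mesh_processor.meshVerticeInpaint(
        texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx, method="smooth")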
hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/PKG-INFO ADDED
@@ -0,0 +1,7 @@
+ Metadata-Version: 2.2
+ Name: mesh_processor
+ Version: 0.0.0
+ Requires-Python: >=3.6
+ Requires-Dist: pybind11>=2.6.0
+ Dynamic: requires-dist
+ Dynamic: requires-python
hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,7 @@
+ mesh_processor.cpp
+ setup.py
+ mesh_processor.egg-info/PKG-INFO
+ mesh_processor.egg-info/SOURCES.txt
+ mesh_processor.egg-info/dependency_links.txt
+ mesh_processor.egg-info/requires.txt
+ mesh_processor.egg-info/top_level.txt
hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/requires.txt ADDED
@@ -0,0 +1 @@
+ pybind11>=2.6.0
hy3dgen/texgen/differentiable_renderer/mesh_processor.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ mesh_processor
{build/lib/hy3dgen β†’ hy3dgen}/texgen/differentiable_renderer/mesh_processor.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/differentiable_renderer/mesh_render.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/differentiable_renderer/mesh_utils.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/differentiable_renderer/setup.py RENAMED
File without changes
{build/lib/hy3dgen/texgen/hunyuanpaint/unet β†’ hy3dgen/texgen/hunyuanpaint}/__init__.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/hunyuanpaint/pipeline.py RENAMED
File without changes
{build/lib/hy3dgen/texgen/utils β†’ hy3dgen/texgen/hunyuanpaint/unet}/__init__.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/hunyuanpaint/unet/modules.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/pipelines.py RENAMED
@@ -61,7 +61,7 @@ class Hunyuan3DPaintPipeline:
          original_model_path = model_path
          if not os.path.exists(model_path):
              # try local path
-             base_dir = os.environ.get('HY3DGEN_MODELS', '/content/hy3dgen')
+             base_dir = os.environ.get('HY3DGEN_MODELS', '~/content/hy3dgen')
              model_path = os.path.expanduser(os.path.join(base_dir, model_path))

          delight_model_path = os.path.join(model_path, 'hunyuan3d-delight-v2-0')
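
The only change here is the default local model root; callers can still point HY3DGEN_MODELS elsewhere before constructing the pipeline, e.g. (path is illustrative):

    import os
    os.environ['HY3DGEN_MODELS'] = '/path/to/hy3dgen-models'  # overrides the '~/content/hy3dgen' fallback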
hy3dgen/texgen/utils/__init__.py ADDED
@@ -0,0 +1,23 @@
+ # Open Source Model Licensed under the Apache License Version 2.0
+ # and Other Licenses of the Third-Party Components therein:
+ # The below Model in this distribution may have been modified by THL A29 Limited
+ # ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
+
+ # Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+ # The below software and/or models in this distribution may have been
+ # modified by THL A29 Limited ("Tencent Modifications").
+ # All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+ # Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+ # except for the third-party components listed below.
+ # Hunyuan 3D does not impose any additional limitations beyond what is outlined
+ # in the respective licenses of these third-party components.
+ # Users must comply with all terms and conditions of original licenses of these third-party
+ # components and must ensure that the usage of the third party components adheres to
+ # all relevant laws and regulations.
+
+ # For avoidance of doubts, Hunyuan 3D means the large language models and
+ # their software and algorithms, including trained model weights, parameters (including
+ # optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+ # fine-tuning enabling code and other elements of the foregoing made publicly available
+ # by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
{build/lib/hy3dgen β†’ hy3dgen}/texgen/utils/alignImg4Tex_utils.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/utils/counter_utils.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/utils/dehighlight_utils.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/utils/multiview_utils.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/utils/simplify_mesh_utils.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/texgen/utils/uv_warp_utils.py RENAMED
File without changes
{build/lib/hy3dgen β†’ hy3dgen}/text2image.py RENAMED
File without changes