Abdulrahman1989 committed · Commit b2ebb9a
1 Parent(s): 27bd645

Fix render

Files changed:
- Image3DProcessor.py +12 -12
- app.py +8 -9
Image3DProcessor.py
CHANGED
@@ -86,7 +86,7 @@ class Image3DProcessor:
         world_view_transforms, full_proj_transforms, camera_centers = get_target_cameras()
         background = torch.tensor([1, 1, 1], dtype=torch.float32, device=self.device)
         loop_renders = []
-        t_to_512 = torchvision.transforms.Resize(512, interpolation=torchvision.transforms.InterpolationMode.
+        t_to_512 = torchvision.transforms.Resize(512, interpolation=torchvision.transforms.InterpolationMode.LANCZOS)

         for r_idx in range(world_view_transforms.shape[0]):
             rendered_image = render_predicted(
@@ -101,16 +101,16 @@ class Image3DProcessor:
         rendered_image = t_to_512(rendered_image)
         loop_renders.append(torch.clamp(rendered_image * 255, 0.0, 255.0).detach().permute(1, 2, 0).cpu().numpy().astype(np.uint8))

-        # Save
-
-
-
-
+        # Save video to a file and load its content
+        video_path = "loop_.mp4"
+        imageio.mimsave(video_path, loop_renders, fps=25)
+        with open(video_path, "rb") as video_file:
+            video_data = video_file.read()

-        # Save
-
-
-
-
+        # Save the mesh as a .ply file and load its content
+        mesh_path = "mesh.ply"
+        export_to_obj(reconstruction, mesh_path)
+        with open(mesh_path, "rb") as mesh_file:
+            mesh_data = mesh_file.read()

-        return mesh_data, video_data
+        return mesh_data, video_data
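For reference, the export path the new code takes can be exercised in isolation: each rendered view is resized, converted from a CHW float tensor to an HWC uint8 array, the frame list is encoded to an .mp4 with imageio, and the file is read back as bytes (the mesh side works the same way via export_to_obj). A minimal sketch under stated assumptions — random tensors stand in for render_predicted() output, and BILINEAR replaces LANCZOS because antialiased Lanczos resizing of raw tensors is not supported by every torchvision build:

# Hedged sketch: dummy frames stand in for render_predicted(); BILINEAR stands
# in for LANCZOS, which some torchvision versions only accept for PIL images.
import numpy as np
import torch
import torchvision
import imageio

def export_loop_video(frames_chw, video_path="loop_.mp4", fps=25):
    # Resize each CHW float frame to 512px, convert to HWC uint8, encode to .mp4,
    # and return the encoded file's raw bytes (mirroring the new export code).
    t_to_512 = torchvision.transforms.Resize(
        512, interpolation=torchvision.transforms.InterpolationMode.BILINEAR
    )
    loop_renders = []
    for frame in frames_chw:
        frame = t_to_512(frame)
        loop_renders.append(
            torch.clamp(frame * 255, 0.0, 255.0).detach().permute(1, 2, 0).cpu().numpy().astype(np.uint8)
        )
    imageio.mimsave(video_path, loop_renders, fps=fps)
    with open(video_path, "rb") as video_file:
        return video_file.read()

if __name__ == "__main__":
    # 30 random 3x128x128 frames in [0, 1] as stand-ins for the per-camera renders
    frames = [torch.rand(3, 128, 128) for _ in range(30)]
    print(f"encoded {len(export_loop_video(frames))} bytes of video")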
app.py
CHANGED
@@ -18,9 +18,7 @@ class VideoGenerator:

     def generate_3d_video(self, image):
         # Process the image and create a 3D video and mesh
-
-        mesh_data, video_data = self.processor.reconstruct_and_export(processed_image)
-
+        mesh_data, video_data = self.processor.reconstruct_and_export(image)
         return mesh_data, video_data

 class GradioApp:
@@ -36,18 +34,19 @@ class GradioApp:

     def full_pipeline(self, prompt):
         initial_image = self.sdxl_generator.generate_images([prompt])[0]
+        # controlled_image = self.controlnet_processor.controlnet_image(initial_image)
         mesh_data, video_data = self.video_generator.generate_3d_video(initial_image)

         # Create temporary files to display mesh and video content
-
-
-
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".ply") as mesh_file:
+            mesh_file.write(mesh_data)
+            mesh_path = mesh_file.name

         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as video_file:
             video_file.write(video_data)
             video_path = video_file.name

-        return initial_image, video_path
+        return initial_image, mesh_path, video_path

     def launch(self):
         interface = gr.Interface(
@@ -55,13 +54,13 @@
             inputs=gr.Textbox(label="Input Prompt"),
             outputs=[
                 gr.Image(label="Generated Image"),
-
+                gr.File(label="3D Mesh (.ply)"),
                 gr.Video(label="3D Model Video")
             ],
             title="SDXL to ControlNet to 3D Pipeline",
             description="Generate an image using SDXL, refine it with ControlNet, and generate a 3D video output."
         )
-        interface.launch(share=True)
+        interface.launch(share=True)  # Added `share=True` for public link

 if __name__ == "__main__":
     app = GradioApp()
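app.py then hands the raw mesh and video bytes to Gradio by writing them to temporary files, since gr.File and gr.Video take file paths rather than bytes. A runnable sketch of that wiring under stated assumptions — placeholder_pipeline and TINY_PLY are hypothetical stand-ins for the SDXL generator and the 3D reconstruction, not names from the repo:

# Hedged sketch of the Gradio wiring: only the bytes -> temporary file -> path
# pattern is taken from the commit; the pipeline itself is a placeholder.
import tempfile
import numpy as np
import imageio
import gradio as gr

TINY_PLY = (  # minimal valid ASCII .ply: a single triangle
    b"ply\nformat ascii 1.0\n"
    b"element vertex 3\nproperty float x\nproperty float y\nproperty float z\n"
    b"element face 1\nproperty list uchar int vertex_indices\nend_header\n"
    b"0 0 0\n1 0 0\n0 1 0\n3 0 1 2\n"
)

def placeholder_pipeline(prompt):
    # Stand-in for SDXL image generation: a flat gray 512x512 image
    image = np.full((512, 512, 3), 128, dtype=np.uint8)
    # Stand-ins for reconstruct_and_export(): mesh bytes and encoded video bytes
    mesh_data = TINY_PLY
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        src = tmp.name
    imageio.mimsave(src, [np.zeros((64, 64, 3), np.uint8)] * 25, fps=25)
    with open(src, "rb") as f:
        video_data = f.read()

    # The pattern from full_pipeline: persist the bytes and return file paths,
    # because gr.File and gr.Video display paths rather than raw bytes
    with tempfile.NamedTemporaryFile(delete=False, suffix=".ply") as mesh_file:
        mesh_file.write(mesh_data)
        mesh_path = mesh_file.name
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as video_file:
        video_file.write(video_data)
        video_path = video_file.name
    return image, mesh_path, video_path

interface = gr.Interface(
    fn=placeholder_pipeline,
    inputs=gr.Textbox(label="Input Prompt"),
    outputs=[
        gr.Image(label="Generated Image"),
        gr.File(label="3D Mesh (.ply)"),
        gr.Video(label="3D Model Video"),
    ],
)

if __name__ == "__main__":
    interface.launch()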