Drexubery committed on
Commit ae271f8
1 Parent(s): 1f198ea
Files changed (1)
  1. viewcrafter.py +10 -12
viewcrafter.py CHANGED
@@ -26,7 +26,6 @@ from pytorch_lightning import seed_everything
 from utils.diffusion_utils import instantiate_from_config,load_model_checkpoint,image_guided_synthesis
 from pathlib import Path
 from torchvision.utils import save_image
-render_results = None
 
 class ViewCrafter:
     def __init__(self, opts, gradio = False):
@@ -372,24 +371,23 @@ class ViewCrafter:
         # self.img_ori: torch.Size([576, 1024, 3]), [0,1]
         # self.images, self.img_ori = self.load_initial_images(image_dir=i2v_input_image)
         self.run_dust3r(input_images=self.images)
-        global render_results
         render_results = self.nvs_single_view(gradio=True)
-        # save_video(render_results, os.path.join(self.opts.save_dir, 'render0.mp4'))
+        save_video(render_results, os.path.join(self.opts.save_dir, 'render0.mp4'))
         traj_dir = os.path.join(self.opts.save_dir, "viz_traj.mp4")
         return traj_dir
 
     def run_gen(self,i2v_steps, i2v_seed):
         self.opts.ddim_steps = i2v_steps
         seed_everything(i2v_seed)
-        # render_dir = os.path.join(self.opts.save_dir, 'render0.mp4')
-        # video = imageio.get_reader(render_dir, 'ffmpeg')
-        # frames = []
-        # for frame in video:
-        #     frame = frame / 255.0
-        #     frames.append(frame)
-        # frames = np.array(frames)
-        # ##torch.Size([25, 576, 1024, 3])
-        # render_results = torch.from_numpy(frames).to(self.device).half()
+        render_dir = os.path.join(self.opts.save_dir, 'render0.mp4')
+        video = imageio.get_reader(render_dir, 'ffmpeg')
+        frames = []
+        for frame in video:
+            frame = frame / 255.0
+            frames.append(frame)
+        frames = np.array(frames)
+        ##torch.Size([25, 576, 1024, 3])
+        render_results = torch.from_numpy(frames).to(self.device).half()
 
         gen_dir = os.path.join(self.opts.save_dir, "diffusion0.mp4")
         diffusion_results = self.run_diffusion(render_results)
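After this change, run_gen reloads the rendered frames from render0.mp4 instead of reading a module-level render_results global, so the two Gradio callbacks share state through disk. Below is a minimal, self-contained sketch of that round-trip; it assumes imageio with the ffmpeg backend is installed, substitutes imageio.mimwrite for the repo's save_video helper, and uses random frames plus a throwaway directory purely for illustration.

```python
# Sketch of the render0.mp4 round-trip between the two callbacks.
# Assumptions: imageio + imageio-ffmpeg are installed; imageio.mimwrite stands in
# for the repo's save_video(); save_dir, fps and the random frames are illustrative.
import os

import imageio
import numpy as np
import torch

save_dir = "./tmp_save"                                    # stand-in for self.opts.save_dir
os.makedirs(save_dir, exist_ok=True)
render_dir = os.path.join(save_dir, "render0.mp4")

# Producer side (trajectory preview): write [0,1] float frames as an 8-bit video.
render_results = np.random.rand(25, 576, 1024, 3).astype(np.float32)
imageio.mimwrite(render_dir, list((render_results * 255).astype(np.uint8)), fps=10)

# Consumer side (run_gen): re-read the video and rebuild the half-precision tensor.
video = imageio.get_reader(render_dir, "ffmpeg")
frames = [frame / 255.0 for frame in video]                # uint8 -> [0,1] floats
frames = np.array(frames)                                  # (25, 576, 1024, 3)

device = "cuda" if torch.cuda.is_available() else "cpu"
render_results = torch.from_numpy(frames).to(device).half()
print(render_results.shape, render_results.dtype)          # e.g. torch.Size([25, 576, 1024, 3]) torch.float16
```

Note that the round-trip quantizes frames to 8 bits and re-encodes them, so the reloaded tensor is only an approximation of the in-memory render.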