lemonaddie committed · Commit 6e2b2c5 · verified · 1 Parent(s): 03d53ed

Update app_recon.py

Files changed (1): app_recon.py (+20 −1)
app_recon.py CHANGED
@@ -55,6 +55,25 @@ from torchvision.transforms import InterpolationMode
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
+# Initialization.. Downloading the ckpts
+
+image_encoder_checkpoint1 = os.path.join(os.path.dirname(__file__), "image_encoder", "model.safetensors")
+image_encoder_checkpoint2 = os.path.join(os.path.dirname(__file__), "image_encoder", "pytorch_model.bin")
+vae_checkpoint1 = os.path.join(os.path.dirname(__file__), "vae", "diffusion_pytorch_model.bin")
+vae_checkpoint2 = os.path.join(os.path.dirname(__file__), "vae", "diffusion_pytorch_model.safetensors")
+ckpt_lists = [image_encoder_checkpoint1, image_encoder_checkpoint2, vae_checkpoint1, vae_checkpoint2]
+
+image_encoder_url1 = 'https://huggingface.co/lemonaddie/Geowizard/resolve/main/image_encoder/model.safetensors'
+image_encoder_url2 = 'https://huggingface.co/lemonaddie/Geowizard/resolve/main/image_encoder/pytorch_model.bin'
+vae_url1 = 'https://huggingface.co/lemonaddie/Geowizard/resolve/main/vae/diffusion_pytorch_model.bin'
+vae_url2 = 'https://huggingface.co/lemonaddie/Geowizard/resolve/main/vae/diffusion_pytorch_model.safetensors'
+url_lists = [image_encoder_url1, image_encoder_url2, vae_url1, vae_url2]
+
+for ckpt_path, ckpt_url in zip(ckpt_lists, url_lists):
+    if not os.path.exists(ckpt_path):
+        print("Downloading to " + ckpt_path + "...")
+        os.system('wget -P' + os.path.dirname(ckpt_path) + ' -nv ' + ckpt_url)
+
 vae = AutoencoderKL.from_pretrained("./", subfolder='vae')
 scheduler = DDIMScheduler.from_pretrained("./", subfolder='scheduler')
 image_encoder = CLIPVisionModelWithProjection.from_pretrained("./", subfolder="image_encoder")
@@ -96,7 +115,7 @@ def sam_init():
     sam_checkpoint = os.path.join(os.path.dirname(__file__), "sam_vit_h_4b8939.pth")
 
     if not os.path.exists(sam_checkpoint):
-        print("Downloading SAM-H checkpoint to " + os.path.exists(sam_checkpoint))
+        print("Downloading SAM-H checkpoint to " + sam_checkpoint)
         os.system('wget -P' + os.path.dirname(__file__) + ' -nv https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth')
 
     model_type = "vit_h"
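For reference, a minimal sketch of the same startup download step done without shelling out to wget. This is not part of the commit: it assumes the huggingface_hub package is installed and that recent versions of hf_hub_download accept a local_dir argument that places files under local_dir/<filename>; the repo id "lemonaddie/Geowizard", the file paths, and the SAM URL are taken from the diff above.

# Sketch only: fetch missing checkpoints in pure Python instead of os.system('wget ...').
import os
import urllib.request

from huggingface_hub import hf_hub_download

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Checkpoints hosted on the Hugging Face Hub, relative to the repo root.
HUB_FILES = [
    "image_encoder/model.safetensors",
    "image_encoder/pytorch_model.bin",
    "vae/diffusion_pytorch_model.bin",
    "vae/diffusion_pytorch_model.safetensors",
]

for rel_path in HUB_FILES:
    local_path = os.path.join(BASE_DIR, rel_path)
    if not os.path.exists(local_path):
        print("Downloading " + rel_path + " from the Hub...")
        hf_hub_download(
            repo_id="lemonaddie/Geowizard",
            filename=rel_path,
            local_dir=BASE_DIR,  # file ends up at BASE_DIR/<rel_path>
        )

# The SAM ViT-H checkpoint is not on the Hub, so fetch it directly over HTTPS.
SAM_URL = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
sam_checkpoint = os.path.join(BASE_DIR, "sam_vit_h_4b8939.pth")
if not os.path.exists(sam_checkpoint):
    print("Downloading SAM-H checkpoint to " + sam_checkpoint)
    urllib.request.urlretrieve(SAM_URL, sam_checkpoint)

Like the committed code, this only downloads files that are missing, so repeated app restarts skip the network step once the checkpoints are in place.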