update code

app.py CHANGED
@@ -88,8 +88,8 @@ def calculate_sigmoid_focal_loss(inputs, targets, num_masks = 1, alpha: float =

 def inference(ic_image, ic_mask, image1, image2):
     # in context image and mask
-    ic_image =
-
+    ic_image = cv2.cvtColor(ic_image, cv2.COLOR_BGR2RGB)
+    ic_make = cv2.cvtColor(ic_image,cv2.COLOR_BGR2RGB)

     sam_type, sam_ckpt = 'vit_h', 'sam_vit_h_4b8939.pth'
     sam = sam_model_registry[sam_type](checkpoint=sam_ckpt).cuda()
@@ -114,7 +114,7 @@ def inference(ic_image, ic_mask, image1, image2):

     for test_image in [image1, image2]:
         print("======> Testing Image" )
-        test_image =
+        test_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2RGB)

         # Image feature encoding
         predictor.set_image(test_image)
@@ -188,8 +188,8 @@ def inference_scribble(image, image1, image2):
     # in context image and mask
     ic_image = image["image"]
     ic_mask = image["mask"]
-    ic_image =
-
+    ic_image = cv2.cvtColor(ic_image, cv2.COLOR_BGR2RGB)
+    ic_make = cv2.cvtColor(ic_image,cv2.COLOR_BGR2RGB)

     sam_type, sam_ckpt = 'vit_h', 'sam_vit_h_4b8939.pth'
     sam = sam_model_registry[sam_type](checkpoint=sam_ckpt).cuda()
@@ -214,7 +214,7 @@ def inference_scribble(image, image1, image2):

     for test_image in [image1, image2]:
         print("======> Testing Image" )
-        test_image =
+        test_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2RGB)

         # Image feature encoding
         predictor.set_image(test_image)
@@ -286,8 +286,8 @@ def inference_scribble(image, image1, image2):

 def inference_finetune(ic_image, ic_mask, image1, image2):
     # in context image and mask
-    ic_image =
-
+    ic_image = cv2.cvtColor(ic_image, cv2.COLOR_BGR2RGB)
+    ic_make = cv2.cvtColor(ic_image,cv2.COLOR_BGR2RGB)

     gt_mask = torch.tensor(ic_mask)[:, :, 0] > 0
     gt_mask = gt_mask.float().unsqueeze(0).flatten(1).cuda()
@@ -377,7 +377,7 @@ def inference_finetune(ic_image, ic_mask, image1, image2):
     output_image = []

     for test_image in [image1, image2]:
-        test_image =
+        test_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2RGB)

         # Image feature encoding
         predictor.set_image(test_image)
@@ -542,3 +542,4 @@ with demo:
     )

 demo.launch(enable_queue=False)
+
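Every hunk applies the same change: input images are converted from BGR to RGB channel order before they reach SAM. Note that the second added line in each in-context hunk assigns to `ic_make` and converts `ic_image` a second time, rather than converting `ic_mask`. A minimal sketch of the conversion pattern, assuming the inputs arrive as uint8 NumPy arrays of shape (H, W, 3) in BGR order (the helper name `to_rgb` is hypothetical, not from app.py):

import cv2
import numpy as np

def to_rgb(image: np.ndarray) -> np.ndarray:
    # Hypothetical helper: swap BGR -> RGB; shape and dtype are unchanged,
    # so the result can be passed straight to predictor.set_image.
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

SamPredictor.set_image defaults to image_format="RGB", which is presumably why the conversion is inserted immediately before each encoding call.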
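For context, the unchanged lines around each hunk show the model setup the converted images feed into. A short, self-contained version of that setup (the checkpoint path is the one named in the diff; 'example.jpg' is a placeholder input):

import cv2
from segment_anything import sam_model_registry, SamPredictor

# Build SAM from the ViT-H checkpoint and wrap it in a predictor,
# as the context lines in the diff do.
sam_type, sam_ckpt = 'vit_h', 'sam_vit_h_4b8939.pth'
sam = sam_model_registry[sam_type](checkpoint=sam_ckpt).cuda()
predictor = SamPredictor(sam)

# set_image expects RGB by default, hence the BGR -> RGB conversions above.
image = cv2.cvtColor(cv2.imread('example.jpg'), cv2.COLOR_BGR2RGB)
predictor.set_image(image)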
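Finally, the unchanged lines in the inference_finetune hunk flatten the in-context mask for the fine-tuning loss. A toy shape trace of those two lines, assuming ic_mask is an (H, W, 3) uint8 array (here H = W = 4; the .cuda() call is omitted so the snippet runs on CPU):

import numpy as np
import torch

ic_mask = np.zeros((4, 4, 3), dtype=np.uint8)      # toy stand-in for the mask
ic_mask[1:3, 1:3, :] = 255                         # small foreground square

gt_mask = torch.tensor(ic_mask)[:, :, 0] > 0       # (H, W) bool, first channel only
gt_mask = gt_mask.float().unsqueeze(0).flatten(1)  # (1, H*W) float row vector
print(gt_mask.shape)                               # torch.Size([1, 16])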