Spaces: Running on Zero
Update stf/stf-api-alternative/src/stf_alternative/preprocess_dir/utils/face_finder.py
stf/stf-api-alternative/src/stf_alternative/preprocess_dir/utils/face_finder.py
CHANGED
@@ -30,7 +30,7 @@ def init_face_finder(device="cuda:0"):
     if g_mtcnn is None and g_recognizer is None:
         g_mtcnn = MTCNN(image_size=166, device=device)
         print("load MTCNN ", "success ^ ^" if g_mtcnn is not None else "fail ㅠㅠ")
-        g_recognizer = InceptionResnetV1(pretrained="vggface2").eval().to(device)
+        g_recognizer = InceptionResnetV1(pretrained="vggface2").eval().cuda(0) # // .to(device)
         print(
             "load g_recognizer ",
             "success ^ ^" if g_recognizer is not None else "fail ㅠㅠ",
@@ -70,11 +70,11 @@ def find_face(img):
     img = imageio.imread(img)
     frame = np.array(img)
     df_non_face = pd.DataFrame({"box": [np.nan], "ebd": [np.nan]})
-    with torch.no_grad():
-        boxes = g_mtcnn.detect(frame)
-        if boxes[0] is None:
-            return df_non_face, None
-        boxes = boxes[0].round().astype(np.int32)
+    #with torch.no_grad():
+    boxes = g_mtcnn.detect(frame)
+    if boxes[0] is None:
+        return df_non_face, None
+    boxes = boxes[0].round().astype(np.int32)
 
     org = np.array(frame)
 
@@ -84,8 +84,8 @@ def find_face(img):
         sz = g_mtcnn.image_size
         resized = cv2.resize(crop, (sz, sz), cv2.INTER_AREA)
         x = torchvision.transforms.functional.to_tensor(resized)
-        with torch.no_grad():
-            ebd = g_recognizer(x.unsqueeze(0).to(g_device))
+        #with torch.no_grad():
+        ebd = g_recognizer(x.unsqueeze(0).cuda(0)) # to(g_device))
         return ebd[0].cpu(), crop
 
     def check_box(x1, y1, x2, y2):
@@ -103,7 +103,7 @@ def find_face(img):
 class FaceFinder:
     def __init__(self, device="cuda:0"):
         self.mtcnn = MTCNN(image_size=166, device=device)
-        self.recognizer = InceptionResnetV1(pretrained="vggface2").eval().to(device)
+        self.recognizer = InceptionResnetV1(pretrained="vggface2").eval().cuda(0) #to(device)
         self.device = device
         self.tracker = cv2.TrackerCSRT_create() # cv2.legacy.TrackerMOSSE_create()
         self.tracker_initialized = False
@@ -116,8 +116,8 @@ class FaceFinder:
         sz = self.mtcnn.image_size
         resized = cv2.resize(crop, (sz, sz), cv2.INTER_AREA)
         x = torchvision.transforms.functional.to_tensor(resized)
-        with torch.no_grad():
-            ebd = self.recognizer(x.unsqueeze(0).to(g_device))
+        #with torch.no_grad():
+        ebd = self.recognizer(x.unsqueeze(0).cuda(0)) #to(g_device))
         return ebd[0].cpu()
 
     def find_face(self, frame):
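For context, a minimal sketch of the embedding path as it reads after this commit, assuming facenet_pytorch's MTCNN and InceptionResnetV1 and an already-extracted face crop; the standalone setup and the helper name embed_crop are illustrative, not part of the repository:

import cv2
import torchvision
from facenet_pytorch import MTCNN, InceptionResnetV1

# Illustrative setup mirroring init_face_finder after the change:
# the recognizer is pinned to GPU 0 with .cuda(0) instead of .to(device).
mtcnn = MTCNN(image_size=166, device="cuda:0")
recognizer = InceptionResnetV1(pretrained="vggface2").eval().cuda(0)

def embed_crop(crop):
    # Resize the face crop to the MTCNN input size and convert to a CHW tensor.
    sz = mtcnn.image_size
    resized = cv2.resize(crop, (sz, sz), interpolation=cv2.INTER_AREA)
    x = torchvision.transforms.functional.to_tensor(resized)
    # The commit comments out `with torch.no_grad():`, so autograd now records
    # this forward pass; re-wrapping the call (or using torch.inference_mode())
    # would restore the gradient-free behavior if memory use matters.
    ebd = recognizer(x.unsqueeze(0).cuda(0))
    return ebd[0].cpu()

The same pattern (.cuda(0) plus the commented-out no_grad block) appears in FaceFinder.__init__ and its embedding method, so the recognizer always lands on the first visible GPU regardless of the device argument, while MTCNN still follows device.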