Spaces:
Running
Running
Pallavi Bhoj
committed on
Commit
•
13ca877
1
Parent(s):
e1575d9
Update face_recognition_model.py
Browse files
app/Hackathon_setup/face_recognition.py
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
import numpy as np
|
2 |
import cv2
|
3 |
from matplotlib import pyplot as plt
|
@@ -71,21 +72,30 @@ def get_similarity(img1, img2):
|
|
71 |
##the same path as this file, we recommend to put in the same directory ##
|
72 |
##########################################################################################
|
73 |
##########################################################################################
|
74 |
-
|
75 |
-
model = torch.load(current_path + '/siamese_model.t7', map_location=device)
|
76 |
-
feature_net.load_state_dict(model['net_dict'])
|
77 |
-
|
78 |
-
|
79 |
|
80 |
-
|
81 |
-
|
|
|
|
|
|
|
82 |
|
83 |
-
|
84 |
-
|
85 |
-
|
|
|
86 |
|
87 |
-
|
|
|
|
|
|
|
|
|
|
|
88 |
|
|
|
|
|
|
|
|
|
89 |
|
90 |
|
91 |
#1) Image captured from mobile is passed as parameter to this function in the API call, It returns the face class in the string form ex: "Person1"
|
|
|
1 |
+
from . import face_recognition_model
|
2 |
import numpy as np
|
3 |
import cv2
|
4 |
from matplotlib import pyplot as plt
|
|
|
72 |
##the same path as this file, we recommend to put in the same directory ##
|
73 |
##########################################################################################
|
74 |
##########################################################################################
|
75 |
+
|
|
|
|
|
|
|
|
|
76 |
|
77 |
+
feature_net = Siamese()
|
78 |
+
ckpt = torch.load(current_path + '/siamese_model.t7', map_location=device)
|
79 |
+
# model_path = current_path + "/Hackathon-setup/siamese_model.t7"
|
80 |
+
feature_net.load_state_dict(ckpt['net_dict'])
|
81 |
+
# model.eval()
|
82 |
|
83 |
+
with torch.no_grad():
|
84 |
+
output1, output2 = feature_net(face1.to(device), face2.to(device))
|
85 |
+
# Calculate similarity measure - for instance, using cosine similarity
|
86 |
+
euclidean_distance = F.pairwise_distance(output1, output2)
|
87 |
|
88 |
+
return euclidean_distance.item()
|
89 |
+
|
90 |
+
# ckpt = torch.load(current_path + "/Hackathon-setup/siamese_model.t7", map_location=device)
|
91 |
+
# # YOUR CODE HERE, load the model
|
92 |
+
# similarity_measure = ckpt(face1.to(device), face2.to(device))
|
93 |
+
# # YOUR CODE HERE, return similarity measure using your model
|
94 |
|
95 |
+
# return similarity_measure
|
96 |
+
|
97 |
+
|
98 |
+
# Load the Siamese network model
|
99 |
|
100 |
|
101 |
#1) Image captured from mobile is passed as parameter to this function in the API call, It returns the face class in the string form ex: "Person1"
|
app/Hackathon_setup/face_recognition_model.py
CHANGED
@@ -14,10 +14,9 @@ trnscm = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()
|
|
14 |
class Siamese(torch.nn.Module):
|
15 |
def __init__(self):
|
16 |
super(Siamese, self).__init__()
|
17 |
-
#
|
18 |
self.cnn1 = nn.Sequential(
|
19 |
-
nn.ReflectionPad2d(1),
|
20 |
-
# Pads the input tensor using the reflection of the input boundary, it similar to the padding.
|
21 |
nn.Conv2d(1, 4, kernel_size=3),
|
22 |
nn.ReLU(inplace=True),
|
23 |
nn.BatchNorm2d(4),
|
@@ -27,6 +26,7 @@ class Siamese(torch.nn.Module):
|
|
27 |
nn.ReLU(inplace=True),
|
28 |
nn.BatchNorm2d(8),
|
29 |
|
|
|
30 |
nn.ReflectionPad2d(1),
|
31 |
nn.Conv2d(8, 8, kernel_size=3),
|
32 |
nn.ReLU(inplace=True),
|
@@ -34,28 +34,26 @@ class Siamese(torch.nn.Module):
|
|
34 |
)
|
35 |
|
36 |
self.fc1 = nn.Sequential(
|
37 |
-
nn.Linear(8
|
38 |
nn.ReLU(inplace=True),
|
39 |
|
40 |
nn.Linear(500, 500),
|
41 |
nn.ReLU(inplace=True),
|
42 |
|
43 |
nn.Linear(500, 5))
|
44 |
-
#YOUR CODE HERE
|
45 |
|
46 |
-
# forward_once is for one image. This can be used while classifying the face images
|
47 |
def forward_once(self, x):
|
48 |
output = self.cnn1(x)
|
49 |
output = output.view(output.size()[0], -1)
|
50 |
output = self.fc1(output)
|
51 |
return output
|
52 |
|
53 |
-
def forward(self,
|
54 |
output1 = self.forward_once(input1)
|
55 |
output2 = self.forward_once(input2)
|
56 |
return output1, output2
|
57 |
-
|
58 |
-
|
59 |
|
60 |
##########################################################################################################
|
61 |
## Sample classification network (Specify if you are using a pytorch classifier during the training) ##
|
@@ -63,6 +61,14 @@ class Siamese(torch.nn.Module):
|
|
63 |
##########################################################################################################
|
64 |
|
65 |
# YOUR CODE HERE for pytorch classifier
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
66 |
|
67 |
# Definition of classes as dictionary
|
68 |
-
classes = ['person1','person2','person3','person4','person5','person6'
|
|
|
14 |
class Siamese(torch.nn.Module):
|
15 |
def __init__(self):
|
16 |
super(Siamese, self).__init__()
|
17 |
+
#YOUR CODE HERE
|
18 |
self.cnn1 = nn.Sequential(
|
19 |
+
nn.ReflectionPad2d(1), #Pads the input tensor using the reflection of the input boundary, it similar to the padding.
|
|
|
20 |
nn.Conv2d(1, 4, kernel_size=3),
|
21 |
nn.ReLU(inplace=True),
|
22 |
nn.BatchNorm2d(4),
|
|
|
26 |
nn.ReLU(inplace=True),
|
27 |
nn.BatchNorm2d(8),
|
28 |
|
29 |
+
|
30 |
nn.ReflectionPad2d(1),
|
31 |
nn.Conv2d(8, 8, kernel_size=3),
|
32 |
nn.ReLU(inplace=True),
|
|
|
34 |
)
|
35 |
|
36 |
self.fc1 = nn.Sequential(
|
37 |
+
nn.Linear(8*100*100, 500),
|
38 |
nn.ReLU(inplace=True),
|
39 |
|
40 |
nn.Linear(500, 500),
|
41 |
nn.ReLU(inplace=True),
|
42 |
|
43 |
nn.Linear(500, 5))
|
|
|
44 |
|
|
|
45 |
def forward_once(self, x):
|
46 |
output = self.cnn1(x)
|
47 |
output = output.view(output.size()[0], -1)
|
48 |
output = self.fc1(output)
|
49 |
return output
|
50 |
|
51 |
+
def forward(self, input1, input2):
|
52 |
output1 = self.forward_once(input1)
|
53 |
output2 = self.forward_once(input2)
|
54 |
return output1, output2
|
55 |
+
|
56 |
+
|
57 |
|
58 |
##########################################################################################################
|
59 |
## Sample classification network (Specify if you are using a pytorch classifier during the training) ##
|
|
|
61 |
##########################################################################################################
|
62 |
|
63 |
# YOUR CODE HERE for pytorch classifier
|
64 |
+
num_of_classes = 6
|
65 |
+
classifier = nn.Sequential(nn.Linear(256, 64),
|
66 |
+
nn.BatchNorm1d(64),
|
67 |
+
nn.ReLU(),
|
68 |
+
nn.Linear(64, 32),
|
69 |
+
nn.BatchNorm1d(32),
|
70 |
+
nn.ReLU(),
|
71 |
+
nn.Linear(32, num_of_classes))
|
72 |
|
73 |
# Definition of classes as dictionary
|
74 |
+
classes = ['person1','person2','person3','person4','person5','person6']
|