import numpy as np
import cv2
from matplotlib import pyplot as plt
import torch
# In the line below, remove the '.' when working on your local system. However, make sure the '.' is present before exp_recognition_model when uploading to the server; do not remove it there.
from .exp_recognition_model import *
from PIL import Image
import base64
import io
import os
import pdb
## Add more imports if required

#############################################################################################################################
#   Caution: Don't change any of the filenames, function names and definitions                                              #
#   Always use current_path + file_name when referring to any files; without it we cannot access files on the server        #
#############################################################################################################################

# current_path stores the absolute path of the directory containing this file.
current_path = os.path.dirname(os.path.abspath(__file__))


# 1) The function below detects faces in the given image.
# 2) It returns a single cropped face image: the one with the maximum area among all faces detected in the photo.
# 3) If no face is detected, it returns zero (0).

def detected_face(image):
    eye_haar = current_path + '/haarcascade_eye.xml'
    face_haar = current_path + '/haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(face_haar)
    eye_cascade = cv2.CascadeClassifier(eye_haar)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    face_areas = []
    images = []
    required_image = 0
    # Crop every detected face from the grayscale image and record its area.
    for (x, y, w, h) in faces:
        face_areas.append(w * h)
        images.append(gray[y:y+h, x:x+w])
    # Keep only the crop with the largest area; required_image stays 0 if no face was found.
    if images:
        required_image = Image.fromarray(images[np.argmax(face_areas)])
    return required_image
    

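##########################################################################################
## A minimal usage sketch for detected_face, for local testing only ('test.jpg' is a    ##
## hypothetical image path, not part of the assignment):                                ##
##                                                                                      ##
## img = cv2.imread(current_path + '/test.jpg')   # BGR image as read by OpenCV         ##
## face = detected_face(img)                                                            ##
## if face != 0:                                                                        ##
##     face.show()   # largest detected face, returned as a PIL image                   ##
##########################################################################################
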
# 1) An image captured from the mobile app is passed to the function below in the API call; it returns the expression detected by your network.
# 2) The image is passed to the function in base64 encoding; code for decoding the image is provided within the function.
# 3) Define an object of your network in the function, load the weights from the trained network, and set it to evaluation mode.
# 4) Perform the necessary transformations on the input (the face detected by the function above); this should return the expression as a string, e.g. "Anger".
# 5) For loading your model, use current_path + '<your model file name>'; a detailed example is given in the comments inside the function.
## Caution: Don't change the definition or the function name; for loading the model, use current_path as shown in the comments inside the function.
def get_expression(img):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    ##########################################################################################
    ## Example for loading a model from a weight state dictionary:                          ##
    ## face_det_net = facExpRec() # Example network                                         ##
    ## model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device)    ##
    ## face_det_net.load_state_dict(model['net_dict'])                                      ##
    ##                                                                                      ##
    ## current_path + '/<network_definition>' is the path of the saved model when it is    ##
    ## in the same directory as this file; we recommend putting it in that directory.      ##
    ##########################################################################################

    # Decode the base64-encoded image into a BGR numpy array for OpenCV
    # (assumes a raw base64 string/bytes without a data-URI prefix).
    img = base64.b64decode(img)
    img = Image.open(io.BytesIO(img))
    img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)

    # Instantiate the network, load the trained weights, and switch to evaluation mode.
    face_det_net = facExpRec()
    model = torch.load(current_path + '/face_expression.t7', map_location=device)
    face_det_net.load_state_dict(model['net_dict'])
    face_det_net = face_det_net.to(device)
    face_det_net.eval()

    # Detect the largest face; fall back to the full grayscale image if none is found.
    face = detected_face(img)
    if face == 0:
        face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))

    with torch.no_grad():
        face = trnscm(face).unsqueeze(0).to(device)  # transform and add a batch dimension
        output = face_det_net(face)
        _, predicted = torch.max(output, 1)

    predicted_expression = classes[predicted.item()]
    return predicted_expression
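
# A minimal local smoke test for get_expression; it runs only when this file is executed
# directly, never when the server imports the module. 'sample.jpg' is a hypothetical
# test image path, an assumption for illustration; replace it with any local photo.
if __name__ == "__main__":
    with open(current_path + '/sample.jpg', 'rb') as f:
        encoded = base64.b64encode(f.read())
    print(get_expression(encoded))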