# Facial-expression recognition model definition (notebook export header removed)
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
import torch.nn.functional as F
## Add more imports if required
####################################################################################################################
# Define your model and transform and all necessary helper functions here #
# They will be imported to the exp_recognition.py file #
####################################################################################################################
# Definition of classes as dictionary: maps the model's output index
# (argmax over the 7 logits) to a human-readable expression label.
classes = {0: 'ANGER', 1: 'DISGUST', 2: 'FEAR', 3: 'HAPPINESS', 4: 'NEUTRAL', 5: 'SADNESS', 6: 'SURPRISE'}
# Example Network | |
class facExpRec(torch.nn.Module): | |
def __init__(self): | |
super(facExpRec, self).__init__() | |
self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3) | |
self.conv2 = nn.Conv2d(in_channels=16, out_channels=64, kernel_size=3) | |
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3) | |
self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=1) | |
self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1) | |
self.conv6 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1) | |
self.fc1 = nn.Linear(1024 * 1 * 1, 256) | |
self.fc2 = nn.Linear(256, 128) | |
self.fc3 = nn.Linear(128, 64) | |
self.fc4 = nn.Linear(64, 7) | |
self.pool = nn.MaxPool2d(kernel_size=2) | |
def forward(self, x): | |
x = self.pool(F.elu(self.conv1(x))) | |
x = self.pool(F.elu(self.conv2(x))) | |
x = self.pool(F.elu(self.conv3(x))) | |
x = self.pool(F.elu(self.conv4(x))) | |
x = self.pool(F.elu(self.conv5(x))) | |
x = self.pool(F.elu(self.conv6(x))) | |
x = x.view(-1, 1024 * 1 * 1) | |
x = F.elu(self.fc1(x)) | |
x = F.elu(self.fc2(x)) | |
x = F.elu(self.fc3(x)) | |
x = self.fc4(x) | |
x = F.log_softmax(x, dim=1) | |
return x | |
# Sample Helper function
def rgb2gray(image):
    """Convert a PIL image to single-channel ('L') grayscale.

    Parameters
    ----------
    image : PIL.Image.Image
        Input image in any mode (e.g. RGB).

    Returns
    -------
    PIL.Image.Image
        Grayscale ('L' mode) copy of ``image``.
    """
    return image.convert('L')
# Sample Transformation function
# YOUR CODE HERE for changing the Transformation values.
# Pipeline: PIL image -> grayscale -> 100x100 resize -> float tensor in [0, 1].
# 100x100 matches the spatial size facExpRec's fully-connected head expects.
trnscm = transforms.Compose([rgb2gray, transforms.Resize((100, 100)), transforms.ToTensor()])