Pallavi Bhoj committed
Commit 1a42758
1 Parent(s): 8adf233

Update exp_recognition.py

app/Hackathon_setup/exp_recognition.py CHANGED
@@ -8,6 +8,7 @@ from PIL import Image
 import base64
 import io
 import os
+import pdb
 ## Add more imports if required
 
 #############################################################################################################################
@@ -61,13 +62,15 @@ def get_expression(img):
     ##the same path as this file, we recommend to put in the same directory               ##
     ##########################################################################################
     ##########################################################################################
-    face_det_net = facExpRec()
-    model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device)
+
+    face_det_net = facExpRec()
+    model = torch.load(current_path + '/face_expression.t7', map_location=device)
     face_det_net.load_state_dict(model['net_dict'])
     face = detected_face(img)
-    if face==0:
-        face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
 
+    if face == 0:
+        face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
+
     with torch.no_grad():
         face = trnscm(face)
         output = face_det_net(face)
@@ -75,7 +78,7 @@ def get_expression(img):
 
     predicted_expression = classes[predicted.item()]
 
+
     # YOUR CODE HERE, return expression using your model
 
-    return predicted_expression
-
+    return predicted_expression
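
In the updated get_expression above, the network is rebuilt from facExpRec and its weights are loaded from face_expression.t7; note that the weight files tracked in this commit are named exp_recognition_net.t7 and expression_model.t7, so the loaded filename has to match whichever weights file actually ships. detected_face evidently returns 0 when no face is found, in which case the whole frame is converted to grayscale and classified instead. A minimal usage sketch, assuming this repo's module layout and a hypothetical test image:

    import cv2
    from exp_recognition import get_expression  # run from app/Hackathon_setup

    # OpenCV reads BGR, which matches the cv2.COLOR_BGR2GRAY fallback above
    img = cv2.imread('sample_face.jpg')         # hypothetical test image
    print(get_expression(img))                  # one of the 7 expression labels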
 
app/Hackathon_setup/exp_recognition_model.py CHANGED
@@ -6,8 +6,8 @@ import torch.nn.functional as F
 ## Add more imports if required
 
 ####################################################################################################################
-# Define your model and transform and all necessary helper functions here #
-# They will be imported to the exp_recognition.py file #
+# Define your model and transform and all necessary helper functions here #
+# They will be imported to the exp_recognition.py file #
 ####################################################################################################################
 
 # Definition of classes as dictionary
@@ -18,44 +18,32 @@ class facExpRec(torch.nn.Module):
     def __init__(self):
         super(facExpRec, self).__init__()
         self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3)
-
         self.conv2 = nn.Conv2d(in_channels=16, out_channels=64, kernel_size=3)
-
         self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3)
-
-        # Define the Fully connected layers
-        # The output of the second convolution layer will be input to the first fully connected layer
-        self.fc1 = nn.Linear(128 * 10 * 10, 256)
-        # 256 input features, 128 output features
+        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=1)
+        self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1)
+        self.conv6 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1)
+        self.fc1 = nn.Linear(1024 * 1 * 1, 256)
         self.fc2 = nn.Linear(256, 128)
-        # 128 input features, 64 output features
         self.fc3 = nn.Linear(128, 64)
-        # 64 input features, 7 output features for our 7 defined classes
         self.fc4 = nn.Linear(64, 7)
 
-        # Max pooling
-        self.pool = nn.MaxPool2d(kernel_size=2) # Max pooling layer with filter size 2x2
-
+        self.pool = nn.MaxPool2d(kernel_size=2)
 
     def forward(self, x):
-        x = self.pool(F.relu(self.conv1(x)))
-        x = self.pool(F.relu(self.conv2(x)))
-        x = self.pool(F.relu(self.conv3(x)))
-        # Flatten the image
-        x = x.view(-1, 128 * 10 * 10) # Output shape of convolutional layer is 16*5*5
-
-        # Linear layers with RELU activation
-        x = F.relu(self.fc1(x))
-        x = F.relu(self.fc2(x))
-        x = F.relu(self.fc3(x))
+        x = self.pool(F.elu(self.conv1(x)))
+        x = self.pool(F.elu(self.conv2(x)))
+        x = self.pool(F.elu(self.conv3(x)))
+        x = self.pool(F.elu(self.conv4(x)))
+        x = self.pool(F.elu(self.conv5(x)))
+        x = self.pool(F.elu(self.conv6(x)))
+        x = x.view(-1, 1024 * 1 * 1)
+        x = F.elu(self.fc1(x))
+        x = F.elu(self.fc2(x))
+        x = F.elu(self.fc3(x))
         x = self.fc4(x)
         x = F.log_softmax(x, dim=1)
         return x
-
-
-
-    # remove 'pass' once you have written your code
-    #YOUR CODE HERE
 
 # Sample Helper function
 def rgb2gray(image):
@@ -63,4 +51,4 @@ def rgb2gray(image):
 
 # Sample Transformation function
 #YOUR CODE HERE for changing the Transformation values.
-trnscm = transforms.Compose([rgb2gray, transforms.Resize((48,48)), transforms.ToTensor()])
+trnscm = transforms.Compose([rgb2gray, transforms.Resize((100,100)), transforms.ToTensor()])
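
Changing the Resize target in trnscm from (48,48) to (100,100) is consistent with the deeper network: three kernel-3 convolutions and six 2x2 max-poolings shrink a 1x100x100 input to 1024x1x1 (100→49→23→10→5→2→1 after each pooled block), which is exactly what fc1 = nn.Linear(1024 * 1 * 1, 256) expects. A self-contained shape check, restating the committed architecture for illustration (not the repo's exact class):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class FacExpRecSketch(nn.Module):
        # Re-statement of the committed facExpRec, for a shape check only
        def __init__(self):
            super().__init__()
            self.convs = nn.ModuleList([
                nn.Conv2d(1, 16, kernel_size=3),     # 100 -> 98, pool -> 49
                nn.Conv2d(16, 64, kernel_size=3),    # 49 -> 47,  pool -> 23
                nn.Conv2d(64, 128, kernel_size=3),   # 23 -> 21,  pool -> 10
                nn.Conv2d(128, 256, kernel_size=1),  # 10 -> 10,  pool -> 5
                nn.Conv2d(256, 512, kernel_size=1),  # 5 -> 5,    pool -> 2
                nn.Conv2d(512, 1024, kernel_size=1), # 2 -> 2,    pool -> 1
            ])
            self.fc1 = nn.Linear(1024 * 1 * 1, 256)
            self.fc2 = nn.Linear(256, 128)
            self.fc3 = nn.Linear(128, 64)
            self.fc4 = nn.Linear(64, 7)              # 7 expression classes
            self.pool = nn.MaxPool2d(kernel_size=2)

        def forward(self, x):
            for conv in self.convs:
                x = self.pool(F.elu(conv(x)))
            x = x.view(-1, 1024 * 1 * 1)
            x = F.elu(self.fc1(x))
            x = F.elu(self.fc2(x))
            x = F.elu(self.fc3(x))
            return F.log_softmax(self.fc4(x), dim=1)

    out = FacExpRecSketch()(torch.randn(1, 1, 100, 100))
    print(out.shape)  # torch.Size([1, 7])

The kernel-1 convolutions in blocks 4-6 only widen the channel count; all of the later spatial reduction comes from the poolings.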
app/Hackathon_setup/exp_recognition_net.t7 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c4bdcc922b6a7eb0117e3c5da04e4ea074bb87b1d59f60ff99aeb9c33380a6b7
+oid sha256:8c9fd9f96e8fd42c67f0381ee4de320e0073f93877ac617bf1286356e262f99b
 size 13612996
app/Hackathon_setup/expression_model.t7 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e259073cae1db728ace85d09969082ac6055d611269aa9f4e10de3245f63ee41
+size 13612942
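
Both .t7 entries are Git LFS pointers rather than the weights themselves: version identifies the pointer spec, oid is the SHA-256 of the tracked blob, and size is its byte count (about 13 MB here). A clone made without git-lfs leaves the tiny text pointer in place, and torch.load would then fail on it; a small guard sketch (path as in this commit, 1 KiB threshold chosen arbitrarily):

    import os

    MODEL_PATH = 'app/Hackathon_setup/expression_model.t7'

    # An unfetched LFS pointer is ~130 bytes of text; the real file is ~13 MB.
    if os.path.getsize(MODEL_PATH) < 1024:
        raise RuntimeError('Git LFS pointer detected; run `git lfs pull` to fetch the weights.')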