-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathmodel_ECA_in_front.py
More file actions
142 lines (98 loc) · 5.02 KB
/
model_ECA_in_front.py
File metadata and controls
142 lines (98 loc) · 5.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
'''
Aum Sri Sai Ram
Implementation of Compact Facial Expression Recognition Net by using ECA before feeding to Local and global context branches
Authors: Darshan Gera and Dr. S. Balasubramanian, SSSIHL
Date: 20-05-2021
Email: darshangera@sssihl.edu.in
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import pickle
from thop import profile
from light_cnn import LightCNN_29Layers_v2
class eca_layer(nn.Module):
    """Efficient Channel Attention (ECA) module.

    Pools each channel to a single descriptor, runs a 1-D convolution
    across the channel axis to capture local cross-channel interaction,
    and rescales the input by the resulting sigmoid attention weights.
    """

    def __init__(self, channel, k_size=5):
        super(eca_layer, self).__init__()
        # Global spatial pooling: one scalar descriptor per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # 1-D conv over channels; padding keeps the channel length unchanged.
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size,
                              padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Rescale ``x`` (shape [b, c, h, w]) by learned channel attention."""
        b, c, h, w = x.size()
        pooled = self.avg_pool(x)                         # [b, c, 1, 1]
        # View the channel axis as a length-c sequence for the 1-D conv.
        attn = pooled.squeeze(-1).transpose(-1, -2)       # [b, 1, c]
        attn = self.conv(attn)
        attn = attn.transpose(-1, -2).unsqueeze(-1)       # [b, c, 1, 1]
        weights = self.sigmoid(attn)
        return x * weights.expand_as(x)
class FERNet(nn.Module):
    """Compact facial-expression recognition network.

    A shared (truncated) LightCNN-29 v2 backbone followed by an ECA block
    feeds two branches: per-region classifiers over non-overlapping patches
    and a global classifier over the whole feature map. Both views (x1, x2)
    are embedded with the same backbone and concatenated before classification.
    Logits are scaled cosine similarities (weight- and feature-normalized).

    Args:
        num_classes: number of expression classes per classifier head.
        num_regions: number of spatial patches (a global head is added on top).
    """

    def __init__(self, num_classes=7, num_regions=4):
        super(FERNet, self).__init__()
        self.base = LightCNN_29Layers_v2(num_classes=7)
        # Initialise the backbone from the released checkpoint, dropping the
        # later stages and fc layers so only the truncated stem is loaded.
        checkpoint = torch.load('pretrained/LightCNN_29Layers_V2_checkpoint.pth.tar')
        pretrained_state_dict = dict(checkpoint['state_dict'])
        # NOTE(review): substring filtering ('3'/'4'/'fc' in key) is tied to the
        # exact parameter naming of this checkpoint — verify if it changes.
        for key in list(pretrained_state_dict.keys()):
            if '3' in key or '4' in key or 'fc' in key:
                pretrained_state_dict.pop(key)
        # Remap by position onto the truncated model's own key names.
        new_dict = dict(zip(list(self.base.state_dict().keys()),
                            list(pretrained_state_dict.values())))
        self.base.load_state_dict(new_dict, strict=True)
        self.num_regions = num_regions
        self.eca = eca_layer(192, 3)
        # One pool / projection / classifier per region, plus one global head.
        self.globalavgpool = nn.ModuleList(
            [nn.AdaptiveAvgPool2d(1) for i in range(num_regions + 1)])
        self.region_net = nn.ModuleList(
            [nn.Sequential(nn.Linear(192, 256), nn.ReLU())
             for i in range(num_regions + 1)])
        # bias=False: heads compute cosine similarity against normalized weights.
        self.classifiers = nn.ModuleList(
            [nn.Linear(256 + 256, num_classes, bias=False)
             for i in range(num_regions + 1)])
        # Logit scale for the normalized (cosine) classifiers.
        self.s = 30.0

    def forward(self, x1, x2):
        """Return logits of shape [bs, num_classes, num_regions + 1].

        The last slice along dim 2 is the global head; the others are the
        per-region heads.
        """
        x1 = self.base(x1)
        x2 = self.base(x2)
        bs, c, w, h = x1.size()
        # Patch side length so that num_regions patches tile the feature map
        # (assumes num_regions is a perfect tiling, e.g. 4 -> 2x2 grid).
        region_size = int(x1.size(2) / (self.num_regions / 2))
        x1 = self.eca(x1)
        x2 = self.eca(x2)
        # Split each map into non-overlapping patches: [bs, n_regions, c, rs, rs].
        patches1 = x1.unfold(2, region_size, region_size).unfold(3, region_size, region_size)
        patches1 = patches1.contiguous().view(bs, c, -1, region_size, region_size).permute(0, 2, 1, 3, 4)
        patches2 = x2.unfold(2, region_size, region_size).unfold(3, region_size, region_size)
        patches2 = patches2.contiguous().view(bs, c, -1, region_size, region_size).permute(0, 2, 1, 3, 4)

        output = []
        for i in range(self.num_regions):
            f1 = patches1[:, i, :, :, :]
            f1 = self.globalavgpool[i](f1).squeeze(3).squeeze(2)
            f1 = self.region_net[i](f1)
            f2 = patches2[:, i, :, :, :]
            f2 = self.globalavgpool[i](f2).squeeze(3).squeeze(2)
            f2 = self.region_net[i](f2)
            f = torch.cat((f1, f2), dim=1)
            f = F.normalize(f, p=2, dim=1)
            # BUGFIX: the original `for W in ...: W = F.normalize(W, ...)` loop
            # only rebound a local name and never normalized the weights. Use
            # the normalized weight in the matmul so logits are s-scaled cosine
            # similarities, as intended.
            weight_n = F.normalize(self.classifiers[i].weight, p=2, dim=1)
            output.append(self.s * F.linear(f, weight_n))
        output_stacked = torch.stack(output, dim=2)

        # Global head (last entry of each ModuleList; index num_regions,
        # not a hard-coded 4, for consistency with the constructor).
        g = self.num_regions
        y1 = self.globalavgpool[g](x1).squeeze(3).squeeze(2)
        y1 = self.region_net[g](y1)
        y2 = self.globalavgpool[g](x2).squeeze(3).squeeze(2)
        y2 = self.region_net[g](y2)
        y = torch.cat((y1, y2), dim=1)
        y = F.normalize(y, p=2, dim=1)
        weight_n = F.normalize(self.classifiers[g].weight, p=2, dim=1)
        # BUGFIX: scale the global logits by self.s as well — the original
        # concatenated unscaled global logits with s-scaled region logits.
        output_global = (self.s * F.linear(y, weight_n)).unsqueeze(2)
        output_final = torch.cat((output_stacked, output_global), dim=2)
        return output_final
def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
if __name__ == '__main__':
    # Smoke test: build the model on the GPU, report its trainable parameter
    # count, run a dual-view forward pass on a random 1x128x128 image, and
    # profile MACs/params with thop.
    device = 'cuda'
    model = FERNet().to(device)
    print(count_parameters(model))
    x = torch.rand(1, 1, 128, 128).to(device)
    y = model(x, x)
    print(y.size())
    macs, params = profile(model, inputs=(x, x))
    print(macs, params)
'''
for name, param in model.named_parameters():
print(name, param.size())
'''