Coverage for src/bob/bio/face/pytorch/facexzoo/ResNets.py: 25%

76 statements  

« prev     ^ index     » next       coverage.py v7.6.0, created at 2024-07-13 00:04 +0200

1""" 

2@author: Jun Wang 

3@date: 20201019 

4@contact: jun21wangustc@gmail.com 

5""" 

6 

7# based on: 

8# https://github.com/TreB1eN/InsightFace_Pytorch/blob/master/model.py 

9 

10from collections import namedtuple 

11 

12from torch.nn import ( 

13 AdaptiveAvgPool2d, 

14 BatchNorm1d, 

15 BatchNorm2d, 

16 Conv2d, 

17 Dropout, 

18 Linear, 

19 MaxPool2d, 

20 Module, 

21 PReLU, 

22 ReLU, 

23 Sequential, 

24 Sigmoid, 

25) 

26 

27 

class Flatten(Module):
    """Collapse every non-batch dimension into one: (N, ...) -> (N, -1)."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)

31 

32 

class SEModule(Module):
    """Squeeze-and-Excitation block: channel-wise attention computed from a
    globally pooled descriptor through a two-layer bottleneck gate.

    Attribute names (avg_pool, fc1, relu, fc2, sigmoid) are kept so
    state_dict keys stay compatible with pretrained checkpoints.
    """

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        # Squeeze: reduce each channel's spatial map to a single value.
        self.avg_pool = AdaptiveAvgPool2d(1)
        # Excitation: bottleneck down by `reduction`, then expand back.
        self.fc1 = Conv2d(
            channels, channels // reduction, kernel_size=1, padding=0, bias=False
        )
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(
            channels // reduction, channels, kernel_size=1, padding=0, bias=False
        )
        self.sigmoid = Sigmoid()

    def forward(self, x):
        identity = x
        scale = self.avg_pool(x)
        scale = self.relu(self.fc1(scale))
        scale = self.sigmoid(self.fc2(scale))
        # Gate the input channel-wise by the learned attention weights.
        return identity * scale

62 

63 

class bottleneck_IR(Module):
    """Improved-residual (IR) unit: BN -> 3x3 conv -> PReLU -> strided
    3x3 conv -> BN, added to an identity-like shortcut.

    Submodule names and Sequential ordering are preserved so state_dict
    keys stay compatible with pretrained checkpoints.
    """

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        # When channel counts match, a 1x1 max-pool (possibly strided)
        # suffices as the shortcut; otherwise project with 1x1 conv + BN.
        if in_channel == depth:
            shortcut = MaxPool2d(1, stride)
        else:
            shortcut = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth),
            )
        self.shortcut_layer = shortcut
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
        )

    def forward(self, x):
        return self.res_layer(x) + self.shortcut_layer(x)

86 

87 

class bottleneck_IR_SE(Module):
    """IR unit with squeeze-and-excitation appended to the residual path.

    Identical to bottleneck_IR except the residual branch ends with an
    SEModule (reduction 16). Submodule names and Sequential ordering are
    preserved so state_dict keys stay checkpoint-compatible.
    """

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        # Identity-like shortcut when channels match; projection otherwise.
        if in_channel == depth:
            shortcut = MaxPool2d(1, stride)
        else:
            shortcut = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth),
            )
        self.shortcut_layer = shortcut
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16),
        )

    def forward(self, x):
        return self.res_layer(x) + self.shortcut_layer(x)

111 

112 

class Bottleneck(namedtuple("Block", ("in_channel", "depth", "stride"))):
    """A named tuple describing one ResNet unit: input channel count,
    output channel count (depth), and the unit's stride."""

115 

116 

def get_block(in_channel, depth, num_units, stride=2):
    """Spec for one ResNet stage: a (possibly) strided entry unit followed
    by num_units - 1 stride-1 units that keep `depth` channels."""
    units = [Bottleneck(in_channel, depth, stride)]
    units.extend(Bottleneck(depth, depth, 1) for _ in range(num_units - 1))
    return units

121 

122 

def get_blocks(num_layers):
    """Return the per-stage block specs for a backbone of the given depth.

    Args:
        num_layers: total depth of the network; one of 50, 100, 152.

    Returns:
        A list of four stages, each a list of Bottleneck specs.

    Raises:
        ValueError: if num_layers is unsupported. (The original code left
        `blocks` unbound in that case and crashed with a confusing
        UnboundLocalError at the return statement.)
    """
    # num_units per stage for each supported depth; the channel widths
    # (64 -> 128 -> 256 -> 512) are the same for all depths.
    units_per_depth = {
        50: (3, 4, 14, 3),
        100: (3, 13, 30, 3),
        152: (3, 8, 36, 3),
    }
    if num_layers not in units_per_depth:
        raise ValueError(
            "num_layers should be 50, 100, or 152, got %r" % (num_layers,)
        )
    u1, u2, u3, u4 = units_per_depth[num_layers]
    return [
        get_block(in_channel=64, depth=64, num_units=u1),
        get_block(in_channel=64, depth=128, num_units=u2),
        get_block(in_channel=128, depth=256, num_units=u3),
        get_block(in_channel=256, depth=512, num_units=u4),
    ]

146 

147 

class Resnet(Module):
    """Insightface-style ResNet backbone mapping face crops to embeddings.

    Args:
        num_layers: backbone depth; one of 50, 100, 152.
        drop_ratio: dropout probability applied before the embedding layer.
        mode: "ir" for plain improved-residual units, "ir_se" to add
            squeeze-and-excitation to every unit.
        feat_dim: dimensionality of the output embedding.
        out_h, out_w: spatial size of the final 512-channel feature map
            produced by the body (depends on the input resolution).

    Raises:
        ValueError: if num_layers or mode is unsupported. The original
        used `assert` for this validation, which is stripped under
        `python -O` and would then fall through to an UnboundLocalError
        on `unit_module` for a bad mode.
    """

    def __init__(
        self, num_layers, drop_ratio, mode="ir", feat_dim=512, out_h=7, out_w=7
    ):
        super(Resnet, self).__init__()
        if num_layers not in (50, 100, 152):
            raise ValueError("num_layers should be 50,100, or 152")
        if mode not in ("ir", "ir_se"):
            raise ValueError("mode should be ir or ir_se")
        blocks = get_blocks(num_layers)
        unit_module = bottleneck_IR if mode == "ir" else bottleneck_IR_SE
        self.input_layer = Sequential(
            Conv2d(3, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64), PReLU(64)
        )
        self.output_layer = Sequential(
            BatchNorm2d(512),
            Dropout(drop_ratio),
            Flatten(),
            Linear(512 * out_h * out_w, feat_dim),  # for eye
            BatchNorm1d(feat_dim),
        )
        # Flatten the nested stage/unit specs into one sequential body.
        modules = [
            unit_module(
                bottleneck.in_channel,
                bottleneck.depth,
                bottleneck.stride,
            )
            for block in blocks
            for bottleneck in block
        ]
        self.body = Sequential(*modules)

    def forward(self, x):
        """Map a (N, 3, H, W) batch of images to (N, feat_dim) embeddings."""
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_layer(x)
        return x