Coverage for src/bob/bio/face/pytorch/facexzoo/backbone_def.py: 84%
97 statements
« prev ^ index » next coverage.py v7.6.0, created at 2024-07-13 00:04 +0200
1"""
2@author: Jun Wang
3@date: 20201019
4@contact: jun21wangustc@gmail.com
5"""
6import yaml
8from .AttentionNets import ResidualAttentionNet
9from .EfficientNets import EfficientNet, efficientnet
10from .GhostNet import GhostNet
11from .HRNet import HighResolutionNet
12from .MobileFaceNets import MobileFaceNet
13from .resnest.resnest import ResNeSt
14from .ResNets import Resnet
15from .ReXNets import ReXNetV1
16from .TF_NAS import TF_NAS_A
class BackboneFactory:
    """Factory that produces a backbone network according to backbone_conf.yaml.

    Attributes:
        backbone_type(str): which backbone will be produced.
        backbone_param(dict): parsed params and their values for that backbone.
    """

    def __init__(self, backbone_type, backbone_conf_file):
        """Load the YAML config and keep the section for ``backbone_type``.

        Args:
            backbone_type(str): key into the YAML file, e.g. 'MobileFaceNet'.
            backbone_conf_file(str): path to backbone_conf.yaml.
        """
        self.backbone_type = backbone_type
        with open(backbone_conf_file) as f:
            # NOTE(review): yaml.Loader is the full (unsafe) loader and can
            # construct arbitrary Python objects; prefer yaml.safe_load if
            # the config file is not fully trusted.
            backbone_conf = yaml.load(f, yaml.Loader)
        self.backbone_param = backbone_conf[backbone_type]
        print("backbone param:")
        print(self.backbone_param)

    def get_backbone(self):
        """Instantiate and return the configured backbone network.

        Returns:
            The backbone module for ``self.backbone_type`` (for the
            'AttentionNet_wj' type the original placeholder string is
            returned unchanged -- see the FIXME in that branch).

        Raises:
            ValueError: if ``self.backbone_type`` is not a known backbone.
            KeyError: if a required key is missing from the config section.
        """
        p = self.backbone_param  # shorthand for the per-backbone config dict
        if self.backbone_type == "MobileFaceNet":
            # feat_dim: output feature dimension, e.g. 512.
            # out_h/out_w: size of the feature map before the final features.
            backbone = MobileFaceNet(p["feat_dim"], p["out_h"], p["out_w"])
        elif self.backbone_type == "ResNet":
            # depth: e.g. 50, 100, 152; net_mode: 'ir' (improved resnet)
            # or 'ir_se' (SE-ResNet); drop_ratio: dropout ratio.
            backbone = Resnet(
                p["depth"],
                p["drop_ratio"],
                p["net_mode"],
                p["feat_dim"],
                p["out_h"],
                p["out_w"],
            )
        elif self.backbone_type == "EfficientNet":
            # width/depth: scaling coefficients, e.g. 1.0, 1.2, 1.4, ...;
            # image_size: input image size, e.g. 112.
            blocks_args, global_params = efficientnet(
                width_coefficient=p["width"],
                depth_coefficient=p["depth"],
                dropout_rate=p["drop_ratio"],
                image_size=p["image_size"],
            )
            backbone = EfficientNet(
                p["out_h"], p["out_w"], p["feat_dim"], blocks_args, global_params
            )
        elif self.backbone_type == "HRNet":
            # HighResolutionNet expects the raw param dict under 'MODEL'.
            backbone = HighResolutionNet({"MODEL": p})
        elif self.backbone_type == "GhostNet":
            backbone = GhostNet(
                p["width"], p["drop_ratio"], p["feat_dim"], p["out_h"], p["out_w"]
            )
        elif self.backbone_type == "AttentionNet":
            # stageN_modules: the number of attention modules in stage N.
            backbone = ResidualAttentionNet(
                p["stage1_modules"],
                p["stage2_modules"],
                p["stage3_modules"],
                p["feat_dim"],
                p["out_h"],
                p["out_w"],
            )
        elif self.backbone_type == "AttentionNet_wj":
            # Mirror the original key lookups so a malformed config still
            # raises KeyError exactly as before (the values are otherwise
            # unused by the placeholder below).
            for key in (
                "stage1_modules",
                "stage2_modules",
                "stage3_modules",
                "image_size",
                "feat_dim",
                "out_h",
                "out_w",
            ):
                _ = p[key]
            # FIXME: AttentionNet was never imported (the original author
            # asked "where was AttentionNet imported from?"), so the original
            # code left the constructor call as a string literal. Preserved
            # unchanged until the import source is known.
            backbone = """AttentionNet(
                stage1_modules,
                stage2_modules,
                stage3_modules,
                image_size,
                feat_dim,
                out_h,
                out_w,
            )"""
        elif self.backbone_type == "TF-NAS":
            backbone = TF_NAS_A(p["out_h"], p["out_w"], p["feat_dim"], p["drop_ratio"])
        elif self.backbone_type == "ResNeSt":
            # depth: e.g. 50, 100, 152.
            backbone = ResNeSt(
                p["depth"], p["drop_ratio"], p["feat_dim"], p["out_h"], p["out_w"]
            )
        elif self.backbone_type == "ReXNet":
            backbone = ReXNetV1(
                p["input_ch"],
                p["final_ch"],
                p["width_mult"],
                p["depth_mult"],
                p["use_se"] == 1,  # config encodes the SE flag as 0/1
                p["se_ratio"],
                p["out_h"],
                p["out_w"],
                p["feat_dim"],
                p["dropout_ratio"],
            )
        else:
            # Bug fix: the original 'else: pass' fell through to
            # 'return backbone' and raised a confusing NameError for any
            # unknown type; fail with an explicit error instead.
            raise ValueError(
                "Unsupported backbone type: {!r}".format(self.backbone_type)
            )
        return backbone