Coverage for src/bob/bio/face/script/face_plots_commands.py: 0%

123 statements  

coverage.py v7.6.0, created at 2024-07-13 00:04 +0200

import click
import numpy as np

from bob.bio.face.reports.arface import arface_report
from bob.bio.face.reports.gbu import gbu_report
from bob.bio.face.reports.ijbc import ijbc_report
from bob.bio.face.reports.mobio import mobio_report
from bob.bio.face.reports.multipie import (
    multipie_expression_report,
    multipie_pose_report,
)
from bob.bio.face.reports.scface import scface_report
from bob.measure.script import common_options

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.eval_option()
@common_options.output_plot_file_option(default_out="multipie.pdf")
@click.pass_context
@click.option(
    "--optimal-threshold",
    is_flag=True,
    help="BE CAREFUL. If this flag is set, it will compute the decision threshold for each sub-protocol.",
)
@click.option(
    "--threshold-eval",
    is_flag=True,
    help="BE CAREFUL. If this flag is set, it will compute the decision threshold using the evaluation set.",
)
@click.option(
    "--fmr-operational-threshold",
    default=1e-3,
    help="FMR operational point used to compute FNMR and FMR on the evaluation set",
)
def multipie_pose(
    ctx,
    scores,
    evaluation,
    output,
    titles,
    optimal_threshold,
    threshold_eval,
    fmr_operational_threshold,
    **kwargs,
):
    """Plots the Multi-PIE pose report."""

    if len(scores) // 2 != len(titles):
        raise ValueError(
            "The number of score files does not match the number of titles. "
            "Provide one pair of score files (`dev` and `eval`) per title."
        )

    scores = np.array(scores, dtype="object")

    if evaluation:
        # Score files are interleaved as `dev, eval, dev, eval, ...`:
        # even indices hold the development scores, odd indices the evaluation scores.
        scores_dev = scores[0::2]
        scores_eval = scores[1::2]
    else:
        scores_dev = scores
        scores_eval = None

    multipie_pose_report(
        scores_dev,
        scores_eval,
        output,
        titles,
        figsize=(8, 4),
        optimal_threshold=optimal_threshold,
        threshold_eval=threshold_eval,
        fmr_threshold=fmr_operational_threshold,
    )

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.eval_option()
@common_options.output_plot_file_option(default_out="multipie_expression.pdf")
@click.option(
    "--fmr-operational-threshold",
    default=1e-3,
    help="FMR operational point used to compute FNMR and FMR on the evaluation set",
)
@click.pass_context
def multipie_expression(
    ctx, scores, evaluation, output, titles, fmr_operational_threshold, **kwargs
):
    """Plots the Multi-PIE expression report."""

    if len(scores) // 2 != len(titles):
        raise ValueError(
            "The number of score files does not match the number of titles. "
            "Provide one pair of score files (`dev` and `eval`) per title."
        )

    scores = np.array(scores, dtype="object")

    if evaluation:
        # Interleaved score files: even indices are `dev`, odd indices are `eval`.
        scores_dev = scores[0::2]
        scores_eval = scores[1::2]
    else:
        scores_dev = scores
        scores_eval = None

    multipie_expression_report(
        scores_dev,
        scores_eval,
        output,
        titles,
        figsize=(8, 4),
        fmr_threshold=fmr_operational_threshold,
    )

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.eval_option()
@common_options.output_plot_file_option(default_out="multipie.pdf")
@click.option(
    "--fmr-operational-threshold",
    default=1e-3,
    help="FMR operational point used to compute FNMR and FMR on the evaluation set",
)
@click.pass_context
def scface_distance(
    ctx, scores, evaluation, output, titles, fmr_operational_threshold, **kwargs
):
    """Plots the SCFace multi-distance report."""

    if len(scores) // 2 != len(titles):
        raise ValueError(
            "The number of score files does not match the number of titles. "
            "Provide one pair of score files (`dev` and `eval`) per title."
        )

    scores = np.array(scores, dtype="object")

    if evaluation:
        # Interleaved score files: even indices are `dev`, odd indices are `eval`.
        scores_dev = scores[0::2]
        scores_eval = scores[1::2]
    else:
        scores_dev = scores
        scores_eval = None

    scface_report(
        scores_dev,
        scores_eval,
        output,
        titles,
        figsize=(8, 4),
        fmr_threshold=fmr_operational_threshold,
    )

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.eval_option()
@common_options.output_plot_file_option(default_out="arface.pdf")
@click.option(
    "--fmr-operational-threshold",
    default=1e-3,
    help="FMR operational point used to compute FNMR and FMR on the evaluation set",
)
@click.pass_context
def arface(
    ctx, scores, evaluation, output, titles, fmr_operational_threshold, **kwargs
):
    """Plots the ARface report."""

    if len(scores) // 2 != len(titles):
        raise ValueError(
            "The number of score files does not match the number of titles. "
            "Provide one pair of score files (`dev` and `eval`) per title."
        )

    scores = np.array(scores, dtype="object")

    if evaluation:
        # Interleaved score files: even indices are `dev`, odd indices are `eval`.
        scores_dev = scores[0::2]
        scores_eval = scores[1::2]
    else:
        scores_dev = scores
        scores_eval = None

    arface_report(
        scores_dev,
        scores_eval,
        output,
        titles,
        figsize=(8, 4.5),
        fmr_threshold=fmr_operational_threshold,
    )

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.eval_option()
@common_options.output_plot_file_option(default_out="mobio_gender.pdf")
@click.option(
    "--fmr-operational-threshold",
    default=1e-3,
    help="FMR operational point used to compute FNMR and FMR on the evaluation set",
)
@click.pass_context
def mobio_gender(
    ctx, scores, evaluation, output, titles, fmr_operational_threshold, **kwargs
):
    """Plots the MOBIO gender report."""

    if len(scores) // 2 != len(titles):
        raise ValueError(
            "The number of score files does not match the number of titles. "
            "Provide one pair of score files (`dev` and `eval`) per title."
        )

    scores = np.array(scores, dtype="object")

    if evaluation:
        # Interleaved score files: even indices are `dev`, odd indices are `eval`.
        scores_dev = scores[0::2]
        scores_eval = scores[1::2]
    else:
        scores_dev = scores
        scores_eval = None

    mobio_report(
        scores_dev,
        scores_eval,
        output,
        titles,
        figsize=(8, 4),
        fmr_threshold=fmr_operational_threshold,
    )

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out="GBU.pdf")
@click.pass_context
def gbu(ctx, scores, output, titles, **kwargs):
    """Plots the GBU report."""

    if len(scores) != len(titles):
        raise ValueError(
            "The number of score files does not match the number of titles."
        )

    scores = np.array(scores, dtype="object")

    gbu_report(scores, output, titles, figsize=(8, 4))

@click.command()
@common_options.scores_argument(nargs=-1)
@common_options.titles_option()
@common_options.output_plot_file_option(default_out="ijb-c.pdf")
@click.pass_context
def ijbc(ctx, scores, output, titles, **kwargs):
    """Plots the IJB-C report."""

    if len(scores) != len(titles):
        raise ValueError(
            "The number of score files does not match the number of titles."
        )

    scores = np.array(scores, dtype="object")

    ijbc_report(scores, output, titles, figsize=(8, 6))
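
Since every command above is a plain click callback, it can be exercised directly with click.testing.CliRunner, which is also the most direct way a test could lift this file's coverage above 0%. The snippet below is a minimal sketch, not part of the module: the score-file path is a placeholder, and the `--titles` / `--output` option names are assumed to be what bob.measure's `titles_option` and `output_plot_file_option` decorators register.

# Minimal sketch (assumptions noted above); not part of face_plots_commands.py.
from click.testing import CliRunner

from bob.bio.face.script.face_plots_commands import gbu

runner = CliRunner()
result = runner.invoke(
    gbu,
    [
        "my_gbu_scores.csv",     # hypothetical score file for the positional `scores` argument
        "--titles", "baseline",  # one title per score file for the GBU command
        "--output", "GBU.pdf",   # output plot file
    ],
)
print(result.exit_code, result.output)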