Coverage for src/bob/bio/vein/script/blame.py: 0%
57 statements
coverage.py v7.6.0, created at 2024-07-12 23:27 +0200
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Wed 18 Jan 2017 09:40:25 CET

"""Evaluates best/worst performers in a run given original scores

Usage: %(prog)s [-v...] [options] <score-file> [<score-file> ...]
       %(prog)s --help
       %(prog)s --version


Arguments:
  <score-file>  Path to model-by-model score files for analysis


Options:
  -h, --help           Shows this help message and exits
  -V, --version        Prints the version and exits
  -v, --verbose        Increases the output verbosity level
  -c INT, --cases=INT  Number of worst/best cases to show [default: 5]


Examples:

  1. Simple trial:

     $ %(prog)s -vv model1.txt model2.txt

  2. Change the number of cases to show:
     $ %(prog)s -vv --cases=10 model*.txt

"""

import os
import sys

import clapper.logging
import numpy

logger = clapper.logging.setup("bob.bio.vein")


def main(user_input=None):
    if user_input is not None:
        argv = user_input
    else:
        argv = sys.argv[1:]

    import docopt
    import pkg_resources

    completions = dict(
        prog=os.path.basename(sys.argv[0]),
        version=pkg_resources.require("bob.bio.base")[0].version,
    )

    args = docopt.docopt(
        __doc__ % completions,
        argv=argv,
        version=completions["version"],
    )

    # Sets up logging
    verbosity = int(args["--verbose"])
    clapper.logging.set_verbosity_level(logger, verbosity)

    # validates number of cases
    cases = int(args["--cases"])

    # generates one big score table concatenating the scores of all models
    from bob.bio.base.score.load import load_score

    scores = []
    names = {}
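
    # First pass: find the longest model name, so that the fixed-width
    # unicode "model" column added below can hold every name untruncated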
    length = 0
    for k in args["<score-file>"]:
        model = os.path.splitext(os.path.basename(k))[0]
        length = max(length, len(model))

    for k in args["<score-file>"]:
        model = os.path.splitext(os.path.basename(k))[0]
        names[model] = k
        logger.info("Loading score file `%s' for model `%s'..." % (k, model))
        s = load_score(k)
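        # load_score() returns a structured array with the fields
        # "claimed_id", "real_id", "test_label" and "score"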

        # append a column with the model name
        m = numpy.array(len(s) * [model], dtype="<U%d" % length)
        new_dt = numpy.dtype(s.dtype.descr + [("model", m.dtype.str)])
        sp = numpy.zeros(s.shape, dtype=new_dt)
        sp["claimed_id"] = s["claimed_id"]
        sp["real_id"] = s["real_id"]
        sp["test_label"] = s["test_label"]
        sp["score"] = s["score"]
        sp["model"] = m

        # stack into the existing scores set
        scores.append(sp)

    scores = numpy.concatenate(scores)
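    # a score is genuine when the claimed and the real identity match;
    # otherwise it is an impostor score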
    genuines = scores[scores["claimed_id"] == scores["real_id"]]
    genuines.sort(order="score")  # ascending
    impostors = scores[scores["claimed_id"] != scores["real_id"]]
    impostors.sort(order="score")  # ascending
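    # after the ascending sort, the worst genuine scores (lowest) sit at the
    # beginning of "genuines", while the worst impostor scores (highest) sit
    # at the end of "impostors"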

    # prints the worst/best cases for genuine and impostor scores
    print("The %d worst genuine scores:" % cases)
    for k in range(cases):
        print(
            " %d. model %s -> %s (%f)"
            % (
                k + 1,
                genuines[k]["model"],
                genuines[k]["test_label"],
                genuines[k]["score"],
            )
        )

    print("The %d best genuine scores:" % cases)
    for k in range(cases):
        pos = len(genuines) - k - 1
        print(
            " %d. model %s -> %s (%f)"
            % (
                k + 1,
                genuines[pos]["model"],
                genuines[pos]["test_label"],
                genuines[pos]["score"],
            )
        )

    print("The %d worst impostor scores:" % cases)
    for k in range(cases):
        pos = len(impostors) - k - 1
        print(
            " %d. model %s -> %s (%f)"
            % (
                k + 1,
                impostors[pos]["model"],
                impostors[pos]["test_label"],
                impostors[pos]["score"],
            )
        )

    print("The %d best impostor scores:" % cases)
    for k in range(cases):
        print(
            " %d. model %s -> %s (%f)"
            % (
                k + 1,
                impostors[k]["model"],
                impostors[k]["test_label"],
                impostors[k]["score"],
            )
        )

    return 0
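

# Hypothetical convenience guard (not in the original script, which is meant
# to be exposed through a console_scripts entry point): it only makes the
# module directly runnable and forwards main()'s return code to the shell.
if __name__ == "__main__":
    sys.exit(main())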