Coverage for src/bob/bio/vein/algorithm/MiuraMatch.py: 68%

31 statements  

« prev     ^ index     » next       coverage.py v7.6.0, created at 2024-07-12 23:27 +0200

1#!/usr/bin/env python 

2# vim: set fileencoding=utf-8 : 

3 

4import numpy 

5import scipy.signal 

6 

7from bob.bio.base.pipelines import BioAlgorithm 

8 

9 

class MiuraMatch(BioAlgorithm):
    """Finger vein matching: match ratio via cross-correlation

    The method is based on "cross-correlation" between a model and a probe image.
    It convolves the binary image(s) representing the model with the binary image
    representing the probe (rotated by 180 degrees), and evaluates how they
    cross-correlate. If the model and probe are very similar, the output of the
    correlation corresponds to a single scalar and approaches a maximum. The
    value is then normalized by the sum of the pixels lit in both binary images.
    Therefore, the output of this method is a floating-point number in the range
    :math:`[0, 0.5]`. The higher, the better match.

    In case model and probe represent images from the same vein structure, but
    are misaligned, the output is not guaranteed to be accurate. To mitigate this
    aspect, Miura et al. proposed to add a *small* cropping factor to the model
    image, assuming not much information is available on the borders (``ch``, for
    the vertical direction and ``cw``, for the horizontal direction). This allows
    the convolution to yield searches for different areas in the probe image. The
    maximum value is then taken from the resulting operation. The convolution
    result is normalized by the pixels lit in both the cropped model image and
    the matching pixels on the probe that yield the maximum on the resulting
    convolution.

    For this to work properly, input images are supposed to be binary in nature,
    with zeros and ones.

    Based on [MNM04]_ and [MNM05]_

    Parameters:

        ch (:py:class:`int`, optional): Maximum search displacement in y-direction.

        cw (:py:class:`int`, optional): Maximum search displacement in x-direction.

    """

    def __init__(
        self,
        ch=80,  # Maximum search displacement in y-direction
        cw=90,  # Maximum search displacement in x-direction
        probes_score_fusion="max",
        enrolls_score_fusion="mean",
        **kwargs,
    ):
        super().__init__(
            probes_score_fusion=probes_score_fusion,
            enrolls_score_fusion=enrolls_score_fusion,
            **kwargs,
        )

        self.ch = ch
        self.cw = cw

    def create_templates(self, feature_sets, enroll):
        # Templates are the raw feature sets; no model building is required
        # for this correlation-based matcher.
        return feature_sets

    def compare(self, enroll_templates, probe_templates):
        """Computes the full score matrix between enroll and probe templates.

        Returns an NxM array where N is the number of enroll templates and M
        is the number of probe templates. Each enroll template is a sequence
        of ?1 feature images and each probe template a sequence of ?2 feature
        images; the pairwise scores are fused first across probe samples
        (axis=1), then across enroll samples (axis=0).
        """
        scores = [
            [
                self.fuse_enroll_scores(
                    self.fuse_probe_scores(
                        [[self.score(e, p) for p in probe] for e in enroll],
                        axis=1,
                    ),
                    axis=0,
                )
                for probe in probe_templates
            ]
            for enroll in enroll_templates
        ]
        return numpy.array(scores)

    def score(self, model, probe):
        """Computes the score between the probe and the model.

        Parameters:

            model (numpy.ndarray): The model of the user to test the probe against

            probe (numpy.ndarray): The probe to test


        Returns:

            float: Value between 0 and 0.5, larger value means a better match

        """

        image_ = probe.astype(numpy.float64)

        md = model
        # erode model by (ch, cw)
        R = md.astype(numpy.float64)
        h, w = R.shape  # same as I
        crop_R = R[self.ch : h - self.ch, self.cw : w - self.cw]

        # correlates using scipy - fastest option available iff the self.ch and
        # self.cw are high (>30). In this case, the number of components
        # returned by the convolution is high and using an FFT-based method
        # yields best results. Otherwise, you may try the other options below
        # -> check our test_correlation() method on the test units for more
        # details and benchmarks.
        Nm = scipy.signal.fftconvolve(image_, numpy.rot90(crop_R, k=2), "valid")
        # 2nd best: use convolve2d or correlate2d directly;
        # Nm = scipy.signal.convolve2d(I, numpy.rot90(crop_R, k=2), 'valid')
        # 3rd best: use correlate2d
        # Nm = scipy.signal.correlate2d(I, crop_R, 'valid')

        # figures out where the maximum is on the resulting matrix
        t0, s0 = numpy.unravel_index(Nm.argmax(), Nm.shape)

        # this is our output
        Nmm = Nm[t0, s0]

        # normalizes the output by the number of pixels lit on the input
        # matrices, taking into consideration the surface that produced the
        # result (i.e., the eroded model and part of the probe)
        score = Nmm / (
            crop_R.sum()
            + image_[t0 : t0 + h - 2 * self.ch, s0 : s0 + w - 2 * self.cw].sum()
        )

        return score