Coverage for src/bob/bio/face/preprocessor/TanTriggs.py: 98%
45 statements
« prev ^ index » next coverage.py v7.6.0, created at 2024-07-13 00:04 +0200
1#!/usr/bin/env python
2# vim: set fileencoding=utf-8 :
3# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
4# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
7import cv2
8import numpy as np
10from .Base import Base
11from .utils import load_cropper
14def compute_tan_triggs(
15 image, gamma=0.2, sigma_0=1, sigma_1=2, size=11, threshold=10, alpha=0.1
16):
17 """
18 Applies Tan&Triggs algorithm [TT10]_ to photometrically enhance the image
21 Parameters
22 ----------
24 image : 2D numpy.ndarray
25 The image to be processed.
27 gamma : float
28 [default: 0.2] The value of gamma for the gamma correction
30 sigma_0 : float
31 [default: 1] The standard deviation of the first Gaussian kernel used in the DoG filter to smooth the image.
33 sigma_1 : float
34 [default: 2] The standard deviation of the second Gaussian kernel used in the DoG filter to smooth the image.
36 size : int
37 [default: 11] The size of the Gaussian kernel used in the DoG filter to smooth the image.
39 threshold : float
40 [default: 10] The threshold used for the contrast equalization
42 alpha : float
43 [default: 0.1] The alpha value used for the contrast equalization
46 """
47 assert image.ndim == 2, "The image must be a 2D numpy.ndarray"
49 # 1. Gamma correction
50 gamma_image = np.power(image, gamma)
52 # 2. DoG filter
53 dog_1 = cv2.GaussianBlur(gamma_image, (size, size), sigma_0)
54 dog_2 = cv2.GaussianBlur(gamma_image, (size, size), sigma_1)
55 dog_image = dog_1 - dog_2
57 # 3. Contrast equalization
58 # first step - I:=I/mean(abs(I)^a)^(1/a)
59 norm_fact = np.mean(np.abs(dog_image) ** alpha) ** (1 / alpha)
60 dog_image /= norm_fact
62 # second step - I:=I/mean(min(threshold,abs(I))^a)^(1/a)
63 norm_fact = np.mean(np.minimum(threshold, np.abs(dog_image)) ** alpha) ** (
64 1 / alpha
65 )
66 dog_image /= norm_fact
68 # 4. I:= threshold * tanh( I / threshold )
69 dog_image = np.tanh(dog_image / threshold) * threshold
71 return dog_image
class TanTriggs(Base):
    """Crops the face (if desired) and applies Tan&Triggs algorithm [TT10]_ to photometrically enhance the image.

    Parameters
    ----------

    face_cropper : str or :py:class:`bob.bio.face.preprocessor.FaceCrop` or :py:class:`bob.bio.face.preprocessor.FaceDetect` or ``None``
        The face image cropper that should be applied to the image.
        If ``None`` is selected, no face cropping is performed.
        Otherwise, the face cropper might be specified as a registered resource, a configuration file, or an instance of a preprocessor.

        .. note:: The given class needs to contain a ``crop_face`` method.

    gamma, sigma0, sigma1, size, threshold, alpha
        Please refer to the [TT10]_ original paper.

    kwargs
        Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
    """

    def __init__(
        self,
        face_cropper,
        gamma=0.2,
        sigma0=1,
        sigma1=2,
        size=5,
        threshold=10.0,
        alpha=0.1,
        **kwargs,
    ):
        # Call base class constructor with its set of parameters
        # (e.g. ``color_channel`` or ``dtype``).
        Base.__init__(self, **kwargs)

        self.face_cropper = face_cropper
        self.gamma = gamma
        self.sigma0 = sigma0
        self.sigma1 = sigma1
        self.size = size
        self.threshold = threshold
        self.alpha = alpha

        # Resolve the cropper argument (registered resource, configuration
        # file, or preprocessor instance) into a usable cropper object.
        self.cropper = load_cropper(face_cropper)

    def transform(self, X, annotations=None):
        """__call__(image, annotations = None) -> face

        Aligns the given image according to the given annotations.

        First, the desired color channel is extracted from the given image.
        Afterward, the face is eventually cropped using the ``face_cropper`` specified in the constructor.
        Then, the image is photometrically enhanced using the Tan&Triggs algorithm [TT10]_.
        Finally, the resulting face is converted to the desired data type.

        **Parameters:**

        image : 2D or 3D :py:class:`numpy.ndarray`
            The face image to be processed.

        annotations : dict or ``None``
            The annotations that fit to the given image.
            Might be ``None``, when the ``face_cropper`` is ``None`` or of type :py:class:`FaceDetect`.

        **Returns:**

        face : 2D :py:class:`numpy.ndarray`
            The cropped and photometrically enhanced face.
        """

        def _preprocess_one(image, annotations=None):
            # Crop first (if a cropper was configured), then pick the color
            # channel, then enhance.  Only the cropping step depends on
            # whether a cropper is available.
            if self.cropper is not None:
                # TODO: USE THE TAG `ALLOW_ANNOTATIONS`
                cropped = (
                    self.cropper.transform([image])
                    if annotations is None
                    else self.cropper.transform([image], [annotations])
                )
                # We change the color channel *after* cropping : some croppers use MTCNN internally, that works on multichannel images
                image = self.change_color_channel(cropped[0])
            else:
                image = self.change_color_channel(image)

            enhanced = compute_tan_triggs(
                image,
                self.gamma,
                self.sigma0,
                self.sigma1,
                self.size,
                self.threshold,
                self.alpha,
            )
            return self.data_type(enhanced)

        if annotations is None:
            return [_preprocess_one(data) for data in X]
        return [
            _preprocess_one(data, annot)
            for data, annot in zip(X, annotations)
        ]