Coverage for src/bob/bio/face/preprocessor/FaceCrop.py: 89%
76 statements
« prev ^ index » next coverage.py v7.6.0, created at 2024-07-13 00:04 +0200
1import logging
3from .Base import Base
4from .croppers import FaceCropBoundingBox, FaceEyesNorm
6logger = logging.getLogger("bob.bio.face")
7from bob.bio.base import load_resource
class FaceCrop(Base):
    """Crops the face according to the given annotations.

    This class is designed to perform a geometric normalization of the face
    based on the eye locations, using
    :py:class:`bob.bio.face.preprocessor.croppers.FaceEyesNorm`. Usually, when
    executing the :py:meth:`crop_face` function, the image and the eye
    locations have to be specified. There, the given image will be transformed
    such that the eye locations will be placed at specific locations in the
    resulting image. These locations, as well as the size of the cropped
    image, need to be specified in the constructor of this class, as
    ``cropped_positions`` and ``cropped_image_size``.

    Some image databases do not provide eye locations, but rather bounding
    boxes. This is not a problem at all. Simply define the coordinates, where
    you want your ``cropped_positions`` to be in the cropped image, by
    specifying the same keys in the dictionary that will be given as
    ``annotations`` to the :py:meth:`crop_face` function.

    .. note::

        These locations can even be outside of the cropped image boundary,
        i.e., when the crop should be smaller than the annotated bounding
        boxes.

    Sometimes, databases provide pre-cropped faces, where the eyes are located
    at (almost) the same position in all images. Usually, the cropping does
    not conform with the cropping that you like (i.e., image resolution is
    wrong, or too much background information). However, the database does not
    provide eye locations (since they are almost identical for all images).
    In that case, you can specify the ``fixed_positions`` in the constructor,
    which will be taken instead of the ``annotations`` inside the
    :py:meth:`crop_face` function (in which case the ``annotations`` are
    ignored).

    Parameters
    ----------

    cropped_image_size : (int, int)
        The resolution of the cropped image, in order (HEIGHT,WIDTH); if not
        given, no face cropping will be performed. A single ``int`` is
        interpreted as a square crop.

    cropped_positions : dict
        The coordinates in the cropped image, where the annotated points
        should be put to. This parameter is a dictionary with usually two
        elements, e.g., ``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) ,
        'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``. However, also other parameters,
        such as ``{'topleft' : ..., 'bottomright' : ...}`` are supported, as
        long as the ``annotations`` in the `__call__` function are present.

    fixed_positions : dict or None
        If specified, ignore the annotations from the database and use these
        fixed positions throughout.

    allow_upside_down_normalized_faces : bool, optional
        If ``False`` (default), a ValueError is raised when normalized faces
        are going to be upside down compared to input image. This allows you
        to catch wrong annotations in your database easily. If you are sure
        about your input, you can set this flag to ``True``.

    annotator : :any:`bob.bio.base.annotator.Annotator`
        If provided, the annotator will be used if the required annotations
        are missing. A string is resolved as a named annotator resource.

    cropper :
        Object that performs the actual cropping using the annotations; when
        ``None``, a :py:class:`FaceEyesNorm` built from ``cropped_positions``
        and ``cropped_image_size`` is used.

    kwargs
        Remaining keyword parameters passed to the :py:class:`Base`
        constructor, such as ``color_channel`` or ``dtype``.
    """

    def __init__(
        self,
        cropped_image_size,
        cropped_positions=None,
        cropper=None,
        fixed_positions=None,
        annotator=None,
        allow_upside_down_normalized_faces=False,
        **kwargs,
    ):
        # call base class constructor
        Base.__init__(self, **kwargs)

        # a single integer is interpreted as a square crop
        if isinstance(cropped_image_size, int):
            cropped_image_size = (cropped_image_size, cropped_image_size)

        self.allow_upside_down_normalized_faces = (
            allow_upside_down_normalized_faces
        )

        # default cropper: geometric normalization based on eye positions
        if cropper is None:
            cropper = FaceEyesNorm(
                cropped_positions,
                cropped_image_size,
                allow_upside_down_normalized_faces=allow_upside_down_normalized_faces,
            )
        # FIX: the cropper used to be assigned twice; a single assignment
        # is sufficient.
        self.cropper = cropper

        # copy parameters
        self.cropped_image_size = cropped_image_size
        self.cropped_positions = cropped_positions
        self.fixed_positions = fixed_positions

        # resolve a named annotator resource into an annotator object
        if isinstance(annotator, str):
            annotator = load_resource(annotator, "annotator")
        self.annotator = annotator

    def transform(self, X, annotations=None):
        """Aligns the given images according to the given annotations.

        First, the desired color channel is extracted from each image.
        Afterward, the face is cropped, according to the given
        ``annotations`` (or to ``fixed_positions``, see
        :py:meth:`crop_face`). Finally, the resulting face is converted to
        the desired data type.

        Parameters
        ----------
        X : list of 2D or 3D :py:class:`numpy.ndarray`
            The face images to be processed.
        annotations : list of dict or ``None``
            The annotations that fit to the given images, one per image.

        Returns
        -------
        faces : list of 2D :py:class:`numpy.ndarray`
            The cropped faces; entries are ``None`` where no crop could be
            computed (missing annotations and no/failed annotator).
        """

        def _crop(image, annot):
            # fixed positions take priority over per-sample annotations
            if self.fixed_positions is not None:
                annot = self.fixed_positions

            # if annotations are missing and we don't have an annotator,
            # return None
            if annot is None and self.annotator is None:
                # FIX: logger.warn is deprecated; use logger.warning
                logger.warning(
                    "Cannot crop face without valid annotations or "
                    "fixed_positions or an annotator. Returning None. "
                    "The annotations were: {}".format(annot)
                )
                return None

            # convert to the desired color channel
            image = self.change_color_channel(image)

            # annotate the image if annotations are missing AND we don't
            # have fixed_positions
            if annot is None and self.annotator is not None:
                annot = self.annotator([image], annotations=[annot])[0]
                if annot is None:
                    logger.warning(
                        "The annotator failed and the annot are missing too"
                        ". Returning None."
                    )
                    return None

            # crop the face and cast to the configured data type
            return self.data_type(self.cropper.transform(image, annot))

        if annotations is None:
            return [_crop(data, None) for data in X]
        return [_crop(data, annot) for data, annot in zip(X, annotations)]
class MultiFaceCrop(Base):
    """Wraps around FaceCrop to enable a dynamical cropper that can handle
    several annotation types.

    Initialization and usage is similar to the FaceCrop, but the main
    difference here is that one specifies a *list* of cropped_positions, and
    optionally a *list* of associated fixed positions.

    For each set of cropped_positions in the list, a new FaceCrop will be
    instantiated that handles this exact set of annotations.
    When calling the *transform* method, the MultiFaceCrop matches each
    sample to its associated cropper based on the received annotation, then
    performs the cropping of each subset, and finally gathers the results.

    If there is more than one cropper matching with the annotations, the
    **first valid** cropper will be taken.
    In case none of the croppers match with the received annotations, a
    ``ValueError`` is raised.

    Parameters
    ----------

    croppers_list : list
        A list of :py:class:`FaceCrop` that crops the face

    Raises
    ------
    TypeError
        If ``croppers_list`` is not a list of :py:class:`FaceCrop`.
    """

    def __init__(
        self,
        croppers_list,
    ):
        # Validate eagerly so misconfiguration fails at construction time.
        # FIX: explicit raises instead of `assert`, which is stripped
        # under `python -O`.
        if not isinstance(croppers_list, list):
            raise TypeError("croppers_list must be a list of FaceCrop")
        for cropper in croppers_list:
            if not isinstance(cropper, FaceCrop):
                raise TypeError(
                    "croppers_list must contain only FaceCrop instances"
                )
        self.croppers_list = croppers_list

    def transform(self, X, annotations=None):
        """Crops each sample with the first cropper that accepts its
        annotations.

        Parameters
        ----------
        X : list
            The images to be cropped.
        annotations : list of dict or ``None``
            One annotation dict per image; ``None`` means no annotations
            for any sample.

        Returns
        -------
        list
            The cropped faces, one per input image.

        Raises
        ------
        ValueError
            If no cropper produced a valid crop for a sample.
        """
        # FIX: `annotations=None` used to crash with a TypeError inside
        # `zip`; treat it as "no annotations for any sample" instead.
        if annotations is None:
            annotations = [None] * len(X)

        # Assign each sample to its matching cropper
        transformed_samples = []
        for X_elem, annotations_elem in zip(X, annotations):
            cropped_sample = None
            for cropper in self.croppers_list:
                # Matching the first possible cropper that works
                try:
                    cropped_sample = cropper.transform(
                        [X_elem], [annotations_elem]
                    )[0]
                    break
                except Exception:
                    continue

            if cropped_sample is None:
                raise ValueError(
                    "No cropper found for annotations {}".format(
                        annotations_elem
                    )
                )

            transformed_samples.append(cropped_sample)

        # Gather the results
        return transformed_samples
class BoundingBoxAnnotatorCrop(Base):
    """
    This face cropper uses a 2 stage strategy to crop and align faces in case
    `annotation_type` has a bounding-box.
    In the first stage, it crops the face using the {`topleft`,
    `bottomright`} parameters and expands them using a `margin` factor.
    In the second stage, it uses the `annotator` to estimate {`leye` and
    `reye`} to make the crop using
    :py:class:`bob.bio.face.preprocessor.croppers.FaceEyesNorm`.
    In case the annotator doesn't work, it returns the cropped face using the
    `bounding-box` coordinates.

    .. warning::
        `cropped_positions` must be set with `leye`, `reye`, `topleft` and
        `bottomright` positions

    Parameters
    ----------

    eyes_cropper : :py:class:`bob.bio.face.preprocessor.croppers.FaceEyesNorm`
        This is the cropper that will be used to crop the face using eyes
        positions

    annotator : :any:`bob.bio.base.annotator.Annotator`
        This is the annotator that will be used to detect faces in the
        cropped images. A string is resolved as a named annotator resource.

    margin : float, optional
        Expansion factor applied to the bounding box in the first-stage crop.
    """

    def __init__(
        self,
        eyes_cropper,
        annotator,
        margin=0.5,
    ):
        self.eyes_cropper = eyes_cropper
        self.margin = margin
        # first-stage cropper: bounding-box crop expanded by `margin`
        self.face_cropper = FaceCropBoundingBox(
            final_image_size=self.eyes_cropper.final_image_size, margin=margin
        )
        # resolve a named annotator resource into an annotator object
        if isinstance(annotator, str):
            annotator = load_resource(annotator, "annotator")
        self.annotator = annotator

    def transform(self, X, annotations=None):
        """
        Crops the face using the two-stage croppers

        Parameters
        ----------

        X : list(numpy.ndarray)
            List of images to be cropped

        annotations : list(dict)
            Annotations for each image. Each annotation must contain at
            least the bounding-box keys ``topleft`` and ``bottomright``.

        Returns
        -------
        list(numpy.ndarray)
            The cropped faces, one per input image.
        """
        # FIX: bounding boxes are mandatory for this cropper; fail with a
        # clear message instead of an obscure TypeError inside `zip`.
        if annotations is None:
            raise ValueError(
                "BoundingBoxAnnotatorCrop requires bounding-box annotations"
            )

        faces = []

        for x, annot in zip(X, annotations):
            # stage 1: crop with the annotated bounding box (plus margin)
            face_crop = self.face_cropper.transform(x, annot, resize=False)

            # stage 2: get the eye coordinates with the annotator
            annotator_annotations = self.annotator([face_crop])[0]

            # If nothing was detected OR if the annotations are swapped
            # (right eye found to the right of the left eye in image x
            # coordinates), fall back to the plain bounding-box crop.
            if (
                annotator_annotations is None
                or annotator_annotations["reye"][1]
                > annotator_annotations["leye"][1]
            ):
                logger.warning(
                    f"Unable to detect face in bounding box. Got : {annotator_annotations}. Cropping will be only based on bounding-box."
                )

                # append original image cropped with original bounding boxes
                faces.append(self.face_cropper.transform(x, annot, resize=True))
            else:
                faces.append(
                    self.eyes_cropper.transform(
                        face_crop, annotator_annotations
                    )
                )

        return faces