diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py index c384f454..42be49f0 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -17,6 +17,7 @@ from photonix.classifiers.face.deepface.commons.distance import findEuclideanDistance from photonix.classifiers.face.deepface.DeepFace import build_model from photonix.photos.utils.redis import redis_connection +from photonix.photos.utils.metadata import PhotoMetadata GRAPH_FILE = os.path.join('face', 'mtcnn_weights.npy') @@ -72,8 +73,17 @@ def load_graph(self, graph_file): def predict(self, image_file, min_score=0.99): # Detects face bounding boxes image = Image.open(image_file) + if image.mode != 'RGB': image = image.convert('RGB') + + # Perform rotations if declared in metadata + metadata = PhotoMetadata(image_file) + if metadata.get('Orientation') in ['Rotate 90 CW', 'Rotate 270 CCW']: + image = image.rotate(-90, expand=True) + elif metadata.get('Orientation') in ['Rotate 90 CCW', 'Rotate 270 CW']: + image = image.rotate(90, expand=True) + image = np.asarray(image) results = self.graph['mtcnn'].detect_faces(image) return list(filter(lambda f: f['confidence'] > min_score, results)) diff --git a/photonix/classifiers/object/model.py b/photonix/classifiers/object/model.py index 593c3121..15db6106 100644 --- a/photonix/classifiers/object/model.py +++ b/photonix/classifiers/object/model.py @@ -11,6 +11,7 @@ from photonix.classifiers.object.utils import label_map_util from photonix.classifiers.base_model import BaseModel from photonix.photos.utils.redis import redis_connection +from photonix.photos.utils.metadata import PhotoMetadata GRAPH_FILE = os.path.join('object', 'ssd_mobilenet_v2_oid_v4_2018_12_12_frozen_inference_graph.pb') @@ -115,8 +116,17 @@ def format_output(self, output_dict, min_score): def predict(self, image_file, min_score=0.1): image = Image.open(image_file) + if image.mode != 'RGB': image = image.convert('RGB') + + # Perform rotations if declared in metadata + metadata = PhotoMetadata(image_file) + if metadata.get('Orientation') in ['Rotate 90 CW', 'Rotate 270 CCW']: + image = image.rotate(-90, expand=True) + elif metadata.get('Orientation') in ['Rotate 90 CCW', 'Rotate 270 CW']: + image = image.rotate(90, expand=True) + # the array based representation of the image will be used later in order to prepare the # result image with boxes and labels on it. image_np = self.load_image_into_numpy_array(image)