diff --git a/contrib/common/lib/cv/annotations/CircularAnnotations.py b/contrib/common/lib/cv/annotations/CircularAnnotations.py index 83ce85e6c..f9de9ab92 100644 --- a/contrib/common/lib/cv/annotations/CircularAnnotations.py +++ b/contrib/common/lib/cv/annotations/CircularAnnotations.py @@ -23,7 +23,7 @@ def __init__( self, style: rcps.RenderControlPointSeq = None, centers_radiuses: tuple[p2.Pxy, list[int]] = None, - pixels_to_meters: float = None, + meters_per_pixel: float = None, ): """ Parameters @@ -32,7 +32,7 @@ def __init__( The rendering style, by default {magenta, no corner markers} centers_radiuses : tuple[Pxy, list[int]] The center(s) and radius(es) for this annotation, in pixels - pixels_to_meters : float, optional + meters_per_pixel : float, optional A simple conversion method for how many meters a pixel represents, for use in scale(). By default None. """ @@ -41,7 +41,7 @@ def __init__( super().__init__(style) self.p2r = centers_radiuses - self.pixels_to_meters = pixels_to_meters + self.meters_per_pixel = meters_per_pixel def get_bounding_box(self, index=0) -> reg.RegionXY: x = self.p2r[0].x[index] @@ -54,6 +54,11 @@ def get_bounding_box(self, index=0) -> reg.RegionXY: def origin(self) -> p2.Pxy: return self.p2r[0] + def translate(self, translation: p2.Pxy): + centers, radiuses = self.p2r + p2r = (centers + translation, radiuses) + return self.__class__(self.style, p2r, self.meters_per_pixel) + @property def rotation(self) -> scipy.spatial.transform.Rotation: raise NotImplementedError("Orientation is not yet implemented for CircularAnnotations") @@ -64,13 +69,13 @@ def size(self) -> list[float]: @property def scale(self) -> list[float]: - if self.pixels_to_meters is None: + if self.meters_per_pixel is None: lt.error_and_raise( RuntimeError, "Error in CircularAnnotations.scale(): " - + "no pixels_to_meters conversion ratio is set, so scale can't be estimated", + + "no meters_per_pixel conversion ratio is set, so scale can't be estimated", ) - return [d 
* self.pixels_to_meters for d in self.size] + return [d * self.meters_per_pixel for d in self.size] def render_to_figure(self, fig: rcfr.RenderControlFigureRecord, image: np.ndarray = None, include_label=False): label = self.get_label(include_label) diff --git a/contrib/common/lib/cv/annotations/EnclosedEnergyAnnotations.py b/contrib/common/lib/cv/annotations/EnclosedEnergyAnnotations.py index bd8409da4..e2b458448 100644 --- a/contrib/common/lib/cv/annotations/EnclosedEnergyAnnotations.py +++ b/contrib/common/lib/cv/annotations/EnclosedEnergyAnnotations.py @@ -21,7 +21,7 @@ def __init__( style: rcps.RenderControlPointSeq = None, centers_radiuses: tuple[p2.Pxy, list[int]] = None, enclosed_shape: str = "circle", - pixels_to_meters: float = None, + meters_per_pixel: float = None, ): """ Parameters @@ -32,7 +32,7 @@ def __init__( The center(s) and radius(es) for this annotation, in pixels enclosed_shape : str, optional The shape used to determine the enclosed energy. Supports "circle" and "square". Default is "circle". - pixels_to_meters : float, optional + meters_per_pixel : float, optional A simple conversion method for how many meters a pixel represents, for use in scale(). By default None. 
""" @@ -50,15 +50,15 @@ def __init__( self._p2r = centers_radiuses self.enclosed_shape = enclosed_shape - self.pixels_to_meters = pixels_to_meters + self.meters_per_pixel = meters_per_pixel self.label = f"En{enclosed_shape}d Energy" # Encircled, Ensquared r = p2.Pxy((self._p2r[1], self._p2r[1])) upper_left = self._p2r[0] - r lower_right = self._p2r[0] + r - self._representative_circle = CircularAnnotations(style, centers_radiuses, pixels_to_meters) - self._representative_square = RectangleAnnotations(style, (upper_left, lower_right), pixels_to_meters) + self._representative_circle = CircularAnnotations(style, centers_radiuses, meters_per_pixel) + self._representative_square = RectangleAnnotations(style, (upper_left, lower_right), meters_per_pixel) def get_bounding_box(self, index=0) -> reg.RegionXY: return self._representative_circle.get_bounding_box(index) @@ -67,6 +67,11 @@ def get_bounding_box(self, index=0) -> reg.RegionXY: def origin(self) -> p2.Pxy: return self._representative_circle.origin + def translate(self, translation: p2.Pxy): + centers, radiuses = self._p2r + centers += translation + return self.__class__(self.style, (centers, radiuses), self.enclosed_shape, self.meters_per_pixel) + @property def rotation(self) -> scipy.spatial.transform.Rotation: return self._representative_circle.rotation diff --git a/contrib/common/lib/cv/annotations/MomentsAnnotation.py b/contrib/common/lib/cv/annotations/MomentsAnnotation.py index 86dd299e9..699b195ca 100644 --- a/contrib/common/lib/cv/annotations/MomentsAnnotation.py +++ b/contrib/common/lib/cv/annotations/MomentsAnnotation.py @@ -50,6 +50,83 @@ def get_bounding_box(self, index=0) -> reg.RegionXY: def origin(self) -> p2.Pxy: return self.centroid + def translate(self, translations: p2.Pxy): + old_m00 = self.moments["m00"] + old_m10 = self.moments["m10"] + old_m01 = self.moments["m01"] + old_u11 = self.central_moment(1, 1) + old_u20 = self.central_moment(2, 0) + old_u02 = self.central_moment(0, 2) + old_u21 = 
self.central_moment(2, 1) + old_u12 = self.central_moment(1, 2) + old_u30 = self.central_moment(3, 0) + old_u03 = self.central_moment(0, 3) + + Tx = translations.x[0] + Ty = translations.y[0] + + # Example calculated values for an elipse at 135 degrees, size 100x50: + # center at 50, 50 at 60, 70 + # m00 1031985.0 1031985.0 + # m10 51626280.0 61946130.0 + # m01 51634950.0 72274650.0 + # m11 2825640210.0 4580912310.0 + # m20 2994052920.0 4129777020.0 + # m02 2995071900.0 5473263900.0 + # m12 174088606080.0 362497365480.0 + # m21 174061884630.0 318333724230.0 + # m30 190942591890.0 297284048490.0 + # m03 191033689110.0 440955823110.0 + # u00 1031985.0 1031985.0 + # u01 0 0 + # u10 0 0 + # u11 242540274.9369 242540274.9369 + # u20 411386712.0237 411386712.0237 + # u02 411538165.0111 411538165.0111 + # u21 -11069058.1853 -11069058.1854 + # u12 -14244705.8617 -14244705.8616 + # u30 1303451.534790 1303451.534790 + # u03 -5805601.44134 -5805601.44128 + + # central_moment formulas: + # u00 = m00 + # u01 = 0 + # u10 = 0 + # u11 = m11 - m01 / m00 * m10 + # u20 = m20 - m10**2 / m00 + # u02 = m02 - m01**2 / m00 + # u21 = m21 - 2 * m10 / m00 * m11 - m01 / m00 * m20 + 2 * m10**2 / m00**2 * m01 + # u12 = m12 - 2 * m01 / m00 * m11 - m10 / m00 * m02 + 2 * m01**2 / m00**2 * m10 + # u30 = m30 - 3 * m10 / m00 * m20 + 2 * m10**3 / m00**2 + # u03 = m03 - 3 * m01 / m00 * m02 + 2 * m01**3 / m00**2 + + # fmt: off + new_m00 = old_m00 + new_m10 = old_m10 + old_m00 * Tx + new_m01 = old_m01 + old_m00 * Ty + new_m11 = old_u11 + new_m01 / new_m00 * new_m10 + new_m20 = old_u20 + new_m10**2 / new_m00 + new_m02 = old_u02 + new_m01**2 / new_m00 + new_m21 = old_u21 + 2 * new_m10 / new_m00 * new_m11 + new_m01 / new_m00 * new_m20 - 2 * new_m10**2 / new_m00**2 * new_m01 + new_m12 = old_u12 + 2 * new_m01 / new_m00 * new_m11 + new_m10 / new_m00 * new_m02 - 2 * new_m01**2 / new_m00**2 * new_m10 + new_m30 = old_u30 + 3 * new_m10 / new_m00 * new_m20 - 2 * new_m10**3 / new_m00**2 + new_m03 = old_u03 + 3 * 
new_m01 / new_m00 * new_m02 - 2 * new_m01**3 / new_m00**2 + # fmt: on + + new_moments = copy.copy(self.moments) + new_moments["m00"] = new_m00 + new_moments["m10"] = new_m10 + new_moments["m01"] = new_m01 + new_moments["m11"] = new_m11 + new_moments["m20"] = new_m20 + new_moments["m02"] = new_m02 + new_moments["m21"] = new_m21 + new_moments["m12"] = new_m12 + new_moments["m30"] = new_m30 + new_moments["m03"] = new_m03 + + return self.__class__(new_moments, self.style, self.rotation_style) + @cached_property def rotation_angle_2d(self) -> float: # from https://en.wikipedia.org/wiki/Image_moment diff --git a/contrib/common/lib/cv/annotations/RectangleAnnotations.py b/contrib/common/lib/cv/annotations/RectangleAnnotations.py index f3f736bff..043b72ab4 100644 --- a/contrib/common/lib/cv/annotations/RectangleAnnotations.py +++ b/contrib/common/lib/cv/annotations/RectangleAnnotations.py @@ -23,7 +23,7 @@ def __init__( self, style: rcps.RenderControlPointSeq = None, upperleft_lowerright_corners: tuple[p2.Pxy, p2.Pxy] = None, - pixels_to_meters: float = None, + meters_per_pixel: float = None, ): """ Parameters @@ -32,7 +32,7 @@ def __init__( The rendering style, by default {magenta, no corner markers} upperleft_lowerright_corners : Pxy The upper-left and lower-right corners of the bounding box for this rectangle, in pixels - pixels_to_meters : float, optional + meters_per_pixel : float, optional A simple conversion method for how many meters a pixel represents, for use in scale(). By default None. 
""" @@ -41,7 +41,7 @@ def __init__( super().__init__(style) self.points = upperleft_lowerright_corners - self.pixels_to_meters = pixels_to_meters + self.meters_per_pixel = meters_per_pixel def get_bounding_box(self, index=0) -> reg.RegionXY: x1 = self.points[0].x[index] @@ -57,6 +57,11 @@ def get_bounding_box(self, index=0) -> reg.RegionXY: def origin(self) -> p2.Pxy: return self.points[0] + def translate(self, translation: p2.Pxy): + upper_left = self.points[0] + translation + lower_right = self.points[1] + translation + return self.__class__(self.style, (upper_left, lower_right), self.meters_per_pixel) + @property def rotation(self) -> scipy.spatial.transform.Rotation: raise NotImplementedError("Orientation is not yet implemented for RectangleAnnotations") @@ -69,13 +74,13 @@ def size(self) -> list[float]: @property def scale(self) -> list[float]: - if self.pixels_to_meters is None: + if self.meters_per_pixel is None: lt.error_and_raise( RuntimeError, "Error in RectangeAnnotations.scale(): " - + "no pixels_to_meters conversion ratio is set, so scale can't be estimated", + + "no meters_per_pixel conversion ratio is set, so scale can't be estimated", ) - return [self.size * self.pixels_to_meters] + return [self.size * self.meters_per_pixel] def render_to_figure(self, fig: rcfr.RenderControlFigureRecord, image: np.ndarray = None, include_label=False): label = self.get_label(include_label) diff --git a/contrib/common/lib/cv/annotations/SpotWidthAnnotation.py b/contrib/common/lib/cv/annotations/SpotWidthAnnotation.py index c59b8d6f9..9e38937e0 100644 --- a/contrib/common/lib/cv/annotations/SpotWidthAnnotation.py +++ b/contrib/common/lib/cv/annotations/SpotWidthAnnotation.py @@ -71,6 +71,19 @@ def single_width_bounding_box(center: p2.Pxy, width: float) -> reg.RegionXY: def origin(self) -> p2.Pxy: return self.centroid_loc + def translate(self, translation: p2.Pxy): + centroid_loc = self.centroid_loc + translation + long_axis_center = self.long_axis_center + translation 
+ return self.__class__( + self.spot_width_technique, + centroid_loc, + self.width, + self.long_axis_rotation, + long_axis_center, + self.orthogonal_axis_width, + self.style, + ) + @property def rotation(self) -> scipy.spatial.transform.Rotation: if self.spot_width_technique == "fwhm": diff --git a/contrib/common/lib/cv/annotations/test/TestMomentsAnnotation.py b/contrib/common/lib/cv/annotations/test/TestMomentsAnnotation.py new file mode 100644 index 000000000..ce5dbddbc --- /dev/null +++ b/contrib/common/lib/cv/annotations/test/TestMomentsAnnotation.py @@ -0,0 +1,123 @@ +import cv2 as cv +import numpy as np +from PIL import Image +import unittest + +from opencsp.common.lib.cv.CacheableImage import CacheableImage +from contrib.common.lib.cv.spot_analysis.image_processor.MomentsImageProcessor import MomentsImageProcessor +from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable +from opencsp.common.lib.cv.SpotAnalysis import SpotAnalysis +import opencsp.common.lib.geometry.Pxy as p2 + + +class TestImportAnnotations(unittest.TestCase): + def setUp(self): + self._build_ellipse_img() + + return super().setUp() + + def _build_ellipse_img(self): + dimensions = (140, 140, 3) + base_image = np.zeros(dimensions, dtype="uint8") + center = (70, 70) + axes_lengths = (50, 25) + angle = 45 + s, e = 0, 360 # start, end angles + color = (255, 255, 255) + thickness = -1 # fill + self.ellipse_img = cv.ellipse(base_image, center, axes_lengths, angle, s, e, color, thickness) + + def test_centroid(self): + """Test that the moments annotation produces the correct value when asked for the centroid""" + # import here to avoid import cycles + import contrib.common.lib.cv.annotations.MomentsAnnotation as manno + + # calculate the moments + processor = MomentsImageProcessor() + sa = SpotAnalysis(self._testMethodName, [processor]) + sa.set_primary_images([self.ellipse_img]) + for result in sa: + pass + + # verify that the centroid is in the center + moments: 
manno.MomentsAnnotation = result.get_fiducials_by_type(manno.MomentsAnnotation)[0] + self.assertAlmostEqual(moments.cX, 70, delta=0.1) + self.assertAlmostEqual(moments.cY, 70, delta=0.1) + + def test_rotation(self): + """Test that the moments annotation produces the correct value when asked for the rotation""" + # import here to avoid import cycles + import contrib.common.lib.cv.annotations.MomentsAnnotation as manno + + # calculate the moments + processor = MomentsImageProcessor() + sa = SpotAnalysis(self._testMethodName, [processor]) + sa.set_primary_images([self.ellipse_img]) + for result in sa: + pass + + # verify that the rotation is at 45 degrees + moments: manno.MomentsAnnotation = result.get_fiducials_by_type(manno.MomentsAnnotation)[0] + self.assertAlmostEqual( + np.rad2deg(moments.rotation_angle_2d), 135, delta=0.1 + ) # 135 degrees is an acceptable answer + + def test_eccentricity(self): + """Regression test to check that the eccentricity value hasn't changed since the last time this test was run.""" + # import here to avoid import cycles + import contrib.common.lib.cv.annotations.MomentsAnnotation as manno + + # calculate the moments + processor = MomentsImageProcessor() + sa = SpotAnalysis(self._testMethodName, [processor]) + sa.set_primary_images([self.ellipse_img]) + for result in sa: + pass + + # verify that the eccentricity matches the mathematical definition + # e = sqrt(1 - b**2/a**2) = 0.8660254037844386 + moments: manno.MomentsAnnotation = result.get_fiducials_by_type(manno.MomentsAnnotation)[0] + self.assertAlmostEqual(moments.eccentricity_untested, 0.8660254037844386, delta=0.01) + + def test_translate(self): + # import here to avoid import cycles + import contrib.common.lib.cv.annotations.MomentsAnnotation as manno + + # calculate the moments for the untranslated image + processor = MomentsImageProcessor() + sa = SpotAnalysis(self._testMethodName, [processor]) + sa.set_primary_images([self.ellipse_img]) + for result in sa: + act_moments: 
manno.MomentsAnnotation = result.get_fiducials_by_type(manno.MomentsAnnotation)[0] + act_moments = act_moments.translate(p2.Pxy([10, 20])) + + # translate the image and recalculate + t_ellipse_img = np.zeros_like(self.ellipse_img) + t_ellipse_img[20:, 10:, :] = self.ellipse_img[:-20, :-10, :] + t_processor = MomentsImageProcessor() + t_sa = SpotAnalysis(self._testMethodName, [t_processor]) + t_sa.set_primary_images([t_ellipse_img]) + for t_result in t_sa: + t_moments: manno.MomentsAnnotation = t_result.get_fiducials_by_type(manno.MomentsAnnotation)[0] + + # verify that the moments have changed as expected + self.assertAlmostEqual(t_moments.moments["m00"], act_moments.moments["m00"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m10"], act_moments.moments["m10"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m01"], act_moments.moments["m01"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m11"], act_moments.moments["m11"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m20"], act_moments.moments["m20"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m02"], act_moments.moments["m02"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m12"], act_moments.moments["m12"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m21"], act_moments.moments["m21"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m30"], act_moments.moments["m30"], delta=0.1) + self.assertAlmostEqual(t_moments.moments["m03"], act_moments.moments["m03"], delta=0.1) + + # verify that the centroid is the only changed value + self.assertAlmostEqual(act_moments.cX, 80, delta=0.1) + self.assertAlmostEqual(act_moments.cY, 90, delta=0.1) + self.assertAlmostEqual(np.rad2deg(act_moments.rotation_angle_2d), 135, delta=0.1) + self.assertAlmostEqual(act_moments.eccentricity_untested, 0.86, delta=0.01) + + +if __name__ == "__main__": + unittest.main() diff --git a/contrib/common/lib/cv/spot_analysis/image_processor/SaveToFileImageProcessor.py 
b/contrib/common/lib/cv/spot_analysis/image_processor/SaveToFileImageProcessor.py index aaa519897..2cee980bc 100644 --- a/contrib/common/lib/cv/spot_analysis/image_processor/SaveToFileImageProcessor.py +++ b/contrib/common/lib/cv/spot_analysis/image_processor/SaveToFileImageProcessor.py @@ -20,10 +20,10 @@ def __init__( save_ext="png", prefix: str | Callable[[SpotAnalysisOperable], str] = None, suffix: str | Callable[[SpotAnalysisOperable], str] = None, - save_primary: str | None = "", - save_supporting: str | None = None, - save_visualizations: str | None = None, - save_algorithms: str | None = None, + save_primary: str | bool | None = "", + save_supporting: str | bool | None = None, + save_visualizations: str | bool | None = None, + save_algorithms: str | bool | None = None, primary_log_level=lt.log.INFO, ): """ @@ -39,19 +39,29 @@ def __init__( suffix : str or callable, optional The suffix for the saved image file names. If a callable, it will be called with the SpotAnalysisOperable as an argument. Default is None. - save_primary : str, optional - The subdirectory for saving primary images to (default is an empty string). - save_supporting : str, optional - The subdirectory for saving supporting images to (default is None). - save_visualizations : str, optional - The subdirectory for saving visualization images to (default is None). - save_algorithms : str, optional - The subdirectory for saving algorithm images to (default is None). + save_primary : str | bool, optional + The subdirectory for saving primary images to (default is an empty string). If True, then the default value "primary" is used. If False, then the primary image isn't saved. + save_supporting : str | bool, optional + The subdirectory for saving supporting images to (default is None). If True, then the default value "supporting" is used. If False, then the supporting images aren't saved. 
+ save_visualizations : str | bool, optional + The subdirectory for saving visualization images to (default is None). If True, then the default value "visualizations" is used. If False, then the visualizations images aren't saved. + save_algorithms : str | bool, optional + The subdirectory for saving algorithm images to (default is None). If True, then the default value "algorithms" is used. If False, then the algorithms images aren't saved. primary_log_level : int, optional The log level for where the primary image is saved to (default is INFO). """ super().__init__() + # normalize values + if save_primary == True or save_primary == False: + save_primary = "primary" if save_primary else None + if save_supporting == True or save_supporting == False: + save_supporting = "supporting" if save_supporting else None + if save_visualizations == True or save_visualizations == False: + save_visualizations = "visualizations" if save_visualizations else None + if save_algorithms == True or save_algorithms == False: + save_algorithms = "algorithms" if save_algorithms else None + # register parameters self.save_dir = save_dir self.save_ext = save_ext diff --git a/opencsp/__init__.py b/opencsp/__init__.py index f1c5be981..e5db670c9 100644 --- a/opencsp/__init__.py +++ b/opencsp/__init__.py @@ -58,6 +58,14 @@ def __getattr__(self, name): # Delegate attribute access to the class instance return getattr(self._load(), name) + def __instancecheck__(self, instance): + # Delegate instance check to the class instance + return isinstance(instance, self._load()) + + def __subclasscheck__(self, instance): + # Delegate subclass check to the class instance + return issubclass(instance, self._load()) + if platform.system() == 'Darwin': # On Mac, force matplotlib to use the TkAgg. 
diff --git a/opencsp/common/lib/cv/fiducials/AbstractFiducials.py b/opencsp/common/lib/cv/fiducials/AbstractFiducials.py index 518d296c5..b22ed30f4 100644 --- a/opencsp/common/lib/cv/fiducials/AbstractFiducials.py +++ b/opencsp/common/lib/cv/fiducials/AbstractFiducials.py @@ -77,9 +77,18 @@ def origin(self) -> p2.Pxy: p2.Pxy The origin point(s) of the fiducial. """ - # "ChatGPT 4o" assisted with generating this docstring. + @abstractmethod + def translate(self, translation: p2.Pxy) -> "AbstractFiducials": + """Moves the location for this instance by the amount given. After this method is called, origin() should return a new value equal to the old origin + the given translation. + + Parameters + ---------- + translation : p2.Pxy + Amount to move this instance by. + """ + @property @abstractmethod def rotation(self) -> scipy.spatial.transform.Rotation: diff --git a/opencsp/common/lib/cv/fiducials/BcsFiducial.py b/opencsp/common/lib/cv/fiducials/BcsFiducial.py index 753e26a65..58e763eb2 100644 --- a/opencsp/common/lib/cv/fiducials/BcsFiducial.py +++ b/opencsp/common/lib/cv/fiducials/BcsFiducial.py @@ -18,7 +18,7 @@ class BcsFiducial(AbstractFiducials): """ def __init__( - self, origin_px: p2.Pxy, radius_px: float, style: rcb.RenderControlBcs = None, pixels_to_meters: float = 0.1 + self, origin_px: p2.Pxy, radius_px: float, style: rcb.RenderControlBcs = None, meters_per_pixel: float = 0.1 ): """ Initializes the BcsFiducial with the specified origin, radius, style, and pixel-to-meter conversion. @@ -31,7 +31,7 @@ def __init__( The radius of the BCS target, in pixels. style : rcb.RenderControlBcs, optional The rendering style for the fiducial. Defaults to None. - pixels_to_meters : float, optional + meters_per_pixel : float, optional A conversion factor for how many meters a pixel represents, for use in scale(). Defaults to 0.1. """ # "ChatGPT 4o" assisted with generating this docstring. 
@@ -39,7 +39,7 @@ def __init__( super().__init__(style=style) self.origin_px = origin_px self.radius_px = radius_px - self.pixels_to_meters = pixels_to_meters + self.meters_per_pixel = meters_per_pixel def get_bounding_box(self, index=0) -> reg.RegionXY: """ @@ -77,6 +77,9 @@ def origin(self) -> p2.Pxy: # "ChatGPT 4o" assisted with generating this docstring. return self.origin_px + def translate(self, translation: p2.Pxy): + return self.__class__(self.origin_px + translation, self.radius_px, self.style, self.meters_per_pixel) + @property def rotation(self) -> scipy.spatial.transform.Rotation: """ @@ -114,7 +117,7 @@ def scale(self) -> list[float]: A list containing a single value: the size of the BCS target, in meters. """ # "ChatGPT 4o" assisted with generating this docstring. - return [self.size * self.pixels_to_meters] + return [self.size * self.meters_per_pixel] def render_to_figure(self, fig: rcfr.RenderControlFigureRecord, image: np.ndarray, include_label=False): # This method adds a circle and a marker to the axes based on the style defined for the fiducial. diff --git a/opencsp/common/lib/cv/fiducials/PointFiducials.py b/opencsp/common/lib/cv/fiducials/PointFiducials.py index 27b237945..e09ef1241 100644 --- a/opencsp/common/lib/cv/fiducials/PointFiducials.py +++ b/opencsp/common/lib/cv/fiducials/PointFiducials.py @@ -61,6 +61,9 @@ def origin(self) -> p2.Pxy: # "ChatGPT 4o" assisted with generating this docstring. 
return self.points + def translate(self, translation: p2.Pxy): + return self.__class__(self.style, self.points + translation) + @property def rotation(self) -> scipy.spatial.transform.Rotation: """ diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py index 0f5634152..120b5e4fc 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/CroppingImageProcessor.py @@ -2,11 +2,14 @@ import dataclasses from typing import Callable +import numpy as np + from opencsp.common.lib.cv.CacheableImage import CacheableImage from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable from opencsp.common.lib.cv.spot_analysis.image_processor.AbstractSpotAnalysisImageProcessor import ( AbstractSpotAnalysisImageProcessor, ) +import opencsp.common.lib.geometry.Pxy as p2 import opencsp.common.lib.opencsp_path.opencsp_root_path as orp import opencsp.common.lib.tool.file_tools as ft import opencsp.common.lib.tool.image_tools as it @@ -84,7 +87,7 @@ def by_region(cls, x1x2y1y2: tuple[int, int, int, int] | Callable[[SpotAnalysisO y1: The top side of the box to crop to (inclusive). y2: The bottom side of the box to crop to (exclusive). 
""" - return cls(x1x2y1y1=x1x2y1y2) + return cls(x1x2y1y2=x1x2y1y2) @classmethod def by_center_and_size( @@ -123,17 +126,23 @@ def validate_x1x2y1y2(self, x1x2y1y2: tuple[int, int, int, int], debug_name: str def validate_center(self, center_xy: tuple[int, int], operable: SpotAnalysisOperable | None, debug_name: str): center_x, center_y = center_xy - err_msg = ( - "Error in CroppingImageProcessor.crop_around_location(): " - + f"centered location ({center_x}, {center_y}) is out of image bounds (width: {w}, height: {h}) " - + f"for {debug_name}" - ) # verify that the center is inside the image boundaries - if center_x < 0 or center_x >= w or center_y < 0 or center_y >= h: - lt.error_and_raise(RuntimeError, err_msg) + if center_x < 0 or center_y < 0: + lt.error_and_raise( + RuntimeError, + "Error in CroppingImageProcessor.crop_around_location(): " + + f"centered location ({center_x}, {center_y}) is out of image bounds", + ) if operable is not None: (h, w), _ = it.dims_and_nchannels(operable.primary_image.nparray) + err_msg = ( + "Error in CroppingImageProcessor.crop_around_location(): " + + f"centered location ({center_x}, {center_y}) is out of image bounds (width: {w}, height: {h}) " + + f"for {debug_name}" + ) + if center_x >= w or center_y >= h: + lt.error_and_raise(RuntimeError, err_msg) if center_x >= w or center_y >= h: lt.error_and_raise(RuntimeError, err_msg) @@ -164,7 +173,40 @@ def validate_width_height( + f"for {debug_name}!", ) - def crop_by_bounding_box(self, operable: SpotAnalysisOperable) -> tuple[CacheableImage, list]: + def _crop_image( + self, + operable: SpotAnalysisOperable, + x1: int, + x2: int, + y1: int, + y2: int, + additional_notes: list[tuple[str, str]], + ) -> SpotAnalysisOperable: + # crop the image + image = operable.primary_image.nparray + (h, w), _ = it.dims_and_nchannels(image) + lt.debug("In CroppingImageProcessor(): " + f"cropping image from [0:{w},0:{h}] to [{x1}:{x2},{y1}:{y2}]") + cropped = image[y1:y2, x1:x2] + new_primary = 
CacheableImage.from_single_source(cropped) + + # apply the crop to the annotations + given_fiducials = operable.given_fiducials + found_fiducials = operable.found_fiducials + annotations = operable.annotations + for annots in [given_fiducials, found_fiducials, annotations]: + for i, annot in enumerate(annots): + annots[i] = annot.translate(p2.Pxy([-x1, -y1])) + + # apply the changes to the notes + image_processor_notes = copy.copy(operable.image_processor_notes) + image_processor_notes += additional_notes + + # build the new operable + ret = dataclasses.replace(operable, primary_image=new_primary, image_processor_notes=image_processor_notes) + + return ret + + def crop_by_bounding_box(self, operable: SpotAnalysisOperable) -> SpotAnalysisOperable: """ Parameters ---------- @@ -182,8 +224,8 @@ def crop_by_bounding_box(self, operable: SpotAnalysisOperable) -> tuple[Cacheabl self.validate_x1x2y1y2((x1, x2, y1, y2), f"operable '{operable.best_primary_pathnameext}'") # check the size of the image - (h, w), _ = it.dims_and_nchannels(img) - if w < x2 - 1 or h < y2 - 1: + (h, w), _ = it.dims_and_nchannels(image) + if x1 >= w or y1 >= h or x2 > w or y2 > h: lt.error_and_raise( ValueError, "Error in CroppingImageProcessor._execute(): " @@ -191,15 +233,12 @@ def crop_by_bounding_box(self, operable: SpotAnalysisOperable) -> tuple[Cacheabl ) # create the cropped image - cropped = image[y1:y2, x1:x2] - new_primary = CacheableImage(cropped) + new_notes = [("CroppingImageProcessor", [f"{x1}", f"{x2}", f"{y1}", f"{y2}"])] + new_operable = self._crop_image(operable, x1, x2, y1, y2, new_notes) - image_processor_notes = copy.copy(operable.image_processor_notes) - image_processor_notes.append(("CroppingImageProcessor", [f"{x1}", f"{x2}", f"{y1}", f"{y2}"])) + return new_operable - return new_primary, image_processor_notes - - def crop_around_center(self, operable: SpotAnalysisOperable) -> tuple[CacheableImage, list]: + def crop_around_center(self, operable: SpotAnalysisOperable) -> 
SpotAnalysisOperable: """ Parameters ---------- @@ -228,51 +267,46 @@ def crop_around_center(self, operable: SpotAnalysisOperable) -> tuple[CacheableI # Calculate the cropping coordinates. # Remember that the width and height must match the requested value. - half_height = height // 2 - half_width = width // 2 + half_height = int(np.ceil(height / 2)) + half_width = int(np.ceil(width / 2)) - y1 = max(center_y - half_height, 0) - center_y = y1 + half_height y2 = min(center_y + half_height, h) - center_y, y1 = y2 - half_height, y2 - height - x1 = max(center_x - half_width, 0) - center_x = x1 + half_width + y1 = max(y2 - height, 0) + y2 = min(y1 + height, h) x2 = min(center_x + half_width, w) - center_x, x1 = x2 - half_width, x2 - width + x1 = max(x2 - width, 0) + x2 = min(x1 + width, w) # Sanity check assert x2 > x1 assert y2 > y1 assert x1 >= 0 assert y1 >= 0 + assert x2 - x1 == width + assert y2 - y1 == height # Create the cropped image - lt.info("In CroppingImageProcessor(): " + f"cropping image from [0:{w},0:{h}] to [{x1}:{x2},{y1}:{y2}]") - cropped = image[y1:y2, x1:x2] - new_primary = CacheableImage.from_single_source(cropped) - - image_processor_notes = copy.copy(operable.image_processor_notes) - image_processor_notes.append( + new_notes = [ ( "CroppingImageProcessor", [f"centered at ({center_x}, {center_y})", f"width: {width}", f"height: {height}"], ) - ) + ] + new_operable = self._crop_image(operable, x1, x2, y1, y2, new_notes) - return new_primary, image_processor_notes + return new_operable def _execute(self, operable: SpotAnalysisOperable, is_last: bool) -> list[SpotAnalysisOperable]: if self.x1x2y1y2 is not None: - new_primary, image_processor_notes = self.crop_by_bounding_box(operable) + ret = self.crop_by_bounding_box(operable) elif self.centered_location is not None: - new_primary, image_processor_notes = self.crop_around_center(operable) + ret = self.crop_around_center(operable) else: lt.error_and_raise( ValueError, "Error in CroppingImageProcessor(): 
" + "unknown cropping method encountered in _execute() method", ) - ret = dataclasses.replace(operable, primary_image=new_primary, image_processor_notes=image_processor_notes) return [ret] diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/ViewFalseColorImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/ViewFalseColorImageProcessor.py index 8867f84f1..3feac72cc 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/ViewFalseColorImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/ViewFalseColorImageProcessor.py @@ -139,7 +139,7 @@ def visualize_operable( lt.error_and_raise( ValueError, f"Error in {self.name}.visualize_operable(): " - + f"image should be in grayscale, but {nchannels} color channels were found ({base_image.shape=})!", + + f"image should be in grayscale, but {nchannels} color channels were found ({base_image.nparray.shape=})!", ) # apply the false color mapping diff --git a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py index edeed040a..ee757e0d3 100644 --- a/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py +++ b/opencsp/common/lib/cv/spot_analysis/image_processor/test/TestCroppingImageProcessor.py @@ -1,10 +1,12 @@ import numpy as np import os import unittest + from opencsp.common.lib.cv.CacheableImage import CacheableImage +from opencsp.common.lib.cv.SpotAnalysis import SpotAnalysis from opencsp.common.lib.cv.spot_analysis.SpotAnalysisOperable import SpotAnalysisOperable from opencsp.common.lib.cv.spot_analysis.image_processor.CroppingImageProcessor import CroppingImageProcessor - +import opencsp.common.lib.geometry.Pxy as p2 import opencsp.common.lib.tool.file_tools as ft @@ -16,30 +18,90 @@ def setUp(self) -> None: ft.create_directories_if_necessary(self.data_dir) ft.create_directories_if_necessary(self.out_dir) - 
def test_valid_crop(self): + def test_crop_by_region(self): tenbyfive = CacheableImage(np.arange(50).reshape((5, 10))) # [[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], # [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], # [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], # [30, 31, 32, 33, 34, 35, 36, 37, 38, 39], # [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]] - - processor = CroppingImageProcessor(x1=1, x2=9, y1=2, y2=4) + expected = np.array([[21, 22, 23, 24, 25, 26, 27, 28], [31, 32, 33, 34, 35, 36, 37, 38]]) operable = SpotAnalysisOperable(tenbyfive, "tenbyfive") + + # crop with a static value + processor = CroppingImageProcessor.by_region((1, 9, 2, 4)) result = processor.process_operable(operable)[0] cropped_image = result.primary_image.nparray + np.testing.assert_array_equal(cropped_image, expected) - expected = np.array([[21, 22, 23, 24, 25, 26, 27, 28], [31, 32, 33, 34, 35, 36, 37, 38]]) - + # crop with a callable + processor = CroppingImageProcessor.by_region(lambda op: (1, 9, 2, 4)) + result = processor.process_operable(operable)[0] + cropped_image = result.primary_image.nparray np.testing.assert_array_equal(cropped_image, expected) + def test_crop_by_center_and_size(self): + tenbyfive = CacheableImage(np.arange(50).reshape((5, 10))) + # [[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + # [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], + # [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], + # [30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + # [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]] + expected = np.array([[21, 22, 23, 24]]) + operable = SpotAnalysisOperable(tenbyfive, "tenbyfive") + + # crop with static and dynamic values + for center in [(3, 2), (lambda op: (3, 2))]: + for width_height in [(4, 1), (lambda op: (4, 1))]: + processor = CroppingImageProcessor.by_center_and_size(center, width_height) + result = processor.process_operable(operable)[0] + cropped_image = result.primary_image.nparray + np.testing.assert_array_equal(cropped_image, expected) + def test_bad_input_raises_error(self): tenbyfive = 
np.arange(50).reshape((5, 10)) - processor = CroppingImageProcessor(x1=1, x2=90, y1=2, y2=40) + # bad left + with self.assertRaises(ValueError): + processor = CroppingImageProcessor.by_region((-1, 9, 2, 4)) + with self.assertRaises(ValueError): + processor = CroppingImageProcessor.by_region((9, 9, 2, 4)) + + # bad top + with self.assertRaises(ValueError): + processor = CroppingImageProcessor.by_region((1, 9, -1, 4)) + with self.assertRaises(ValueError): + processor = CroppingImageProcessor.by_region((1, 9, 4, 4)) + + # bad right + processor = CroppingImageProcessor.by_region((1, 11, 2, 4)) + with self.assertRaises(ValueError): + processor.process_operable(SpotAnalysisOperable(tenbyfive)) + + # bad bottom + processor = CroppingImageProcessor.by_region((1, 9, 2, 6)) with self.assertRaises(ValueError): processor.process_operable(SpotAnalysisOperable(tenbyfive)) + def test_crop_annotations(self): + from contrib.common.lib.cv.annotations.RectangleAnnotations import RectangleAnnotations + + hundredbyhundred = CacheableImage(np.arange(100**2).reshape((100, 100))) + annotation0 = RectangleAnnotations(upperleft_lowerright_corners=(p2.Pxy([40, 40]), p2.Pxy([50, 50]))) + operable0 = SpotAnalysisOperable(hundredbyhundred, "hundredbyhundred0", given_fiducials=[annotation0]) + annotation1 = RectangleAnnotations(upperleft_lowerright_corners=(p2.Pxy([90, 90]), p2.Pxy([100, 100]))) + operable1 = SpotAnalysisOperable(hundredbyhundred, "hundredbyhundred1", given_fiducials=[annotation1]) + + processor = CroppingImageProcessor.by_region((10, 90, 10, 90)) + spot_analysis = SpotAnalysis("test_crop_annotations", image_processors=[processor]) + spot_analysis.set_input_operables([operable0, operable1]) + result0, result1 = tuple([r for r in spot_analysis]) + + new_annot0: RectangleAnnotations = result0.get_fiducials_by_type(RectangleAnnotations)[0] + new_annot1: RectangleAnnotations = result1.get_fiducials_by_type(RectangleAnnotations)[0] + self.assertEqual(new_annot0.origin.astuple(), (30, 
30)) + self.assertEqual(new_annot1.origin.astuple(), (80, 80)) + if __name__ == "__main__": unittest.main() diff --git a/opencsp/common/lib/tool/file_tools.py b/opencsp/common/lib/tool/file_tools.py index 09e4ccbca..8aa182504 100755 --- a/opencsp/common/lib/tool/file_tools.py +++ b/opencsp/common/lib/tool/file_tools.py @@ -7,6 +7,7 @@ import csv from datetime import datetime +import errno import glob import json import os @@ -725,7 +726,14 @@ def default_output_path(file_path_name_ext: Optional[str] = None) -> str: return _output_paths[file_path_name_ext] -def rename_file(input_dir_body_ext: str, output_dir_body_ext: str, is_file_check_only=False, retries=20, delay=2): +def rename_file( + input_dir_body_ext: str, + output_dir_body_ext: str, + is_file_check_only=False, + nattempts=20, + delay=2, + cross_filesys_check=True, +): """Move a file from input to output. Verifies that input is a file, and that the output doesn't exist. We check @@ -738,9 +746,15 @@ def rename_file(input_dir_body_ext: str, output_dir_body_ext: str, is_file_check The "dir/body.ext" of the source file to be renamed. output_dir_body_ext: str, optional The destinaion "dir/body.ext" of the file. - is_file_check_only (bool): + is_file_check_only: bool, optional If True, then only check that input_dir_body_ext is a file. Otherwise, check everything. Default is False. + nattempts: int, optional + The number of times to try to rename the file. Should be at least 1. Default is 20. + delay: float, optional + How many seconds to wait between a failed rename attempt and another attempt. Default is 2s. + cross_filesys_check: bool, optional + If True, then check for a cross-filesystem error after the first rename fails. If that is detected as the cause, then rename the file using copy_and_delete_file() instead. Default is True. 
See also: copy_file(), copy_and_delete_file() """ @@ -783,11 +797,17 @@ def rename_file(input_dir_body_ext: str, output_dir_body_ext: str, is_file_check + str(os.path.dirname(output_dir_body_ext)), ) # Rename the file. - osError = None + osError: OSError = None - for attempt in range(retries): + for attempt in range(nattempts): try: - os.rename(input_dir_body_ext, output_dir_body_ext) + if (attempt > 0) and (cross_filesys_check) and (osError is not None) and (osError.errno == errno.EXDEV): + # cross-filesystem error, try again with a different function + copy_and_delete_file(input_dir_body_ext, output_dir_body_ext) + else: # attempt == 0 or osError.errno != errno.EXDEV + # first attempt or unknown error, try to rename + os.rename(input_dir_body_ext, output_dir_body_ext) + # Verify the rename if not is_file_check_only: if not os.path.exists(output_dir_body_ext): @@ -798,7 +818,12 @@ def rename_file(input_dir_body_ext: str, output_dir_body_ext: str, is_file_check return except OSError as e: print(f"Attempt {attempt + 1}: {e}") - time.sleep(delay) + if (cross_filesys_check) and (e.errno == errno.EXDEV): + # for cross-filesystem errors, just try again right away with copy_and_delete_file + pass + else: + # for all other errors, wait and see if that clears the issue + time.sleep(delay) osError = e raise osError