From 7f69f6a1682bf101c96b6c91e7fd953aea0b9b2e Mon Sep 17 00:00:00 2001 From: fdloopes Date: Mon, 30 Oct 2023 10:26:09 -0300 Subject: [PATCH 1/4] Add np.maximum when retrieving detections coordinates within the BlurAnnotator.annotate() method. --- supervision/annotators/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervision/annotators/core.py b/supervision/annotators/core.py index 95cea69e7..45da5e72f 100644 --- a/supervision/annotators/core.py +++ b/supervision/annotators/core.py @@ -892,7 +892,7 @@ def annotate( supervision-annotator-examples/blur-annotator-example-purple.png) """ for detection_idx in range(len(detections)): - x1, y1, x2, y2 = detections.xyxy[detection_idx].astype(int) + x1, y1, x2, y2 = np.maximum(detections.xyxy[detection_idx].astype(int),0) roi = scene[y1:y2, x1:x2] roi = cv2.blur(roi, (self.kernel_size, self.kernel_size)) From 8b88f1f415a7923fb52abdf070d7df26c12bea07 Mon Sep 17 00:00:00 2001 From: fdloopes Date: Tue, 31 Oct 2023 11:50:37 -0300 Subject: [PATCH 2/4] Add space after comma, following pre-commit pattern --- supervision/annotators/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervision/annotators/core.py b/supervision/annotators/core.py index 45da5e72f..b6378bc0f 100644 --- a/supervision/annotators/core.py +++ b/supervision/annotators/core.py @@ -892,7 +892,7 @@ def annotate( supervision-annotator-examples/blur-annotator-example-purple.png) """ for detection_idx in range(len(detections)): - x1, y1, x2, y2 = np.maximum(detections.xyxy[detection_idx].astype(int),0) + x1, y1, x2, y2 = np.maximum(detections.xyxy[detection_idx].astype(int), 0) roi = scene[y1:y2, x1:x2] roi = cv2.blur(roi, (self.kernel_size, self.kernel_size)) From ce7e3a1de4eba124e67aec68a983cdc3ea748303 Mon Sep 17 00:00:00 2001 From: SkalskiP Date: Tue, 31 Oct 2023 18:04:13 +0100 Subject: [PATCH 3/4] Refactor clip_boxes function and enhance detections processing Refactored the `clip_boxes` function for clarity 
by renaming arguments from `boxes_xyxy` and `frame_resolution_wh` to `xyxy` and `resolution_wh`, respectively. These changes make the function arguments more intuitive and improve code readability. The processing of detections in `supervision/annotators/core.py` has been updated to include clipping of detection boxes to the image bounds before processing. This prevents errors and ensures detections beyond the image dimensions are handled correctly. Adjustments were also made in the test cases and in `polygon_zone.py` to match the updated `clip_boxes` function. --- supervision/annotators/core.py | 10 +++++++--- supervision/detection/tools/polygon_zone.py | 2 +- supervision/detection/utils.py | 12 +++++------- test/detection/test_utils.py | 8 ++++---- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/supervision/annotators/core.py b/supervision/annotators/core.py index b6378bc0f..8da3a659c 100644 --- a/supervision/annotators/core.py +++ b/supervision/annotators/core.py @@ -7,6 +7,7 @@ from supervision.annotators.base import BaseAnnotator from supervision.annotators.utils import ColorLookup, Trace, resolve_color from supervision.detection.core import Detections +from supervision.detection.utils import clip_boxes from supervision.draw.color import Color, ColorPalette from supervision.geometry.core import Position @@ -891,10 +892,13 @@ def annotate( ![blur-annotator-example](https://media.roboflow.com/ supervision-annotator-examples/blur-annotator-example-purple.png) """ - for detection_idx in range(len(detections)): - x1, y1, x2, y2 = np.maximum(detections.xyxy[detection_idx].astype(int), 0) - roi = scene[y1:y2, x1:x2] + image_height, image_width = scene.shape[:2] + clipped_xyxy = clip_boxes( + xyxy=detections.xyxy, + resolution_wh=(image_width, image_height)).astype(int) + for x1, y1, x2, y2 in clipped_xyxy: + roi = scene[y1:y2, x1:x2] roi = cv2.blur(roi, (self.kernel_size, self.kernel_size)) scene[y1:y2, x1:x2] = roi diff --git 
a/supervision/detection/tools/polygon_zone.py b/supervision/detection/tools/polygon_zone.py index f1dba839a..15ee1aa23 100644 --- a/supervision/detection/tools/polygon_zone.py +++ b/supervision/detection/tools/polygon_zone.py @@ -56,7 +56,7 @@ def trigger(self, detections: Detections) -> np.ndarray: """ clipped_xyxy = clip_boxes( - boxes_xyxy=detections.xyxy, frame_resolution_wh=self.frame_resolution_wh + xyxy=detections.xyxy, resolution_wh=self.frame_resolution_wh ) clipped_detections = replace(detections, xyxy=clipped_xyxy) clipped_anchors = np.ceil( diff --git a/supervision/detection/utils.py b/supervision/detection/utils.py index 7a5eb5469..7aa2c846a 100644 --- a/supervision/detection/utils.py +++ b/supervision/detection/utils.py @@ -110,17 +110,15 @@ def non_max_suppression( return keep[sort_index.argsort()] -def clip_boxes( - boxes_xyxy: np.ndarray, frame_resolution_wh: Tuple[int, int] -) -> np.ndarray: +def clip_boxes(xyxy: np.ndarray, resolution_wh: Tuple[int, int]) -> np.ndarray: """ Clips bounding boxes coordinates to fit within the frame resolution. Args: - boxes_xyxy (np.ndarray): A numpy array of shape `(N, 4)` where each + xyxy (np.ndarray): A numpy array of shape `(N, 4)` where each row corresponds to a bounding box in the format `(x_min, y_min, x_max, y_max)`. - frame_resolution_wh (Tuple[int, int]): A tuple of the form `(width, height)` + resolution_wh (Tuple[int, int]): A tuple of the form `(width, height)` representing the resolution of the frame. Returns: @@ -128,8 +126,8 @@ def clip_boxes( corresponds to a bounding box with coordinates clipped to fit within the frame resolution. 
""" - result = np.copy(boxes_xyxy) - width, height = frame_resolution_wh + result = np.copy(xyxy) + width, height = resolution_wh result[:, [0, 2]] = result[:, [0, 2]].clip(0, width) result[:, [1, 3]] = result[:, [1, 3]].clip(0, height) return result diff --git a/test/detection/test_utils.py b/test/detection/test_utils.py index 09691adf2..6b3249b40 100644 --- a/test/detection/test_utils.py +++ b/test/detection/test_utils.py @@ -122,7 +122,7 @@ def test_non_max_suppression( @pytest.mark.parametrize( - "boxes_xyxy, frame_resolution_wh, expected_result", + "xyxy, resolution_wh, expected_result", [ ( np.empty(shape=(0, 4)), @@ -157,11 +157,11 @@ def test_non_max_suppression( ], ) def test_clip_boxes( - boxes_xyxy: np.ndarray, - frame_resolution_wh: Tuple[int, int], + xyxy: np.ndarray, + resolution_wh: Tuple[int, int], expected_result: np.ndarray, ) -> None: - result = clip_boxes(boxes_xyxy=boxes_xyxy, frame_resolution_wh=frame_resolution_wh) + result = clip_boxes(xyxy=xyxy, resolution_wh=resolution_wh) assert np.array_equal(result, expected_result) From e6a5c514f8408ad750c3ae6743d1c19b7ed030e7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 31 Oct 2023 17:05:10 +0000 Subject: [PATCH 4/4] =?UTF-8?q?fix(pre=5Fcommit):=20=F0=9F=8E=A8=20auto=20?= =?UTF-8?q?format=20pre-commit=20hooks?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- supervision/annotators/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/supervision/annotators/core.py b/supervision/annotators/core.py index 8da3a659c..bd5181abf 100644 --- a/supervision/annotators/core.py +++ b/supervision/annotators/core.py @@ -894,8 +894,8 @@ def annotate( """ image_height, image_width = scene.shape[:2] clipped_xyxy = clip_boxes( - xyxy=detections.xyxy, - resolution_wh=(image_width, image_height)).astype(int) + xyxy=detections.xyxy, resolution_wh=(image_width, image_height) + 
).astype(int) for x1, y1, x2, y2 in clipped_xyxy: roi = scene[y1:y2, x1:x2]