From 4bdea557e27e4bb3d76825af27355309154bd6c1 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Tue, 22 Mar 2022 17:17:38 +0000 Subject: [PATCH 01/19] reverting some recently introduced exceptions --- torchvision/models/detection/_utils.py | 12 ++++-------- torchvision/models/detection/anchor_utils.py | 8 +++----- torchvision/models/detection/faster_rcnn.py | 5 +---- torchvision/models/detection/fcos.py | 8 ++------ torchvision/models/detection/generalized_rcnn.py | 8 ++------ torchvision/models/detection/retinanet.py | 12 ++++-------- torchvision/models/detection/ssd.py | 15 ++++----------- torchvision/models/detection/transform.py | 5 +---- torchvision/ops/_utils.py | 10 +++++----- torchvision/ops/poolers.py | 7 +++---- 10 files changed, 29 insertions(+), 61 deletions(-) diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py index d7126ef681b..36e99e6506d 100644 --- a/torchvision/models/detection/_utils.py +++ b/torchvision/models/detection/_utils.py @@ -159,10 +159,8 @@ def encode_single(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor: return targets def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor: - if not isinstance(boxes, (list, tuple)): - raise TypeError(f"This function expects boxes of type list or tuple, instead got {type(boxes)}") - if not isinstance(rel_codes, torch.Tensor): - raise TypeError(f"This function expects rel_codes of type torch.Tensor, instead got {type(rel_codes)}") + assert isinstance(boxes, (list, tuple)) + assert isinstance(rel_codes, torch.Tensor) boxes_per_image = [b.size(0) for b in boxes] concat_boxes = torch.cat(boxes, dim=0) box_sum = 0 @@ -335,8 +333,7 @@ def __init__(self, high_threshold: float, low_threshold: float, allow_low_qualit """ self.BELOW_LOW_THRESHOLD = -1 self.BETWEEN_THRESHOLDS = -2 - if low_threshold > high_threshold: - raise ValueError("low_threshold should be <= high_threshold") + assert low_threshold <= high_threshold self.high_threshold = high_threshold self.low_threshold = low_threshold self.allow_low_quality_matches = allow_low_quality_matches @@ -374,8 +371,7 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor: matches[between_thresholds] = self.BETWEEN_THRESHOLDS if self.allow_low_quality_matches: - if all_matches is None: - raise ValueError("all_matches should not be None") + assert all_matches is not None self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) return matches diff --git a/torchvision/models/detection/anchor_utils.py b/torchvision/models/detection/anchor_utils.py index 3248fc2e1aa..3a4aee15bc1 100644 --- a/torchvision/models/detection/anchor_utils.py +++ b/torchvision/models/detection/anchor_utils.py @@ -84,9 +84,7 @@ def num_anchors_per_location(self): def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]: anchors = [] cell_anchors = self.cell_anchors - - if cell_anchors is None: - ValueError("cell_anchors should not be None") + assert cell_anchors is not None if not (len(grid_sizes) == len(strides) == len(cell_anchors)): raise ValueError( @@ -164,8 +162,8 @@ def __init__( clip: bool = True, ): super().__init__() - if steps is not None and len(aspect_ratios) != len(steps): - raise ValueError("aspect_ratios and steps should have the same length") + if steps is not None: + assert len(aspect_ratios) == len(steps) self.aspect_ratios = aspect_ratios self.steps = steps self.clip = clip diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py 
index 35cb968d711..5685d82b3ba 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -305,10 +305,7 @@ def __init__(self, in_channels, num_classes): def forward(self, x): if x.dim() == 4: - if list(x.shape[2:]) != [1, 1]: - raise ValueError( - f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}" - ) + assert list(x.shape[2:]) == [1, 1] x = x.flatten(start_dim=1) scores = self.cls_score(x) bbox_deltas = self.bbox_pred(x) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index c15702f5e18..03925f76ff8 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -571,10 +571,7 @@ def forward( original_image_sizes: List[Tuple[int, int]] = [] for img in images: val = img.shape[-2:] - if len(val) != 2: - raise ValueError( - f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}" - ) + assert len(val) == 2 original_image_sizes.append((val[0], val[1])) # transform the input @@ -612,9 +609,8 @@ def forward( losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: + assert targets is not None # compute the losses - if targets is None: - raise ValueError("targets should not be none when in training mode") losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level) else: diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index dba8e5b8148..2b1e159f3bb 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -58,8 +58,7 @@ def forward(self, images, targets=None): """ if self.training: - if targets is None: - raise ValueError("In training mode, targets should be passed") + assert targets is not None for target in targets: boxes = target["boxes"] @@ -72,10 +71,7 @@ def forward(self, images, targets=None): original_image_sizes: List[Tuple[int, int]] = [] for img in images: val = img.shape[-2:] - if len(val) != 2: - raise ValueError( - f"Expecting the last two dimensions of the input tensor to be H and W, instead got {img.shape[-2:]}" - ) + assert len(val) == 2 original_image_sizes.append((val[0], val[1])) images, targets = self.transform(images, targets) diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index 6d6463d6894..a3fe01b1438 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -487,8 +487,8 @@ def forward(self, images, targets=None): like `scores`, `labels` and `mask` (for Mask R-CNN models). 
""" - if self.training and targets is None: - raise ValueError("In training mode, targets should be passed") + if self.training: + assert targets is not None if self.training: if targets is None: @@ -505,10 +505,7 @@ def forward(self, images, targets=None): original_image_sizes: List[Tuple[int, int]] = [] for img in images: val = img.shape[-2:] - if len(val) != 2: - raise ValueError( - f"Expecting the two last elements of the input tensors to be H and W instead got {img.shape[-2:]}" - ) + assert len(val) == 2 original_image_sizes.append((val[0], val[1])) # transform the input @@ -546,8 +543,7 @@ def forward(self, images, targets=None): losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: - if targets is None: - raise ValueError("In training mode, targets should be passed") + assert targets is not None # compute the losses losses = self.compute_loss(targets, head_outputs, anchors) else: diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index bd7f1b2863f..5c5696dff6b 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -311,8 +311,7 @@ def forward( raise ValueError("In training mode, targets should be passed") if self.training: - if targets is None: - raise ValueError("targets should not be None") + assert targets is not None for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): @@ -325,10 +324,7 @@ def forward( original_image_sizes: List[Tuple[int, int]] = [] for img in images: val = img.shape[-2:] - if len(val) != 2: - raise ValueError( - f"The last two dimensions of the input tensors should contain H and W, instead got {img.shape[-2:]}" - ) + assert len(val) == 2 original_image_sizes.append((val[0], val[1])) # transform the input @@ -363,9 +359,7 @@ def forward( losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: - if targets is None: - raise ValueError("targets should not be None when in training mode") - + assert targets is not None matched_idxs = [] for anchors_per_image, targets_per_image in zip(anchors, targets): if targets_per_image["boxes"].numel() == 0: @@ -535,8 +529,7 @@ def _vgg_extractor(backbone: vgg.VGG, highres: bool, trainable_layers: int): num_stages = len(stage_indices) # find the index of the layer from which we wont freeze - if not 0 <= trainable_layers <= num_stages: - raise ValueError(f"trainable_layers should be in the range [0, {num_stages}]. 
Instead got {trainable_layers}") + assert 0 <= trainable_layers <= num_stages freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] for b in backbone[:freeze_before]: diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py index 58b38baee04..960e28500a1 100644 --- a/torchvision/models/detection/transform.py +++ b/torchvision/models/detection/transform.py @@ -134,10 +134,7 @@ def forward( images = self.batch_images(images, size_divisible=self.size_divisible) image_sizes_list: List[Tuple[int, int]] = [] for image_size in image_sizes: - if len(image_size) != 2: - raise ValueError( - f"Input tensors expected to have in the last two elements H and W, instead got {image_size}" - ) + assert len(image_size) == 2 image_sizes_list.append((image_size[0], image_size[1])) image_list = ImageList(images, image_sizes_list) diff --git a/torchvision/ops/_utils.py b/torchvision/ops/_utils.py index 30f28e51c4c..3a07c747f58 100644 --- a/torchvision/ops/_utils.py +++ b/torchvision/ops/_utils.py @@ -28,13 +28,13 @@ def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor: def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]): if isinstance(boxes, (list, tuple)): for _tensor in boxes: - if _tensor.size(1) != 4: - raise ValueError("The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]].") + assert ( + _tensor.size(1) == 4 + ), "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]" elif isinstance(boxes, torch.Tensor): - if boxes.size(1) != 5: - raise ValueError("The boxes tensor shape is not correct as Tensor[K, 5]/") + assert boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]" else: - raise TypeError(f"boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]], instead got {type(boxes)}") + assert False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]" return diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py index f881201a2d2..d5e467b0c20 100644 --- a/torchvision/ops/poolers.py +++ b/torchvision/ops/poolers.py @@ -111,8 +111,7 @@ def _infer_scale(feature: Tensor, original_size: List[int]) -> float: def _setup_scales( features: List[Tensor], image_shapes: List[Tuple[int, int]], canonical_scale: int, canonical_level: int ) -> Tuple[List[float], LevelMapper]: - if not image_shapes: - raise ValueError("images list should not be empty") + assert len(image_shapes) != 0 max_x = 0 max_y = 0 for shape in image_shapes: @@ -166,8 +165,8 @@ def _multiscale_roi_align( Returns: result (Tensor) """ - if scales is None or mapper is None: - raise ValueError("scales and mapper should not be None") + assert scales is not None + assert mapper is not None num_levels = len(x_filtered) rois = _convert_to_roi_format(boxes) From c2fb56b7d392292fd4756c9791d4976456f5097c Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Wed, 23 Mar 2022 10:50:45 +0000 Subject: [PATCH 02/19] Update torchvision/ops/poolers.py Co-authored-by: Vasilis Vryniotis --- torchvision/ops/poolers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py index d5e467b0c20..87666369919 100644 --- a/torchvision/ops/poolers.py +++ b/torchvision/ops/poolers.py @@ -111,7 +111,7 @@ def _infer_scale(feature: Tensor, original_size: List[int]) -> float: def _setup_scales( features: List[Tensor], image_shapes: List[Tuple[int, int]], canonical_scale: int, canonical_level: int ) -> Tuple[List[float], 
LevelMapper]: - assert len(image_shapes) != 0 + assert image_shapes max_x = 0 max_y = 0 for shape in image_shapes: From 698af0eb4aab1261dd2e1875eee6b4a75b849ea9 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Thu, 24 Mar 2022 11:33:50 +0000 Subject: [PATCH 03/19] address PR comments --- torchvision/models/detection/_utils.py | 14 ++++++++--- torchvision/models/detection/anchor_utils.py | 6 ++--- torchvision/models/detection/faster_rcnn.py | 10 +++++--- torchvision/models/detection/fcos.py | 5 +++- .../models/detection/generalized_rcnn.py | 7 ++++-- torchvision/models/detection/retinanet.py | 25 ++++++++++--------- torchvision/models/detection/ssd.py | 14 ++++++++--- torchvision/models/detection/transform.py | 5 +++- torchvision/ops/_utils.py | 10 ++++---- torchvision/ops/poolers.py | 6 ++--- 10 files changed, 64 insertions(+), 38 deletions(-) diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py index 36e99e6506d..a8bb22327de 100644 --- a/torchvision/models/detection/_utils.py +++ b/torchvision/models/detection/_utils.py @@ -159,8 +159,14 @@ def encode_single(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor: return targets def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor: - assert isinstance(boxes, (list, tuple)) - assert isinstance(rel_codes, torch.Tensor) + torch._assert( + isinstance(boxes, (list, tuple)), + f"This function expects boxes of type list or tuple, instead got {type(boxes)}", + ) + torch._assert( + isinstance(rel_codes, torch.Tensor), + f"This function expects rel_codes of type torch.Tensor, instead got {type(rel_codes)}", + ) boxes_per_image = [b.size(0) for b in boxes] concat_boxes = torch.cat(boxes, dim=0) box_sum = 0 @@ -333,7 +339,7 @@ def __init__(self, high_threshold: float, low_threshold: float, allow_low_qualit """ self.BELOW_LOW_THRESHOLD = -1 self.BETWEEN_THRESHOLDS = -2 - assert low_threshold <= high_threshold + torch._assert(low_threshold <= high_threshold, "low_threshold should be <= high_threshold") self.high_threshold = high_threshold self.low_threshold = low_threshold self.allow_low_quality_matches = allow_low_quality_matches @@ -371,7 +377,7 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor: matches[between_thresholds] = self.BETWEEN_THRESHOLDS if self.allow_low_quality_matches: - assert all_matches is not None + torch._assert(all_matches is not None, "all_matches should not be None") self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) return matches diff --git a/torchvision/models/detection/anchor_utils.py b/torchvision/models/detection/anchor_utils.py index 3a4aee15bc1..623ee59eeb5 100644 --- a/torchvision/models/detection/anchor_utils.py +++ b/torchvision/models/detection/anchor_utils.py @@ -84,7 +84,7 @@ def num_anchors_per_location(self): def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]: anchors = [] cell_anchors = self.cell_anchors - assert cell_anchors is not None + torch._assert(cell_anchors is not None, "cell_anchors should not be None") if not (len(grid_sizes) == len(strides) == len(cell_anchors)): raise ValueError( @@ -162,8 +162,8 @@ def __init__( clip: bool = True, ): super().__init__() - if steps is not None: - assert len(aspect_ratios) == len(steps) + if steps is not None and len(aspect_ratios) != len(steps): + raise ValueError("aspect_ratios and steps should have the same length") self.aspect_ratios = aspect_ratios self.steps = steps self.clip = clip diff --git 
a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index 5685d82b3ba..f5f77b64290 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -1,3 +1,4 @@ +import torch import torch.nn.functional as F from torch import nn from torchvision.ops import MultiScaleRoIAlign @@ -187,9 +188,9 @@ def __init__( "same for all the levels)" ) - if not isinstance(rpn_anchor_generator, (AnchorGenerator, type(None))): + if not isinstance(anchor_generator, (AnchorGenerator, type(None))): raise TypeError( - f"rpn_anchor_generator should be of type AnchorGenerator or None instead of {type(rpn_anchor_generator)}" + f"anchor_generator should be of type AnchorGenerator or None, instead got {type(anchor_generator)}" ) if not isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None))): raise TypeError( @@ -305,7 +306,10 @@ def __init__(self, in_channels, num_classes): def forward(self, x): if x.dim() == 4: - assert list(x.shape[2:]) == [1, 1] + torch._assert( + list(x.shape[2:]) == [1, 1], + f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}", + ) x = x.flatten(start_dim=1) scores = self.cls_score(x) bbox_deltas = self.bbox_pred(x) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 03925f76ff8..ccdc256589e 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -571,7 +571,10 @@ def forward( original_image_sizes: List[Tuple[int, int]] = [] for img in images: val = img.shape[-2:] - assert len(val) == 2 + torch._assert( + len(val) == 2, + f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}", + ) original_image_sizes.append((val[0], val[1])) # transform the input diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index 2b1e159f3bb..aee949b933c 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -58,7 +58,7 @@ def forward(self, images, targets=None): """ if self.training: - assert targets is not None + torch._assert(targets is not None, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] @@ -71,7 +71,10 @@ def forward(self, images, targets=None): original_image_sizes: List[Tuple[int, int]] = [] for img in images: val = img.shape[-2:] - assert len(val) == 2 + torch._assert( + len(val) == 2, + f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}", + ) original_image_sizes.append((val[0], val[1])) images, targets = self.transform(images, targets) diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index a3fe01b1438..9afb4595d65 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -488,24 +488,25 @@ def forward(self, images, targets=None): """ if self.training: - assert targets is not None - - if self.training: - if targets is None: - raise ValueError("In training mode, targets should be passed") + torch._assert(targets is not None, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] - if isinstance(boxes, torch.Tensor): - if len(boxes.shape) != 2 or boxes.shape[-1] != 4: - raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") - else: - raise TypeError(f"Expected 
target boxes to be of type Tensor, got {type(boxes)}.") + torch._assert( + isinstance(boxes, torch.Tensor), f"Expected target boxes to be of type Tensor, got {type(boxes)}." + ) + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) # get the original image sizes original_image_sizes: List[Tuple[int, int]] = [] for img in images: val = img.shape[-2:] - assert len(val) == 2 + torch._assert( + len(val) == 2, + f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}", + ) original_image_sizes.append((val[0], val[1])) # transform the input @@ -543,7 +544,7 @@ def forward(self, images, targets=None): losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: - assert targets is not None + torch._assert(targets is not None, "targets should not be none when in training mode") # compute the losses losses = self.compute_loss(targets, head_outputs, anchors) else: diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 5c5696dff6b..810fc049b4f 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -311,7 +311,7 @@ def forward( raise ValueError("In training mode, targets should be passed") if self.training: - assert targets is not None + torch._assert(targets is not None, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): @@ -324,7 +324,10 @@ def forward( original_image_sizes: List[Tuple[int, int]] = [] for img in images: val = img.shape[-2:] - assert len(val) == 2 + torch._assert( + len(val) == 2, + f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}", + ) original_image_sizes.append((val[0], val[1])) # transform the input @@ -359,7 +362,7 @@ def forward( losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: - assert targets is not None + torch._assert(targets is not None, "targets should not be none when in training mode") matched_idxs = [] for anchors_per_image, targets_per_image in zip(anchors, targets): if targets_per_image["boxes"].numel() == 0: @@ -529,7 +532,10 @@ def _vgg_extractor(backbone: vgg.VGG, highres: bool, trainable_layers: int): num_stages = len(stage_indices) # find the index of the layer from which we wont freeze - assert 0 <= trainable_layers <= num_stages + torch._assert( + 0 <= trainable_layers <= num_stages, + f"trainable_layers should be in the range [0, {num_stages}]. 
Instead got {trainable_layers}", + ) freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] for b in backbone[:freeze_before]: diff --git a/torchvision/models/detection/transform.py b/torchvision/models/detection/transform.py index 960e28500a1..ac902ac0fd6 100644 --- a/torchvision/models/detection/transform.py +++ b/torchvision/models/detection/transform.py @@ -134,7 +134,10 @@ def forward( images = self.batch_images(images, size_divisible=self.size_divisible) image_sizes_list: List[Tuple[int, int]] = [] for image_size in image_sizes: - assert len(image_size) == 2 + torch._assert( + len(image_size) == 2, + f"Input tensors expected to have in the last two elements H and W, instead got {image_size}", + ) image_sizes_list.append((image_size[0], image_size[1])) image_list = ImageList(images, image_sizes_list) diff --git a/torchvision/ops/_utils.py b/torchvision/ops/_utils.py index 3a07c747f58..107785266a1 100644 --- a/torchvision/ops/_utils.py +++ b/torchvision/ops/_utils.py @@ -28,13 +28,13 @@ def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor: def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]): if isinstance(boxes, (list, tuple)): for _tensor in boxes: - assert ( - _tensor.size(1) == 4 - ), "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]" + torch._assert( + _tensor.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]" + ) elif isinstance(boxes, torch.Tensor): - assert boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]" + torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]") else: - assert False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]" + torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]") return diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py index 87666369919..7257f0dfd7a 100644 --- a/torchvision/ops/poolers.py +++ b/torchvision/ops/poolers.py @@ -111,7 +111,7 @@ def _infer_scale(feature: Tensor, original_size: List[int]) -> float: def _setup_scales( features: List[Tensor], image_shapes: List[Tuple[int, int]], canonical_scale: int, canonical_level: int ) -> Tuple[List[float], LevelMapper]: - assert image_shapes + torch._assert(image_shapes, "images list should not be empty") max_x = 0 max_y = 0 for shape in image_shapes: @@ -165,8 +165,8 @@ def _multiscale_roi_align( Returns: result (Tensor) """ - assert scales is not None - assert mapper is not None + torch._assert(scales is not None, "scales should not be None") + torch._assert(mapper is not None, "mapper should not be None") num_levels = len(x_filtered) rois = _convert_to_roi_format(boxes) From 2f924626bdae0ac9e0ee8f502a339a3e60601cf4 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Thu, 24 Mar 2022 11:42:43 +0000 Subject: [PATCH 04/19] replace one more assert with torch._assert: --- torchvision/models/detection/fcos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index ccdc256589e..2dfa8c1961b 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -612,7 +612,7 @@ def forward( losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: - assert targets is not None + torch._assert(targets is not None, "targets should not be none when in training mode") # compute the losses losses = self.compute_loss(targets, 
head_outputs, anchors, num_anchors_per_level) From 2bda809a8fdf79a906c0bc866fb2b479c17d207c Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Thu, 24 Mar 2022 11:55:30 +0000 Subject: [PATCH 05/19] address PR comments --- torchvision/models/detection/faster_rcnn.py | 4 ++-- torchvision/models/detection/fcos.py | 21 +++++++++++---------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index f5f77b64290..a68a89536bd 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -188,9 +188,9 @@ def __init__( "same for all the levels)" ) - if not isinstance(anchor_generator, (AnchorGenerator, type(None))): + if not isinstance(rpn_anchor_generator, (AnchorGenerator, type(None))): raise TypeError( - f"anchor_generator should be of type AnchorGenerator or None, instead got {type(anchor_generator)}" + f"rpn_anchor_generator should be of type AnchorGenerator or None instead of {type(rpn_anchor_generator)}" ) if not isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None))): raise TypeError( diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 2dfa8c1961b..7ce25d5ca4a 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -558,15 +558,16 @@ def forward( like `scores`, `labels` and `mask` (for Mask R-CNN models). """ if self.training: - if targets is None: - raise ValueError("In training mode, targets should be passed") + torch._assert(targets is not None, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] - if isinstance(boxes, torch.Tensor): - if len(boxes.shape) != 2 or boxes.shape[-1] != 4: - raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") - else: - raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.") + torch._assert( + isinstance(boxes, torch.Tensor), f"Expected target boxes to be of type Tensor, got {type(boxes)}." + ) + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) original_image_sizes: List[Tuple[int, int]] = [] for img in images: @@ -589,9 +590,9 @@ def forward( # print the first degenerate box bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] degen_bb: List[float] = boxes[bb_idx].tolist() - raise ValueError( - "All bounding boxes should have positive height and width." - f" Found invalid box {degen_bb} for target at index {target_idx}." + torch._assert( + False, + f"All bounding boxes should have positive height and width. 
Found invalid box {degen_bb} for target at index {target_idx}.", ) # get the features from the backbone From c9edb0574024ee658d8b77cb10c675bbd891e692 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Thu, 24 Mar 2022 14:25:12 +0000 Subject: [PATCH 06/19] make type checker happy --- torchvision/models/detection/fcos.py | 7 +++++-- torchvision/models/detection/generalized_rcnn.py | 3 ++- torchvision/models/detection/retinanet.py | 6 ++++-- torchvision/models/detection/ssd.py | 6 ++++-- 4 files changed, 15 insertions(+), 7 deletions(-) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 7ce25d5ca4a..854e98321dd 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -558,7 +558,9 @@ def forward( like `scores`, `labels` and `mask` (for Mask R-CNN models). """ if self.training: - torch._assert(targets is not None, "targets should not be none when in training mode") + + if targets is None: + torch._assert(False, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] torch._assert( @@ -613,7 +615,8 @@ def forward( losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: - torch._assert(targets is not None, "targets should not be none when in training mode") + if targets is None: + torch._assert(False, "targets should not be none when in training mode") # compute the losses losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level) diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index aee949b933c..0dceb66fbe8 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -58,7 +58,8 @@ def forward(self, images, targets=None): """ if self.training: - torch._assert(targets is not None, "targets should not be none when in training mode") + if targets is None: + torch._assert(True, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index 9afb4595d65..0af0dda1881 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -488,7 +488,8 @@ def forward(self, images, targets=None): """ if self.training: - torch._assert(targets is not None, "targets should not be none when in training mode") + if targets is None: + torch._assert(False, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] torch._assert( @@ -544,7 +545,8 @@ def forward(self, images, targets=None): losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: - torch._assert(targets is not None, "targets should not be none when in training mode") + if targets is None: + torch._assert(False, "targets should not be none when in training mode") # compute the losses losses = self.compute_loss(targets, head_outputs, anchors) else: diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 810fc049b4f..750fc4c4523 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -311,7 +311,8 @@ def forward( raise ValueError("In training mode, targets should be passed") if self.training: - torch._assert(targets is not None, "targets should not be none when in training mode") + if targets is None: + torch._assert(False, "targets should not be none when in training 
mode") for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): @@ -362,7 +363,8 @@ def forward( losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: - torch._assert(targets is not None, "targets should not be none when in training mode") + if targets is None: + torch._assert(False, "targets should not be none when in training mode") matched_idxs = [] for anchors_per_image, targets_per_image in zip(anchors, targets): if targets_per_image["boxes"].numel() == 0: From 55d99267f0a80b67967a427ef9c235f49e942c1c Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Thu, 24 Mar 2022 14:39:00 +0000 Subject: [PATCH 07/19] Fix bug --- torchvision/models/detection/generalized_rcnn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index 0dceb66fbe8..897a7866234 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -59,7 +59,7 @@ def forward(self, images, targets=None): """ if self.training: if targets is None: - torch._assert(True, "targets should not be none when in training mode") + torch._assert(False, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] From 661e1364920eb81a7902f08cf506bccc5de0bc11 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Thu, 24 Mar 2022 17:18:28 +0000 Subject: [PATCH 08/19] fix bug --- torchvision/models/detection/fcos.py | 4 ++-- torchvision/models/detection/generalized_rcnn.py | 2 +- torchvision/models/detection/retinanet.py | 4 ++-- torchvision/models/detection/ssd.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 854e98321dd..c053165533e 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -560,7 +560,7 @@ def forward( if self.training: if targets is None: - torch._assert(False, "targets should not be none when in training mode") + torch._assert(True, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] torch._assert( @@ -616,7 +616,7 @@ def forward( detections: List[Dict[str, Tensor]] = [] if self.training: if targets is None: - torch._assert(False, "targets should not be none when in training mode") + torch._assert(True, "targets should not be none when in training mode") # compute the losses losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level) diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index 897a7866234..0dceb66fbe8 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -59,7 +59,7 @@ def forward(self, images, targets=None): """ if self.training: if targets is None: - torch._assert(False, "targets should not be none when in training mode") + torch._assert(True, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index 0af0dda1881..9ec8e117587 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -489,7 +489,7 @@ def forward(self, images, targets=None): """ if self.training: if targets is None: - torch._assert(False, "targets should not be none when in 
training mode") + torch._assert(True, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] torch._assert( @@ -546,7 +546,7 @@ def forward(self, images, targets=None): detections: List[Dict[str, Tensor]] = [] if self.training: if targets is None: - torch._assert(False, "targets should not be none when in training mode") + torch._assert(True, "targets should not be none when in training mode") # compute the losses losses = self.compute_loss(targets, head_outputs, anchors) else: diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 750fc4c4523..60b49ef5c79 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -312,7 +312,7 @@ def forward( if self.training: if targets is None: - torch._assert(False, "targets should not be none when in training mode") + torch._assert(True, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): @@ -364,7 +364,7 @@ def forward( detections: List[Dict[str, Tensor]] = [] if self.training: if targets is None: - torch._assert(False, "targets should not be none when in training mode") + torch._assert(True, "targets should not be none when in training mode") matched_idxs = [] for anchors_per_image, targets_per_image in zip(anchors, targets): if targets_per_image["boxes"].numel() == 0: From 846275c9742b86b011456141759a2255f82fba5a Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Thu, 24 Mar 2022 17:22:50 +0000 Subject: [PATCH 09/19] fix for wrong asserts --- torchvision/models/detection/fcos.py | 4 ++-- torchvision/models/detection/generalized_rcnn.py | 2 +- torchvision/models/detection/retinanet.py | 4 ++-- torchvision/models/detection/ssd.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index a35ac18acf6..120c739d53f 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -567,7 +567,7 @@ def forward( if self.training: if targets is None: - torch._assert(True, "targets should not be none when in training mode") + torch._assert(False, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] torch._assert( @@ -623,7 +623,7 @@ def forward( detections: List[Dict[str, Tensor]] = [] if self.training: if targets is None: - torch._assert(True, "targets should not be none when in training mode") + torch._assert(False, "targets should not be none when in training mode") # compute the losses losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level) diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index 0dceb66fbe8..897a7866234 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -59,7 +59,7 @@ def forward(self, images, targets=None): """ if self.training: if targets is None: - torch._assert(True, "targets should not be none when in training mode") + torch._assert(False, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index ae209193e34..1984ffc4f5d 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -496,7 +496,7 @@ def forward(self, images, 
targets=None): """ if self.training: if targets is None: - torch._assert(True, "targets should not be none when in training mode") + torch._assert(False, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] torch._assert( @@ -553,7 +553,7 @@ def forward(self, images, targets=None): detections: List[Dict[str, Tensor]] = [] if self.training: if targets is None: - torch._assert(True, "targets should not be none when in training mode") + torch._assert(False, "targets should not be none when in training mode") # compute the losses losses = self.compute_loss(targets, head_outputs, anchors) else: diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 3966bbca172..4a2562c6894 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -327,7 +327,7 @@ def forward( if self.training: if targets is None: - torch._assert(True, "targets should not be none when in training mode") + torch._assert(False, "targets should not be none when in training mode") for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): @@ -379,7 +379,7 @@ def forward( detections: List[Dict[str, Tensor]] = [] if self.training: if targets is None: - torch._assert(True, "targets should not be none when in training mode") + torch._assert(False, "targets should not be none when in training mode") matched_idxs = [] for anchors_per_image, targets_per_image in zip(anchors, targets): if targets_per_image["boxes"].numel() == 0: From 63831d4435d5d73df6548de734b33dfadca3d289 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Fri, 25 Mar 2022 13:07:11 +0000 Subject: [PATCH 10/19] attempt to make tests pass --- torchvision/models/detection/_utils.py | 8 +++++--- torchvision/models/detection/fcos.py | 3 ++- torchvision/models/detection/generalized_rcnn.py | 2 +- torchvision/models/detection/retinanet.py | 6 ++++-- torchvision/models/detection/ssd.py | 2 ++ 5 files changed, 14 insertions(+), 7 deletions(-) diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py index a8bb22327de..de5eacee27f 100644 --- a/torchvision/models/detection/_utils.py +++ b/torchvision/models/detection/_utils.py @@ -161,11 +161,11 @@ def encode_single(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor: def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor: torch._assert( isinstance(boxes, (list, tuple)), - f"This function expects boxes of type list or tuple, instead got {type(boxes)}", + f"This function expects boxes of type list or tuple.", ) torch._assert( isinstance(rel_codes, torch.Tensor), - f"This function expects rel_codes of type torch.Tensor, instead got {type(rel_codes)}", + f"This function expects rel_codes of type torch.Tensor.", ) boxes_per_image = [b.size(0) for b in boxes] concat_boxes = torch.cat(boxes, dim=0) @@ -377,7 +377,9 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor: matches[between_thresholds] = self.BETWEEN_THRESHOLDS if self.allow_low_quality_matches: - torch._assert(all_matches is not None, "all_matches should not be None") + if all_matches is None: + torch._assert(False, "all_matches should not be None") + return torch.empty((0), dtype=torch.int64) # not reachable - added to make type checker happy self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) return matches diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 120c739d53f..74ea4a327fc 100644 --- 
a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -568,6 +568,7 @@ def forward( if targets is None: torch._assert(False, "targets should not be none when in training mode") + return ({}, [{}]) # # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] torch._assert( @@ -624,8 +625,8 @@ def forward( if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") + return ({}, [{}]) # not reachable - added to make type checker happy # compute the losses - losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level) else: # split outputs per level diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index 897a7866234..f53d4631394 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -60,7 +60,7 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - + return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index 1984ffc4f5d..a0411184e79 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -497,14 +497,15 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") + return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] torch._assert( - isinstance(boxes, torch.Tensor), f"Expected target boxes to be of type Tensor, got {type(boxes)}." + isinstance(boxes, torch.Tensor), f"Expected target boxes to be of type Tensor." 
) torch._assert( len(boxes.shape) == 2 and boxes.shape[-1] == 4, - f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + f"Expected target boxes to be a tensor of shape [N, 4].", ) # get the original image sizes @@ -554,6 +555,7 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") + return ({}, [{}]) # not reachable - added to make type checker happy # compute the losses losses = self.compute_loss(targets, head_outputs, anchors) else: diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 4a2562c6894..5a3e1296e8a 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -328,6 +328,7 @@ def forward( if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") + return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): @@ -380,6 +381,7 @@ def forward( if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") + return ({}, [{}]) # not reachable - added to make type checker happy matched_idxs = [] for anchors_per_image, targets_per_image in zip(anchors, targets): if targets_per_image["boxes"].numel() == 0: From 397c1732f2d4152bb228ad4ecaf3c68539d77681 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Mon, 28 Mar 2022 12:28:01 +0100 Subject: [PATCH 11/19] Fix test_ops tests --- test/test_ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_ops.py b/test/test_ops.py index d1562b00a42..ad9aaefee52 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -138,13 +138,13 @@ def test_autocast(self, x_dtype, rois_dtype): def _helper_boxes_shape(self, func): # test boxes as Tensor[N, 5] - with pytest.raises(ValueError): + with pytest.raises(AssertionError): a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8) boxes = torch.tensor([[0, 0, 3, 3]], dtype=a.dtype) func(a, boxes, output_size=(2, 2)) # test boxes as List[Tensor[N, 4]] - with pytest.raises(ValueError): + with pytest.raises(AssertionError): a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8) boxes = torch.tensor([[0, 0, 3]], dtype=a.dtype) ops.roi_pool(a, [boxes], output_size=(2, 2)) From 9403e5bd876d681ef4dde2356863da0f053e9f44 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Mon, 28 Mar 2022 14:42:36 +0100 Subject: [PATCH 12/19] Fix expected exception in tests --- test/test_models.py | 8 ++++---- torchvision/models/detection/generalized_rcnn.py | 7 +++---- torchvision/models/detection/retinanet.py | 2 +- torchvision/models/detection/ssd.py | 10 +++------- 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/test/test_models.py b/test/test_models.py index 9a051a61eab..0fbf45b9750 100644 --- a/test/test_models.py +++ b/test/test_models.py @@ -745,24 +745,24 @@ def test_detection_model_validation(model_fn): x = [torch.rand(input_shape)] # validate that targets are present in training - with pytest.raises(ValueError): + with pytest.raises(AssertionError): model(x) # validate type targets = [{"boxes": 0.0}] - with pytest.raises(TypeError): + with pytest.raises(AssertionError): model(x, targets=targets) # validate boxes shape for boxes in (torch.rand((4,)), torch.rand((1, 5))): targets = [{"boxes": boxes}] - with pytest.raises(ValueError): + with pytest.raises(AssertionError): model(x, targets=targets) # 
validate that no degenerate boxes are present boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]]) targets = [{"boxes": boxes}] - with pytest.raises(ValueError): + with pytest.raises(AssertionError): model(x, targets=targets) diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index f53d4631394..b922c13e75f 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -64,10 +64,9 @@ def forward(self, images, targets=None): for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): - if len(boxes.shape) != 2 or boxes.shape[-1] != 4: - raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") + torch._assert(len(boxes.shape) == 2 and boxes.shape[-1] == 4, f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") else: - raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.") + torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") original_image_sizes: List[Tuple[int, int]] = [] for img in images: @@ -90,7 +89,7 @@ def forward(self, images, targets=None): # print the first degenerate box bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] degen_bb: List[float] = boxes[bb_idx].tolist() - raise ValueError( + torch._assert(False, "All bounding boxes should have positive height and width." f" Found invalid box {degen_bb} for target at index {target_idx}." ) diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index a0411184e79..bde4c3002b8 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -531,7 +531,7 @@ def forward(self, images, targets=None): # print the first degenerate box bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] degen_bb: List[float] = boxes[bb_idx].tolist() - raise ValueError( + torch._assert(False, "All bounding boxes should have positive height and width." f" Found invalid box {degen_bb} for target at index {target_idx}." 
) diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 5a3e1296e8a..21a5d85dc13 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -322,9 +322,6 @@ def compute_loss( def forward( self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: - if self.training and targets is None: - raise ValueError("In training mode, targets should be passed") - if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") @@ -332,10 +329,9 @@ def forward( for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): - if len(boxes.shape) != 2 or boxes.shape[-1] != 4: - raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") + torch._assert(len(boxes.shape) == 2 and boxes.shape[-1] == 4, f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") else: - raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.") + torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") # get the original image sizes original_image_sizes: List[Tuple[int, int]] = [] @@ -358,7 +354,7 @@ def forward( if degenerate_boxes.any(): bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] degen_bb: List[float] = boxes[bb_idx].tolist() - raise ValueError( + torch._assert(False, "All bounding boxes should have positive height and width." f" Found invalid box {degen_bb} for target at index {target_idx}." ) From e259d29009bca40b8fb5c79cdfc0b8a9e772eed9 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Mon, 28 Mar 2022 15:39:15 +0100 Subject: [PATCH 13/19] fix typo --- torchvision/models/detection/fcos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 74ea4a327fc..243794eeab7 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -568,7 +568,7 @@ def forward( if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # # not reachable - added to make type checker happy + return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] torch._assert( From 2d1e5e0e649408a6600cba8231739f3418f76406 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Mon, 28 Mar 2022 16:19:00 +0100 Subject: [PATCH 14/19] fix tests and format --- torchvision/models/detection/_utils.py | 6 +++--- torchvision/models/detection/fcos.py | 8 +++----- torchvision/models/detection/generalized_rcnn.py | 12 ++++++++---- torchvision/models/detection/retinanet.py | 15 +++++++-------- torchvision/models/detection/ssd.py | 14 +++++++++----- torchvision/ops/poolers.py | 7 ++++--- 6 files changed, 34 insertions(+), 28 deletions(-) diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py index de5eacee27f..3fb2d5bcfb1 100644 --- a/torchvision/models/detection/_utils.py +++ b/torchvision/models/detection/_utils.py @@ -161,11 +161,11 @@ def encode_single(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor: def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor: torch._assert( isinstance(boxes, (list, tuple)), - f"This function expects boxes of type list or tuple.", + "This function expects boxes of type list or tuple.", ) torch._assert( isinstance(rel_codes, 
torch.Tensor), - f"This function expects rel_codes of type torch.Tensor.", + "This function expects rel_codes of type torch.Tensor.", ) boxes_per_image = [b.size(0) for b in boxes] concat_boxes = torch.cat(boxes, dim=0) @@ -379,7 +379,7 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor: if self.allow_low_quality_matches: if all_matches is None: torch._assert(False, "all_matches should not be None") - return torch.empty((0), dtype=torch.int64) # not reachable - added to make type checker happy + return torch.empty((0), dtype=torch.int64) # not reachable - added to make type checker happy self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) return matches diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index 243794eeab7..edf8985f35a 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -568,12 +568,10 @@ def forward( if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy + return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] - torch._assert( - isinstance(boxes, torch.Tensor), f"Expected target boxes to be of type Tensor, got {type(boxes)}." - ) + torch._assert(isinstance(boxes, torch.Tensor), f"Expected target boxes to be of type Tensor.") torch._assert( len(boxes.shape) == 2 and boxes.shape[-1] == 4, f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", @@ -625,7 +623,7 @@ def forward( if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy + return ({}, [{}]) # not reachable - added to make type checker happy # compute the losses losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level) else: diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index b922c13e75f..9ddf7461677 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -60,11 +60,14 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy + return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): - torch._assert(len(boxes.shape) == 2 and boxes.shape[-1] == 4, f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) else: torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") @@ -89,9 +92,10 @@ def forward(self, images, targets=None): # print the first degenerate box bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] degen_bb: List[float] = boxes[bb_idx].tolist() - torch._assert(False, + torch._assert( + False, "All bounding boxes should have positive height and width." - f" Found invalid box {degen_bb} for target at index {target_idx}." 
+ f" Found invalid box {degen_bb} for target at index {target_idx}.", ) features = self.backbone(images.tensors) diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index bde4c3002b8..a111f2c7893 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -497,15 +497,13 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy + return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] - torch._assert( - isinstance(boxes, torch.Tensor), f"Expected target boxes to be of type Tensor." - ) + torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.") torch._assert( len(boxes.shape) == 2 and boxes.shape[-1] == 4, - f"Expected target boxes to be a tensor of shape [N, 4].", + "Expected target boxes to be a tensor of shape [N, 4].", ) # get the original image sizes @@ -531,9 +529,10 @@ def forward(self, images, targets=None): # print the first degenerate box bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] degen_bb: List[float] = boxes[bb_idx].tolist() - torch._assert(False, + torch._assert( + False, "All bounding boxes should have positive height and width." - f" Found invalid box {degen_bb} for target at index {target_idx}." + f" Found invalid box {degen_bb} for target at index {target_idx}.", ) # get the features from the backbone @@ -555,7 +554,7 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy + return ({}, [{}]) # not reachable - added to make type checker happy # compute the losses losses = self.compute_loss(targets, head_outputs, anchors) else: diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 21a5d85dc13..758aecdd412 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -325,11 +325,14 @@ def forward( if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy + return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] if isinstance(boxes, torch.Tensor): - torch._assert(len(boxes.shape) == 2 and boxes.shape[-1] == 4, f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.") + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) else: torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") @@ -354,9 +357,10 @@ def forward( if degenerate_boxes.any(): bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] degen_bb: List[float] = boxes[bb_idx].tolist() - torch._assert(False, + torch._assert( + False, "All bounding boxes should have positive height and width." - f" Found invalid box {degen_bb} for target at index {target_idx}." 
+ f" Found invalid box {degen_bb} for target at index {target_idx}.", ) # get the features from the backbone @@ -377,7 +381,7 @@ def forward( if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy + return ({}, [{}]) # not reachable - added to make type checker happy matched_idxs = [] for anchors_per_image, targets_per_image in zip(anchors, targets): if targets_per_image["boxes"].numel() == 0: diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py index 7257f0dfd7a..f881201a2d2 100644 --- a/torchvision/ops/poolers.py +++ b/torchvision/ops/poolers.py @@ -111,7 +111,8 @@ def _infer_scale(feature: Tensor, original_size: List[int]) -> float: def _setup_scales( features: List[Tensor], image_shapes: List[Tuple[int, int]], canonical_scale: int, canonical_level: int ) -> Tuple[List[float], LevelMapper]: - torch._assert(image_shapes, "images list should not be empty") + if not image_shapes: + raise ValueError("images list should not be empty") max_x = 0 max_y = 0 for shape in image_shapes: @@ -165,8 +166,8 @@ def _multiscale_roi_align( Returns: result (Tensor) """ - torch._assert(scales is not None, "scales should not be None") - torch._assert(mapper is not None, "mapper should not be None") + if scales is None or mapper is None: + raise ValueError("scales and mapper should not be None") num_levels = len(x_filtered) rois = _convert_to_roi_format(boxes) From 07e9f1994a1376342e69bddcf9b7b9f37037be45 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Mon, 28 Mar 2022 16:45:16 +0100 Subject: [PATCH 15/19] fix flake8 --- torchvision/models/detection/fcos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index edf8985f35a..e61f2d87e6a 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -571,7 +571,7 @@ def forward( return ({}, [{}]) # not reachable - added to make type checker happy for target in targets: boxes = target["boxes"] - torch._assert(isinstance(boxes, torch.Tensor), f"Expected target boxes to be of type Tensor.") + torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.") torch._assert( len(boxes.shape) == 2 and boxes.shape[-1] == 4, f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", From fb2d9a93e343bb9fce7555dea436e2388ae37c57 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Mon, 28 Mar 2022 17:05:03 +0100 Subject: [PATCH 16/19] remove one last exception --- torchvision/models/detection/anchor_utils.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/torchvision/models/detection/anchor_utils.py b/torchvision/models/detection/anchor_utils.py index 623ee59eeb5..34fb8d23069 100644 --- a/torchvision/models/detection/anchor_utils.py +++ b/torchvision/models/detection/anchor_utils.py @@ -85,14 +85,13 @@ def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) anchors = [] cell_anchors = self.cell_anchors torch._assert(cell_anchors is not None, "cell_anchors should not be None") - - if not (len(grid_sizes) == len(strides) == len(cell_anchors)): - raise ValueError( - "Anchors should be Tuple[Tuple[int]] because each feature " - "map could potentially have different sizes and aspect ratios. 
" - "There needs to be a match between the number of " - "feature maps passed and the number of sizes / aspect ratios specified." - ) + torch._assert( + len(grid_sizes) == len(strides) == len(cell_anchors), + "Anchors should be Tuple[Tuple[int]] because each feature " + "map could potentially have different sizes and aspect ratios. " + "There needs to be a match between the number of " + "feature maps passed and the number of sizes / aspect ratios specified.", + ) for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors): grid_height, grid_width = size From 7f406e34da466a455faefca584437872b61da896 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Mon, 28 Mar 2022 18:12:14 +0100 Subject: [PATCH 17/19] fix error --- test/test_models_detection_anchor_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/test_models_detection_anchor_utils.py b/test/test_models_detection_anchor_utils.py index 0e99a462158..fc679741abd 100644 --- a/test/test_models_detection_anchor_utils.py +++ b/test/test_models_detection_anchor_utils.py @@ -1,3 +1,4 @@ +from ast import Assert import pytest import torch from common_utils import assert_equal @@ -16,7 +17,7 @@ def test_incorrect_anchors(self): image1 = torch.randn(3, 800, 800) image_list = ImageList(image1, [(800, 800)]) feature_maps = [torch.randn(1, 50)] - pytest.raises(ValueError, anc, image_list, feature_maps) + pytest.raises(AssertionError, anc, image_list, feature_maps) def _init_test_anchor_generator(self): anchor_sizes = ((10,),) From e82b90ebf880d419d1a1321e9ce2d6c50f961f80 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Tue, 29 Mar 2022 09:22:21 +0100 Subject: [PATCH 18/19] remove unused immport --- test/test_models_detection_anchor_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_models_detection_anchor_utils.py b/test/test_models_detection_anchor_utils.py index fc679741abd..645d4624d64 100644 --- a/test/test_models_detection_anchor_utils.py +++ b/test/test_models_detection_anchor_utils.py @@ -1,4 +1,3 @@ -from ast import Assert import pytest import torch from common_utils import assert_equal From 25eda422b0c639dd374354290b91ebb0e4428d19 Mon Sep 17 00:00:00 2001 From: Joao Gomes Date: Tue, 29 Mar 2022 14:08:47 +0100 Subject: [PATCH 19/19] replace fake returns by else --- torchvision/models/detection/_utils.py | 4 +- torchvision/models/detection/fcos.py | 22 ++++----- .../models/detection/generalized_rcnn.py | 20 ++++---- torchvision/models/detection/retinanet.py | 22 ++++----- torchvision/models/detection/ssd.py | 48 ++++++++++--------- 5 files changed, 59 insertions(+), 57 deletions(-) diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py index 3fb2d5bcfb1..40923794edf 100644 --- a/torchvision/models/detection/_utils.py +++ b/torchvision/models/detection/_utils.py @@ -379,8 +379,8 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor: if self.allow_low_quality_matches: if all_matches is None: torch._assert(False, "all_matches should not be None") - return torch.empty((0), dtype=torch.int64) # not reachable - added to make type checker happy - self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) + else: + self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) return matches diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index e61f2d87e6a..bb9dbe65ae7 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -568,14 +568,14 @@ def 
forward( if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy - for target in targets: - boxes = target["boxes"] - torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.") - torch._assert( - len(boxes.shape) == 2 and boxes.shape[-1] == 4, - f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", - ) + else: + for target in targets: + boxes = target["boxes"] + torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.") + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) original_image_sizes: List[Tuple[int, int]] = [] for img in images: @@ -623,9 +623,9 @@ def forward( if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy - # compute the losses - losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level) + else: + # compute the losses + losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level) else: # split outputs per level split_head_outputs: Dict[str, List[Tensor]] = {} diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index 9ddf7461677..fdcaea5a3eb 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -60,16 +60,16 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy - for target in targets: - boxes = target["boxes"] - if isinstance(boxes, torch.Tensor): - torch._assert( - len(boxes.shape) == 2 and boxes.shape[-1] == 4, - f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", - ) - else: - torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") + else: + for target in targets: + boxes = target["boxes"] + if isinstance(boxes, torch.Tensor): + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) + else: + torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") original_image_sizes: List[Tuple[int, int]] = [] for img in images: diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index a111f2c7893..c8b0de661f0 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -497,14 +497,14 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy - for target in targets: - boxes = target["boxes"] - torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.") - torch._assert( - len(boxes.shape) == 2 and boxes.shape[-1] == 4, - "Expected target boxes to be a tensor of shape [N, 4].", - ) + else: + for target in targets: + boxes = target["boxes"] + torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.") + torch._assert( + 
len(boxes.shape) == 2 and boxes.shape[-1] == 4, + "Expected target boxes to be a tensor of shape [N, 4].", + ) # get the original image sizes original_image_sizes: List[Tuple[int, int]] = [] @@ -554,9 +554,9 @@ def forward(self, images, targets=None): if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy - # compute the losses - losses = self.compute_loss(targets, head_outputs, anchors) + else: + # compute the losses + losses = self.compute_loss(targets, head_outputs, anchors) else: # recover level sizes num_anchors_per_level = [x.size(2) * x.size(3) for x in features] diff --git a/torchvision/models/detection/ssd.py b/torchvision/models/detection/ssd.py index 758aecdd412..4f9d7546c2b 100644 --- a/torchvision/models/detection/ssd.py +++ b/torchvision/models/detection/ssd.py @@ -325,16 +325,16 @@ def forward( if self.training: if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy - for target in targets: - boxes = target["boxes"] - if isinstance(boxes, torch.Tensor): - torch._assert( - len(boxes.shape) == 2 and boxes.shape[-1] == 4, - f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", - ) - else: - torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") + else: + for target in targets: + boxes = target["boxes"] + if isinstance(boxes, torch.Tensor): + torch._assert( + len(boxes.shape) == 2 and boxes.shape[-1] == 4, + f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.", + ) + else: + torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.") # get the original image sizes original_image_sizes: List[Tuple[int, int]] = [] @@ -379,21 +379,23 @@ def forward( losses = {} detections: List[Dict[str, Tensor]] = [] if self.training: + matched_idxs = [] if targets is None: torch._assert(False, "targets should not be none when in training mode") - return ({}, [{}]) # not reachable - added to make type checker happy - matched_idxs = [] - for anchors_per_image, targets_per_image in zip(anchors, targets): - if targets_per_image["boxes"].numel() == 0: - matched_idxs.append( - torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device) - ) - continue - - match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image) - matched_idxs.append(self.proposal_matcher(match_quality_matrix)) - - losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs) + else: + for anchors_per_image, targets_per_image in zip(anchors, targets): + if targets_per_image["boxes"].numel() == 0: + matched_idxs.append( + torch.full( + (anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device + ) + ) + continue + + match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image) + matched_idxs.append(self.proposal_matcher(match_quality_matrix)) + + losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs) else: detections = self.postprocess_detections(head_outputs, anchors, images.image_sizes) detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
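---

Note: the pattern that PATCH 19 converges on can be summarized outside any diff context: validate inputs with torch._assert and put the happy path in an else branch, rather than following the assert with an unreachable fake return to satisfy the type checker. The sketch below is a minimal illustration only, not torchvision code; the function name validate_targets and the explicit training flag are assumptions made for the example.

    from typing import Dict, List, Optional

    import torch
    from torch import Tensor


    def validate_targets(targets: Optional[List[Dict[str, Tensor]]], training: bool) -> None:
        # Hypothetical sketch of the validation style this series adopts:
        # torch._assert(condition, message) raises AssertionError when the
        # condition is False and, unlike a bare `raise`, is symbolically
        # traceable.
        if training:
            if targets is None:
                torch._assert(False, "targets should not be none when in training mode")
            else:
                # Inside the else branch the type checker has narrowed
                # `targets` to a non-Optional list, so no unreachable
                # placeholder return is needed.
                for target in targets:
                    boxes = target["boxes"]
                    torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
                    torch._assert(
                        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
                        f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
                    )

Because torch._assert fails with AssertionError rather than ValueError, tests that exercise the failure path are updated the way PATCH 17 updates test_incorrect_anchors: pytest.raises(AssertionError, anc, image_list, feature_maps).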