Merged
27 commits
4bdea55
reverting some recently introduced exceptions
jdsgomes Mar 22, 2022
c2fb56b
Update torchvision/ops/poolers.py
jdsgomes Mar 23, 2022
698af0e
address PR comments
jdsgomes Mar 24, 2022
2f92462
replace one more assert with torch._assert
jdsgomes Mar 24, 2022
2bda809
address PR comments
jdsgomes Mar 24, 2022
c9edb05
make type checker happy
jdsgomes Mar 24, 2022
55d9926
Fix bug
jdsgomes Mar 24, 2022
f335534
Merge branch 'main' into revert_some_exceptions
jdsgomes Mar 24, 2022
661e136
fix bug
jdsgomes Mar 24, 2022
b99c3d8
Merge branch 'revert_some_exceptions' of github.com:jdsgomes/vision i…
jdsgomes Mar 24, 2022
846275c
fix for wrong asserts
jdsgomes Mar 24, 2022
ea6dd47
Merge branch 'main' into revert_some_exceptions
jdsgomes Mar 25, 2022
63831d4
attempt to make tests pass
jdsgomes Mar 25, 2022
5ace634
Merge branch 'main' into revert_some_exceptions
jdsgomes Mar 25, 2022
2734e9f
Merge branch 'main' into revert_some_exceptions
jdsgomes Mar 28, 2022
397c173
Fix test_ops tests
jdsgomes Mar 28, 2022
9403e5b
Fix expected exception in tests
jdsgomes Mar 28, 2022
c811dba
Merge branch 'main' into revert_some_exceptions
jdsgomes Mar 28, 2022
e259d29
fix typo
jdsgomes Mar 28, 2022
2d1e5e0
fix tests and format
jdsgomes Mar 28, 2022
07e9f19
fix flake8
jdsgomes Mar 28, 2022
09f630c
Merge branch 'main' into revert_some_exceptions
jdsgomes Mar 28, 2022
fb2d9a9
remove one last exception
jdsgomes Mar 28, 2022
4540ca2
Merge branch 'revert_some_exceptions' of github.com:jdsgomes/vision i…
jdsgomes Mar 28, 2022
7f406e3
fix error
jdsgomes Mar 28, 2022
e82b90e
remove unused import
jdsgomes Mar 29, 2022
25eda42
replace fake returns by else
jdsgomes Mar 29, 2022
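
Taken together, these commits revert a batch of recently introduced ValueError/TypeError raises in the detection models back to assertions, now written as torch._assert so the checks survive torch.fx symbolic tracing and TorchScript. A minimal sketch of the difference in behavior; the check_boxes helper below is hypothetical and only for illustration:

import torch

def check_boxes(boxes: torch.Tensor) -> torch.Tensor:
    # In eager mode torch._assert behaves like a plain `assert`,
    # raising AssertionError when the condition is False; unlike a
    # data-dependent `raise ValueError(...)`, it is also understood
    # by torch.fx symbolic tracing.
    torch._assert(boxes.dim() == 2 and boxes.shape[-1] == 4, "Expected boxes of shape [N, 4].")
    return boxes

check_boxes(torch.rand(3, 4))    # passes silently
# check_boxes(torch.rand(3, 5))  # raises AssertionError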
8 changes: 4 additions & 4 deletions test/test_models.py
@@ -745,24 +745,24 @@ def test_detection_model_validation(model_fn):
     x = [torch.rand(input_shape)]
 
     # validate that targets are present in training
-    with pytest.raises(ValueError):
+    with pytest.raises(AssertionError):
         model(x)
 
     # validate type
     targets = [{"boxes": 0.0}]
-    with pytest.raises(TypeError):
+    with pytest.raises(AssertionError):
         model(x, targets=targets)
 
     # validate boxes shape
     for boxes in (torch.rand((4,)), torch.rand((1, 5))):
         targets = [{"boxes": boxes}]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             model(x, targets=targets)
 
     # validate that no degenerate boxes are present
     boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
     targets = [{"boxes": boxes}]
-    with pytest.raises(ValueError):
+    with pytest.raises(AssertionError):
         model(x, targets=targets)
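
The expected exception type flips from ValueError/TypeError to AssertionError because torch._assert raises AssertionError when its condition fails in eager mode. A standalone check of that behavior, not part of this diff:

import pytest
import torch

def test_torch_assert_raises_assertion_error():
    # torch._assert(False, msg) raises AssertionError in eager mode,
    # which is why the models' validation failures change type here.
    with pytest.raises(AssertionError):
        torch._assert(False, "boom")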
2 changes: 1 addition & 1 deletion test/test_models_detection_anchor_utils.py
@@ -16,7 +16,7 @@ def test_incorrect_anchors(self):
         image1 = torch.randn(3, 800, 800)
         image_list = ImageList(image1, [(800, 800)])
         feature_maps = [torch.randn(1, 50)]
-        pytest.raises(ValueError, anc, image_list, feature_maps)
+        pytest.raises(AssertionError, anc, image_list, feature_maps)
 
     def _init_test_anchor_generator(self):
         anchor_sizes = ((10,),)
4 changes: 2 additions & 2 deletions test/test_ops.py
@@ -138,13 +138,13 @@ def test_autocast(self, x_dtype, rois_dtype):
 
     def _helper_boxes_shape(self, func):
         # test boxes as Tensor[N, 5]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
             boxes = torch.tensor([[0, 0, 3, 3]], dtype=a.dtype)
             func(a, boxes, output_size=(2, 2))
 
         # test boxes as List[Tensor[N, 4]]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
             boxes = torch.tensor([[0, 0, 3]], dtype=a.dtype)
             ops.roi_pool(a, [boxes], output_size=(2, 2))
20 changes: 12 additions & 8 deletions torchvision/models/detection/_utils.py
@@ -159,10 +159,14 @@ def encode_single(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor:
         return targets
 
     def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor:
-        if not isinstance(boxes, (list, tuple)):
-            raise TypeError(f"This function expects boxes of type list or tuple, instead got {type(boxes)}")
-        if not isinstance(rel_codes, torch.Tensor):
-            raise TypeError(f"This function expects rel_codes of type torch.Tensor, instead got {type(rel_codes)}")
+        torch._assert(
+            isinstance(boxes, (list, tuple)),
+            "This function expects boxes of type list or tuple.",
+        )
+        torch._assert(
+            isinstance(rel_codes, torch.Tensor),
+            "This function expects rel_codes of type torch.Tensor.",
+        )
         boxes_per_image = [b.size(0) for b in boxes]
         concat_boxes = torch.cat(boxes, dim=0)
         box_sum = 0
@@ -335,8 +339,7 @@ def __init__(self, high_threshold: float, low_threshold: float, allow_low_qualit
         """
         self.BELOW_LOW_THRESHOLD = -1
         self.BETWEEN_THRESHOLDS = -2
-        if low_threshold > high_threshold:
-            raise ValueError("low_threshold should be <= high_threshold")
+        torch._assert(low_threshold <= high_threshold, "low_threshold should be <= high_threshold")
         self.high_threshold = high_threshold
         self.low_threshold = low_threshold
         self.allow_low_quality_matches = allow_low_quality_matches
@@ -375,8 +378,9 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor:
 
         if self.allow_low_quality_matches:
             if all_matches is None:
-                raise ValueError("all_matches should not be None")
-            self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
+                torch._assert(False, "all_matches should not be None")
+            else:
+                self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
 
         return matches
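
The torch._assert(False, ...)-plus-else shape in Matcher.__call__ (the "replace fake returns by else" commit) looks odd but is deliberate: under torch.jit.script a failing torch._assert is not treated as terminating the branch, so only the else branch refines Optional[Tensor] down to Tensor. A minimal sketch of the pattern; refine is a made-up function for illustration:

from typing import Optional

import torch

def refine(matches: torch.Tensor, all_matches: Optional[torch.Tensor]) -> torch.Tensor:
    if all_matches is None:
        torch._assert(False, "all_matches should not be None")
    else:
        # Inside the else branch the scripted type of all_matches is
        # refined from Optional[Tensor] to Tensor, so it can be used
        # without an explicit unwrap.
        matches = matches + all_matches
    return matches

scripted = torch.jit.script(refine)  # scripts cleanly thanks to the else branch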
19 changes: 8 additions & 11 deletions torchvision/models/detection/anchor_utils.py
@@ -84,17 +84,14 @@ def num_anchors_per_location(self):
     def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]:
         anchors = []
         cell_anchors = self.cell_anchors
-
-        if cell_anchors is None:
-            ValueError("cell_anchors should not be None")
-
-        if not (len(grid_sizes) == len(strides) == len(cell_anchors)):
-            raise ValueError(
-                "Anchors should be Tuple[Tuple[int]] because each feature "
-                "map could potentially have different sizes and aspect ratios. "
-                "There needs to be a match between the number of "
-                "feature maps passed and the number of sizes / aspect ratios specified."
-            )
+        torch._assert(cell_anchors is not None, "cell_anchors should not be None")
+        torch._assert(
+            len(grid_sizes) == len(strides) == len(cell_anchors),
+            "Anchors should be Tuple[Tuple[int]] because each feature "
+            "map could potentially have different sizes and aspect ratios. "
+            "There needs to be a match between the number of "
+            "feature maps passed and the number of sizes / aspect ratios specified.",
+        )
 
         for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
             grid_height, grid_width = size
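
Note that this hunk also fixes a real pre-existing bug (the "fix for wrong asserts" commit above): the removed line built ValueError("cell_anchors should not be None") but never raised it, so the None check was silently a no-op. A tiny illustration of the pitfall:

import torch

def check_old(x):
    if x is None:
        ValueError("x should not be None")  # exception constructed but never raised: a silent no-op

def check_new(x):
    torch._assert(x is not None, "x should not be None")

check_old(None)    # returns without complaint
# check_new(None)  # raises AssertionError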
9 changes: 5 additions & 4 deletions torchvision/models/detection/faster_rcnn.py
@@ -1,5 +1,6 @@
 from typing import Any, Optional, Union
 
+import torch
 import torch.nn.functional as F
 from torch import nn
 from torchvision.ops import MultiScaleRoIAlign
@@ -313,10 +314,10 @@ def __init__(self, in_channels, num_classes):
 
     def forward(self, x):
         if x.dim() == 4:
-            if list(x.shape[2:]) != [1, 1]:
-                raise ValueError(
-                    f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}"
-                )
+            torch._assert(
+                list(x.shape[2:]) == [1, 1],
+                f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}",
+            )
         x = x.flatten(start_dim=1)
         scores = self.cls_score(x)
         bbox_deltas = self.bbox_pred(x)
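
The new top-level import torch is what brings torch._assert into scope in this file. A quick sanity check of the reshaped guard, using a stripped-down stand-in for the predictor's forward; predictor_forward is a hypothetical helper, for illustration only:

import torch

def predictor_forward(x: torch.Tensor) -> torch.Tensor:
    if x.dim() == 4:
        # mirrors the check in the diff: a 4D input must be [N, C, 1, 1]
        torch._assert(
            list(x.shape[2:]) == [1, 1],
            f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}",
        )
    return x.flatten(start_dim=1)

print(predictor_forward(torch.rand(2, 1024, 1, 1)).shape)  # torch.Size([2, 1024])
# predictor_forward(torch.rand(2, 1024, 7, 7))             # AssertionError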
40 changes: 21 additions & 19 deletions torchvision/models/detection/fcos.py
@@ -565,23 +565,25 @@ def forward(
             like `scores`, `labels` and `mask` (for Mask R-CNN models).
         """
         if self.training:
-
             if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
+                    torch._assert(
+                        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                        f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
+                    )
 
         original_image_sizes: List[Tuple[int, int]] = []
         for img in images:
             val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
             original_image_sizes.append((val[0], val[1]))
 
         # transform the input
@@ -596,9 +598,9 @@
                     # print the first degenerate box
                     bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
-                        "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
+                    torch._assert(
+                        False,
+                        f"All bounding boxes should have positive height and width. Found invalid box {degen_bb} for target at index {target_idx}.",
                     )
 
         # get the features from the backbone
@@ -619,11 +621,11 @@
         losses = {}
         detections: List[Dict[str, Tensor]] = []
         if self.training:
-            # compute the losses
             if targets is None:
-                raise ValueError("targets should not be none when in training mode")
-
-            losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level)
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                # compute the losses
+                losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level)
         else:
             # split outputs per level
             split_head_outputs: Dict[str, List[Tensor]] = {}
33 changes: 18 additions & 15 deletions torchvision/models/detection/generalized_rcnn.py
@@ -59,23 +59,25 @@ def forward(self, images, targets=None):
         """
         if self.training:
             if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    if isinstance(boxes, torch.Tensor):
+                        torch._assert(
+                            len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                            f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
+                        )
+                    else:
+                        torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
 
         original_image_sizes: List[Tuple[int, int]] = []
         for img in images:
             val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"Expecting the last two dimensions of the input tensor to be H and W, instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
             original_image_sizes.append((val[0], val[1]))
 
         images, targets = self.transform(images, targets)
@@ -90,9 +92,10 @@
                 # print the first degenerate box
                 bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                 degen_bb: List[float] = boxes[bb_idx].tolist()
-                raise ValueError(
+                torch._assert(
+                    False,
                     "All bounding boxes should have positive height and width."
-                    f" Found invalid box {degen_bb} for target at index {target_idx}."
+                    f" Found invalid box {degen_bb} for target at index {target_idx}.",
                 )
 
         features = self.backbone(images.tensors)
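
degenerate_boxes in this hunk is computed a few lines above the visible context; in torchvision it flags boxes whose x2 <= x1 or y2 <= y1. A self-contained sketch of the check as it now behaves, with scaffolding added around the two lines shown in the diff:

import torch

boxes = torch.tensor([[1.0, 3.0, 1.0, 4.0],   # zero width  (x2 == x1)
                      [2.0, 4.0, 3.0, 4.0]])  # zero height (y2 == y1)
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]  # True where x2 <= x1 or y2 <= y1
if degenerate_boxes.any():
    # report only the first offending box, as the diff does
    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
    degen_bb = boxes[bb_idx].tolist()
    # running this snippet raises AssertionError for the first box
    torch._assert(False, f"All bounding boxes should have positive height and width. Found invalid box {degen_bb}.")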
40 changes: 20 additions & 20 deletions torchvision/models/detection/retinanet.py
@@ -494,28 +494,26 @@ def forward(self, images, targets=None):
             like `scores`, `labels` and `mask` (for Mask R-CNN models).
 
         """
-        if self.training and targets is None:
-            raise ValueError("In training mode, targets should be passed")
-
         if self.training:
             if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
+                    torch._assert(
+                        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                        "Expected target boxes to be a tensor of shape [N, 4].",
+                    )
 
         # get the original image sizes
         original_image_sizes: List[Tuple[int, int]] = []
         for img in images:
             val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"Expecting the two last elements of the input tensors to be H and W instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
             original_image_sizes.append((val[0], val[1]))
 
         # transform the input
@@ -531,9 +529,9 @@
                 # print the first degenerate box
                 bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                 degen_bb: List[float] = boxes[bb_idx].tolist()
-                raise ValueError(
+                torch._assert(
+                    False,
                     "All bounding boxes should have positive height and width."
-                    f" Found invalid box {degen_bb} for target at index {target_idx}."
+                    f" Found invalid box {degen_bb} for target at index {target_idx}.",
                 )
 
         # get the features from the backbone
@@ -554,9 +553,10 @@
         detections: List[Dict[str, Tensor]] = []
         if self.training:
             if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            # compute the losses
-            losses = self.compute_loss(targets, head_outputs, anchors)
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                # compute the losses
+                losses = self.compute_loss(targets, head_outputs, anchors)
         else:
             # recover level sizes
             num_anchors_per_level = [x.size(2) * x.size(3) for x in features]
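
The practical payoff of the whole change is that the detection models keep their input validation while remaining scriptable. A usage sketch, assuming a torchvision build that includes these changes; note that scripted detection models return both losses and detections:

import torch
import torchvision

model = torchvision.models.detection.retinanet_resnet50_fpn(pretrained=False).eval()
scripted = torch.jit.script(model)  # the torch._assert checks stay in the scripted graph
losses, detections = scripted([torch.rand(3, 320, 320)])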