Unverified Commit 61f82669 authored by Joao Gomes, committed by GitHub

reverting some recently introduced exceptions (#5659)



* reverting some recently introduced exceptions

* Update torchvision/ops/poolers.py
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>

* address PR comments

* replace one more assert with torch._assert

* address PR comments

* make type checker happy

* Fix bug

* fix bug

* fix for wrong asserts

* attempt to make tests pass

* Fix test_ops tests

* Fix expected exception in tests

* fix typo

* fix tests and format

* fix flake8

* remove one last exception

* fix error

* remove unused import

* replace fake returns by else
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 1db87957
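The pattern applied throughout the diff below is a swap of raise ValueError(...) / raise TypeError(...) statements for torch._assert(condition, message) calls. A minimal sketch of the difference (validate_boxes is a hypothetical helper, not part of the commit): in eager mode torch._assert raises AssertionError when the condition is false, and, unlike a plain raise statement, it is a symbolically traceable call that torch.fx and TorchScript understand.

import torch

def validate_boxes(boxes: torch.Tensor) -> None:
    # Hypothetical helper mirroring the checks in this diff.
    # Eager mode: raises AssertionError if the condition is False.
    torch._assert(
        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
        "Expected target boxes to be a tensor of shape [N, 4].",
    )

validate_boxes(torch.rand(8, 4))  # passes silently; torch.rand(8, 5) would assert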
@@ -745,24 +745,24 @@ def test_detection_model_validation(model_fn):
     x = [torch.rand(input_shape)]
     # validate that targets are present in training
-    with pytest.raises(ValueError):
+    with pytest.raises(AssertionError):
         model(x)
     # validate type
     targets = [{"boxes": 0.0}]
-    with pytest.raises(TypeError):
+    with pytest.raises(AssertionError):
         model(x, targets=targets)
     # validate boxes shape
     for boxes in (torch.rand((4,)), torch.rand((1, 5))):
         targets = [{"boxes": boxes}]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             model(x, targets=targets)
     # validate that no degenerate boxes are present
     boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
     targets = [{"boxes": boxes}]
-    with pytest.raises(ValueError):
+    with pytest.raises(AssertionError):
         model(x, targets=targets)
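A note on the test hunks above and below: in eager execution torch._assert is a wrapper around Python's assert, so a failed check now surfaces as AssertionError rather than ValueError or TypeError, and the expected exception types in the tests change accordingly. A standalone illustration (not part of the commit):

import pytest
import torch

def test_torch_assert_raises_assertion_error():
    # torch._assert(False, ...) fails unconditionally in eager mode.
    with pytest.raises(AssertionError):
        torch._assert(False, "targets should not be none when in training mode")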
@@ -16,7 +16,7 @@ class Tester:
         image1 = torch.randn(3, 800, 800)
         image_list = ImageList(image1, [(800, 800)])
         feature_maps = [torch.randn(1, 50)]
-        pytest.raises(ValueError, anc, image_list, feature_maps)
+        pytest.raises(AssertionError, anc, image_list, feature_maps)
     def _init_test_anchor_generator(self):
         anchor_sizes = ((10,),)
@@ -138,13 +138,13 @@ class RoIOpTester(ABC):
     def _helper_boxes_shape(self, func):
         # test boxes as Tensor[N, 5]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
             boxes = torch.tensor([[0, 0, 3, 3]], dtype=a.dtype)
             func(a, boxes, output_size=(2, 2))
         # test boxes as List[Tensor[N, 4]]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
             boxes = torch.tensor([[0, 0, 3]], dtype=a.dtype)
             ops.roi_pool(a, [boxes], output_size=(2, 2))
@@ -159,10 +159,14 @@ class BoxCoder:
         return targets
     def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor:
-        if not isinstance(boxes, (list, tuple)):
-            raise TypeError(f"This function expects boxes of type list or tuple, instead got {type(boxes)}")
-        if not isinstance(rel_codes, torch.Tensor):
-            raise TypeError(f"This function expects rel_codes of type torch.Tensor, instead got {type(rel_codes)}")
+        torch._assert(
+            isinstance(boxes, (list, tuple)),
+            "This function expects boxes of type list or tuple.",
+        )
+        torch._assert(
+            isinstance(rel_codes, torch.Tensor),
+            "This function expects rel_codes of type torch.Tensor.",
+        )
         boxes_per_image = [b.size(0) for b in boxes]
         concat_boxes = torch.cat(boxes, dim=0)
         box_sum = 0
@@ -335,8 +339,7 @@ class Matcher:
         """
         self.BELOW_LOW_THRESHOLD = -1
         self.BETWEEN_THRESHOLDS = -2
-        if low_threshold > high_threshold:
-            raise ValueError("low_threshold should be <= high_threshold")
+        torch._assert(low_threshold <= high_threshold, "low_threshold should be <= high_threshold")
         self.high_threshold = high_threshold
         self.low_threshold = low_threshold
         self.allow_low_quality_matches = allow_low_quality_matches
@@ -375,8 +378,9 @@
         if self.allow_low_quality_matches:
             if all_matches is None:
-                raise ValueError("all_matches should not be None")
-            self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
+                torch._assert(False, "all_matches should not be None")
+            else:
+                self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
         return matches
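The else: introduced above (and echoed in the model hunks below) is the "replace fake returns by else" item from the commit message: torch._assert(False, ...) is an ordinary function call, so unlike a raise it does not end the branch as far as TorchScript and type checkers are concerned, and the happy path has to live under else: to keep all_matches narrowed to a Tensor. A sketch of the idea (use_matches is a hypothetical function):

from typing import Optional

import torch

def use_matches(matches: torch.Tensor, all_matches: Optional[torch.Tensor]) -> torch.Tensor:
    if all_matches is None:
        torch._assert(False, "all_matches should not be None")
    else:
        # In this branch all_matches is statically known to be a Tensor.
        matches = matches + all_matches
    return matches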
@@ -84,17 +84,14 @@ class AnchorGenerator(nn.Module):
     def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]:
         anchors = []
         cell_anchors = self.cell_anchors
-        if cell_anchors is None:
-            ValueError("cell_anchors should not be None")
-        if not (len(grid_sizes) == len(strides) == len(cell_anchors)):
-            raise ValueError(
-                "Anchors should be Tuple[Tuple[int]] because each feature "
-                "map could potentially have different sizes and aspect ratios. "
-                "There needs to be a match between the number of "
-                "feature maps passed and the number of sizes / aspect ratios specified."
-            )
+        torch._assert(cell_anchors is not None, "cell_anchors should not be None")
+        torch._assert(
+            len(grid_sizes) == len(strides) == len(cell_anchors),
+            "Anchors should be Tuple[Tuple[int]] because each feature "
+            "map could potentially have different sizes and aspect ratios. "
+            "There needs to be a match between the number of "
+            "feature maps passed and the number of sizes / aspect ratios specified.",
+        )
         for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
             grid_height, grid_width = size
 from typing import Any, Optional, Union
+import torch
 import torch.nn.functional as F
 from torch import nn
 from torchvision.ops import MultiScaleRoIAlign
@@ -313,10 +314,10 @@ class FastRCNNPredictor(nn.Module):
     def forward(self, x):
         if x.dim() == 4:
-            if list(x.shape[2:]) != [1, 1]:
-                raise ValueError(
-                    f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}"
-                )
+            torch._assert(
+                list(x.shape[2:]) == [1, 1],
+                f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}",
+            )
         x = x.flatten(start_dim=1)
         scores = self.cls_score(x)
         bbox_deltas = self.bbox_pred(x)
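For context on the FastRCNNPredictor check above: a 4-D input is expected to be the pooled box-head output of shape [N, C, 1, 1], which forward then flattens to [N, C]. A small illustration (the shapes are chosen arbitrarily):

import torch

x = torch.rand(2, 1024, 1, 1)  # typical pooled features: batch of 2, 1024 channels
assert list(x.shape[2:]) == [1, 1]  # the condition torch._assert now enforces
print(x.flatten(start_dim=1).shape)  # torch.Size([2, 1024])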
@@ -565,23 +565,25 @@ class FCOS(nn.Module):
                 like `scores`, `labels` and `mask` (for Mask R-CNN models).
         """
         if self.training:
             if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
+                    torch._assert(
+                        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                        f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
+                    )
         original_image_sizes: List[Tuple[int, int]] = []
         for img in images:
             val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
             original_image_sizes.append((val[0], val[1]))
         # transform the input
@@ -596,9 +598,9 @@ class FCOS(nn.Module):
                     # print the first degenerate box
                     bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
-                        "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
+                    torch._assert(
+                        False,
+                        f"All bounding boxes should have positive height and width. Found invalid box {degen_bb} for target at index {target_idx}.",
                     )
         # get the features from the backbone
@@ -619,11 +621,11 @@ class FCOS(nn.Module):
         losses = {}
         detections: List[Dict[str, Tensor]] = []
         if self.training:
-            # compute the losses
             if targets is None:
-                raise ValueError("targets should not be none when in training mode")
-            losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level)
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                # compute the losses
+                losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level)
         else:
             # split outputs per level
             split_head_outputs: Dict[str, List[Tensor]] = {}
@@ -59,23 +59,25 @@ class GeneralizedRCNN(nn.Module):
         """
         if self.training:
             if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    if isinstance(boxes, torch.Tensor):
+                        torch._assert(
+                            len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                            f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
+                        )
+                    else:
+                        torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
         original_image_sizes: List[Tuple[int, int]] = []
         for img in images:
             val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"Expecting the last two dimensions of the input tensor to be H and W, instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
             original_image_sizes.append((val[0], val[1]))
         images, targets = self.transform(images, targets)
@@ -90,9 +92,10 @@ class GeneralizedRCNN(nn.Module):
                     # print the first degenerate box
                     bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
+                    torch._assert(
+                        False,
                         "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
+                        f" Found invalid box {degen_bb} for target at index {target_idx}.",
                     )
         features = self.backbone(images.tensors)
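For context on the degenerate-box hunks: the check that precedes them (unchanged by this commit) flags boxes whose x2 <= x1 or y2 <= y1, and the assert reports the first offender. A minimal reproduction using the same boxes as the test at the top of this diff:

import torch

boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]  # compare (x2, y2) against (x1, y1)
print(degenerate_boxes.any())  # tensor(True)
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
print(boxes[bb_idx].tolist())  # [1, 3, 1, 4] -> reported in the assert message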
@@ -494,28 +494,26 @@ class RetinaNet(nn.Module):
                 like `scores`, `labels` and `mask` (for Mask R-CNN models).
         """
-        if self.training and targets is None:
-            raise ValueError("In training mode, targets should be passed")
         if self.training:
             if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
+                    torch._assert(
+                        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                        "Expected target boxes to be a tensor of shape [N, 4].",
+                    )
         # get the original image sizes
         original_image_sizes: List[Tuple[int, int]] = []
         for img in images:
             val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"Expecting the two last elements of the input tensors to be H and W instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
             original_image_sizes.append((val[0], val[1]))
         # transform the input
@@ -531,9 +529,10 @@ class RetinaNet(nn.Module):
                     # print the first degenerate box
                     bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
+                    torch._assert(
+                        False,
                         "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
+                        f" Found invalid box {degen_bb} for target at index {target_idx}.",
                     )
         # get the features from the backbone
@@ -554,9 +553,10 @@ class RetinaNet(nn.Module):
         detections: List[Dict[str, Tensor]] = []
         if self.training:
             if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            # compute the losses
-            losses = self.compute_loss(targets, head_outputs, anchors)
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                # compute the losses
+                losses = self.compute_loss(targets, head_outputs, anchors)
         else:
             # recover level sizes
             num_anchors_per_level = [x.size(2) * x.size(3) for x in features]
@@ -322,28 +322,28 @@ class SSD(nn.Module):
     def forward(
         self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None
     ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:
-        if self.training and targets is None:
-            raise ValueError("In training mode, targets should be passed")
         if self.training:
             if targets is None:
-                raise ValueError("targets should not be None")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    if isinstance(boxes, torch.Tensor):
+                        torch._assert(
+                            len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                            f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
+                        )
+                    else:
+                        torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
         # get the original image sizes
         original_image_sizes: List[Tuple[int, int]] = []
         for img in images:
             val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"The last two dimensions of the input tensors should contain H and W, instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
             original_image_sizes.append((val[0], val[1]))
         # transform the input
@@ -357,9 +357,10 @@ class SSD(nn.Module):
                 if degenerate_boxes.any():
                     bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
+                    torch._assert(
+                        False,
                         "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
+                        f" Found invalid box {degen_bb} for target at index {target_idx}.",
                     )
         # get the features from the backbone
@@ -378,21 +379,23 @@ class SSD(nn.Module):
         losses = {}
         detections: List[Dict[str, Tensor]] = []
         if self.training:
-            if targets is None:
-                raise ValueError("targets should not be None when in training mode")
             matched_idxs = []
-            for anchors_per_image, targets_per_image in zip(anchors, targets):
-                if targets_per_image["boxes"].numel() == 0:
-                    matched_idxs.append(
-                        torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device)
-                    )
-                    continue
-                match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image)
-                matched_idxs.append(self.proposal_matcher(match_quality_matrix))
-            losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs)
+            if targets is None:
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for anchors_per_image, targets_per_image in zip(anchors, targets):
+                    if targets_per_image["boxes"].numel() == 0:
+                        matched_idxs.append(
+                            torch.full(
+                                (anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device
+                            )
+                        )
+                        continue
+                    match_quality_matrix = box_ops.box_iou(targets_per_image["boxes"], anchors_per_image)
+                    matched_idxs.append(self.proposal_matcher(match_quality_matrix))
+                losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs)
         else:
             detections = self.postprocess_detections(head_outputs, anchors, images.image_sizes)
         detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
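For context on the SSD matching loop above: box_ops.box_iou builds an IoU matrix between ground-truth boxes and anchors, and the proposal matcher turns each anchor's best column into a ground-truth index (or a negative flag such as -1 for unmatched anchors, mirroring the torch.full fallback). A toy run with torchvision's public box_iou:

import torch
from torchvision.ops import box_iou

gt = torch.tensor([[0.0, 0.0, 10.0, 10.0]])  # one ground-truth box
anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]])
print(box_iou(gt, anchors))  # tensor([[1., 0.]]): anchor 0 overlaps fully, anchor 1 not at all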
@@ -550,8 +553,10 @@ def _vgg_extractor(backbone: VGG, highres: bool, trainable_layers: int):
     num_stages = len(stage_indices)
     # find the index of the layer from which we wont freeze
-    if not 0 <= trainable_layers <= num_stages:
-        raise ValueError(f"trainable_layers should be in the range [0, {num_stages}]. Instead got {trainable_layers}")
+    torch._assert(
+        0 <= trainable_layers <= num_stages,
+        f"trainable_layers should be in the range [0, {num_stages}]. Instead got {trainable_layers}",
+    )
     freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
     for b in backbone[:freeze_before]:
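For context on the _vgg_extractor hunk: trainable_layers counts backbone stages, from the last one backwards, that stay trainable, and every layer before freeze_before is frozen. A numeric sketch (the stage_indices values are made up for illustration):

num_stages = 5
stage_indices = [0, 4, 9, 16, 23]  # hypothetical layer index at which each stage starts
trainable_layers = 2
assert 0 <= trainable_layers <= num_stages  # the condition the new torch._assert enforces
freeze_before = stage_indices[num_stages - trainable_layers]
print(freeze_before)  # 16 -> layers [0, 16) are frozen, the last two stages train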
@@ -134,10 +134,10 @@ class GeneralizedRCNNTransform(nn.Module):
         images = self.batch_images(images, size_divisible=self.size_divisible)
         image_sizes_list: List[Tuple[int, int]] = []
         for image_size in image_sizes:
-            if len(image_size) != 2:
-                raise ValueError(
-                    f"Input tensors expected to have in the last two elements H and W, instead got {image_size}"
-                )
+            torch._assert(
+                len(image_size) == 2,
+                f"Input tensors expected to have in the last two elements H and W, instead got {image_size}",
+            )
             image_sizes_list.append((image_size[0], image_size[1]))
         image_list = ImageList(images, image_sizes_list)
@@ -28,13 +28,13 @@ def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
 def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
     if isinstance(boxes, (list, tuple)):
         for _tensor in boxes:
-            if _tensor.size(1) != 4:
-                raise ValueError("The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]].")
+            torch._assert(
+                _tensor.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]"
+            )
     elif isinstance(boxes, torch.Tensor):
-        if boxes.size(1) != 5:
-            raise ValueError("The boxes tensor shape is not correct as Tensor[K, 5]/")
+        torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]")
     else:
-        raise TypeError(f"boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]], instead got {type(boxes)}")
+        torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]")
     return
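Finally, a quick way to see the payoff of the change: these detection code paths are exercised under torch.jit.script, and torch._assert keeps the runtime checks when a module is scripted. A minimal sketch, assuming a current PyTorch install (the Checked module is hypothetical):

import torch

class Checked(torch.nn.Module):
    def forward(self, boxes: torch.Tensor) -> torch.Tensor:
        # Same style of check as check_roi_boxes_shape above.
        torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]")
        return boxes

scripted = torch.jit.script(Checked())  # the assert survives scripting
print(scripted(torch.rand(3, 5)).shape)  # torch.Size([3, 5])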