Unverified commit 61f82669 authored by Joao Gomes, committed by GitHub

reverting some recently introduced exceptions (#5659)



* reverting some recently introduced exceptions

* Update torchvision/ops/poolers.py
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>

* address PR comments

* replace one more assert with torch._assert

* address PR comments

* make type checker happy

* Fix bug

* fix bug

* fix for wrong asserts

* attempt to make tests pass

* Fix test_ops tests

* Fix expected exception in tests

* fix typo

* fix tests and format

* fix flake8

* remove one last exception

* fix error

* remove unused import

* replace fake returns by else
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent 1db87957
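For context on the pattern this PR applies throughout: `torch._assert` is PyTorch's symbolically traceable wrapper around Python's `assert`; unlike a plain `raise`, it keeps the detection models friendly to scripting and tracing while still failing loudly in eager mode. Below is a minimal sketch of the before/after shape of these changes — the helper name `validate_boxes` is made up for illustration and is not part of the diff:

```python
import torch


def validate_boxes(boxes: torch.Tensor) -> None:
    # Before this PR: a plain exception, which tracing tools treat as
    # opaque control flow.
    #   if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
    #       raise ValueError(f"Expected boxes of shape [N, 4], got {boxes.shape}.")
    #
    # After this PR: torch._assert, which raises AssertionError in eager
    # mode when the condition is False and stays traceable.
    torch._assert(
        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
        f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
    )


validate_boxes(torch.rand(3, 4))  # passes silently
```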
@@ -745,24 +745,24 @@ def test_detection_model_validation(model_fn):
     x = [torch.rand(input_shape)]

     # validate that targets are present in training
-    with pytest.raises(ValueError):
+    with pytest.raises(AssertionError):
         model(x)

     # validate type
     targets = [{"boxes": 0.0}]
-    with pytest.raises(TypeError):
+    with pytest.raises(AssertionError):
         model(x, targets=targets)

     # validate boxes shape
     for boxes in (torch.rand((4,)), torch.rand((1, 5))):
         targets = [{"boxes": boxes}]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             model(x, targets=targets)

     # validate that no degenerate boxes are present
     boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
     targets = [{"boxes": boxes}]
-    with pytest.raises(ValueError):
+    with pytest.raises(AssertionError):
         model(x, targets=targets)
...
@@ -16,7 +16,7 @@ class Tester:
         image1 = torch.randn(3, 800, 800)
         image_list = ImageList(image1, [(800, 800)])
         feature_maps = [torch.randn(1, 50)]
-        pytest.raises(ValueError, anc, image_list, feature_maps)
+        pytest.raises(AssertionError, anc, image_list, feature_maps)

     def _init_test_anchor_generator(self):
         anchor_sizes = ((10,),)
...
@@ -138,13 +138,13 @@ class RoIOpTester(ABC):
     def _helper_boxes_shape(self, func):
         # test boxes as Tensor[N, 5]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
             boxes = torch.tensor([[0, 0, 3, 3]], dtype=a.dtype)
             func(a, boxes, output_size=(2, 2))

         # test boxes as List[Tensor[N, 4]]
-        with pytest.raises(ValueError):
+        with pytest.raises(AssertionError):
             a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8)
             boxes = torch.tensor([[0, 0, 3]], dtype=a.dtype)
             ops.roi_pool(a, [boxes], output_size=(2, 2))
...
@@ -159,10 +159,14 @@ class BoxCoder:
         return targets

     def decode(self, rel_codes: Tensor, boxes: List[Tensor]) -> Tensor:
-        if not isinstance(boxes, (list, tuple)):
-            raise TypeError(f"This function expects boxes of type list or tuple, instead got {type(boxes)}")
-        if not isinstance(rel_codes, torch.Tensor):
-            raise TypeError(f"This function expects rel_codes of type torch.Tensor, instead got {type(rel_codes)}")
+        torch._assert(
+            isinstance(boxes, (list, tuple)),
+            "This function expects boxes of type list or tuple.",
+        )
+        torch._assert(
+            isinstance(rel_codes, torch.Tensor),
+            "This function expects rel_codes of type torch.Tensor.",
+        )
         boxes_per_image = [b.size(0) for b in boxes]
         concat_boxes = torch.cat(boxes, dim=0)
         box_sum = 0
...
@@ -335,8 +339,7 @@ class Matcher:
        """
        self.BELOW_LOW_THRESHOLD = -1
        self.BETWEEN_THRESHOLDS = -2
-        if low_threshold > high_threshold:
-            raise ValueError("low_threshold should be <= high_threshold")
+        torch._assert(low_threshold <= high_threshold, "low_threshold should be <= high_threshold")
        self.high_threshold = high_threshold
        self.low_threshold = low_threshold
        self.allow_low_quality_matches = allow_low_quality_matches
...
@@ -375,7 +378,8 @@ class Matcher:
        if self.allow_low_quality_matches:
            if all_matches is None:
-                raise ValueError("all_matches should not be None")
-            self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)
+                torch._assert(False, "all_matches should not be None")
+            else:
+                self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)

        return matches
...
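One non-obvious consequence, visible in the `Matcher` hunk above and echoed by the "make type checker happy" and "replace fake returns by else" commits: `torch._assert(False, ...)` is an ordinary function call, so mypy and TorchScript do not treat it as terminating the branch the way `raise` does. The Optional-narrowing therefore has to come from an explicit `else:` block. A small self-contained sketch of the issue — the function `clamp_matches` is hypothetical, not from the diff:

```python
from typing import Optional

import torch
from torch import Tensor


def clamp_matches(matches: Tensor, all_matches: Optional[Tensor]) -> Tensor:
    if all_matches is None:
        # Unlike `raise`, this call is not recognized as terminating the
        # branch, so code after the `if` would still see Optional[Tensor].
        torch._assert(False, "all_matches should not be None")
    else:
        # Inside the else: block, `all_matches` is narrowed to Tensor.
        matches = torch.minimum(matches, all_matches)
    return matches
```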
@@ -84,16 +84,13 @@ class AnchorGenerator(nn.Module):
     def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]:
         anchors = []
         cell_anchors = self.cell_anchors
-
-        if cell_anchors is None:
-            ValueError("cell_anchors should not be None")
-
-        if not (len(grid_sizes) == len(strides) == len(cell_anchors)):
-            raise ValueError(
+        torch._assert(cell_anchors is not None, "cell_anchors should not be None")
+        torch._assert(
+            len(grid_sizes) == len(strides) == len(cell_anchors),
             "Anchors should be Tuple[Tuple[int]] because each feature "
             "map could potentially have different sizes and aspect ratios. "
             "There needs to be a match between the number of "
-            "feature maps passed and the number of sizes / aspect ratios specified."
+            "feature maps passed and the number of sizes / aspect ratios specified.",
         )

         for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
...
 from typing import Any, Optional, Union

+import torch
 import torch.nn.functional as F
 from torch import nn

 from torchvision.ops import MultiScaleRoIAlign
...
@@ -313,9 +314,9 @@ class FastRCNNPredictor(nn.Module):
     def forward(self, x):
         if x.dim() == 4:
-            if list(x.shape[2:]) != [1, 1]:
-                raise ValueError(
-                    f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}"
-                )
+            torch._assert(
+                list(x.shape[2:]) == [1, 1],
+                f"x has the wrong shape, expecting the last two dimensions to be [1,1] instead of {list(x.shape[2:])}",
+            )
         x = x.flatten(start_dim=1)
         scores = self.cls_score(x)
...
@@ -565,22 +565,24 @@ class FCOS(nn.Module):
                 like `scores`, `labels` and `mask` (for Mask R-CNN models).
        """
        if self.training:
            if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
+                    torch._assert(
+                        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                        f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
+                    )

        original_image_sizes: List[Tuple[int, int]] = []
        for img in images:
            val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
            original_image_sizes.append((val[0], val[1]))
...
@@ -596,9 +598,9 @@ class FCOS(nn.Module):
                    # print the first degenerate box
                    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                    degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
-                        "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
-                    )
+                    torch._assert(
+                        False,
+                        f"All bounding boxes should have positive height and width. Found invalid box {degen_bb} for target at index {target_idx}.",
+                    )

        # get the features from the backbone
...
@@ -619,10 +621,10 @@ class FCOS(nn.Module):
        losses = {}
        detections: List[Dict[str, Tensor]] = []
        if self.training:
-            # compute the losses
            if targets is None:
-                raise ValueError("targets should not be none when in training mode")
-            losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level)
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                # compute the losses
+                losses = self.compute_loss(targets, head_outputs, anchors, num_anchors_per_level)
        else:
            # split outputs per level
...
@@ -59,22 +59,24 @@ class GeneralizedRCNN(nn.Module):
        """
        if self.training:
            if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    if isinstance(boxes, torch.Tensor):
+                        torch._assert(
+                            len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                            f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
+                        )
+                    else:
+                        torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.")

        original_image_sizes: List[Tuple[int, int]] = []
        for img in images:
            val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"Expecting the last two dimensions of the input tensor to be H and W, instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
            original_image_sizes.append((val[0], val[1]))
...
@@ -90,9 +92,10 @@ class GeneralizedRCNN(nn.Module):
                    # print the first degenerate box
                    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                    degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
+                    torch._assert(
+                        False,
                        "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
+                        f" Found invalid box {degen_bb} for target at index {target_idx}.",
                    )

        features = self.backbone(images.tensors)
...
@@ -494,27 +494,25 @@ class RetinaNet(nn.Module):
                like `scores`, `labels` and `mask` (for Mask R-CNN models).
        """
-        if self.training and targets is None:
-            raise ValueError("In training mode, targets should be passed")
-
        if self.training:
            if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    torch._assert(isinstance(boxes, torch.Tensor), "Expected target boxes to be of type Tensor.")
+                    torch._assert(
+                        len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                        "Expected target boxes to be a tensor of shape [N, 4].",
+                    )

        # get the original image sizes
        original_image_sizes: List[Tuple[int, int]] = []
        for img in images:
            val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"Expecting the two last elements of the input tensors to be H and W instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
            original_image_sizes.append((val[0], val[1]))
...
@@ -531,9 +529,10 @@ class RetinaNet(nn.Module):
                    # print the first degenerate box
                    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                    degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
+                    torch._assert(
+                        False,
                        "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
+                        f" Found invalid box {degen_bb} for target at index {target_idx}.",
                    )

        # get the features from the backbone
...
@@ -554,7 +553,8 @@ class RetinaNet(nn.Module):
        detections: List[Dict[str, Tensor]] = []
        if self.training:
            if targets is None:
-                raise ValueError("In training mode, targets should be passed")
-            # compute the losses
-            losses = self.compute_loss(targets, head_outputs, anchors)
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                # compute the losses
+                losses = self.compute_loss(targets, head_outputs, anchors)
        else:
...
@@ -322,27 +322,27 @@ class SSD(nn.Module):
    def forward(
        self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None
    ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]:
-        if self.training and targets is None:
-            raise ValueError("In training mode, targets should be passed")
-
        if self.training:
            if targets is None:
-                raise ValueError("targets should not be None")
-            for target in targets:
-                boxes = target["boxes"]
-                if isinstance(boxes, torch.Tensor):
-                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
-                        raise ValueError(f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.")
-                else:
-                    raise TypeError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for target in targets:
+                    boxes = target["boxes"]
+                    if isinstance(boxes, torch.Tensor):
+                        torch._assert(
+                            len(boxes.shape) == 2 and boxes.shape[-1] == 4,
+                            f"Expected target boxes to be a tensor of shape [N, 4], got {boxes.shape}.",
+                        )
+                    else:
+                        torch._assert(False, f"Expected target boxes to be of type Tensor, got {type(boxes)}.")

        # get the original image sizes
        original_image_sizes: List[Tuple[int, int]] = []
        for img in images:
            val = img.shape[-2:]
-            if len(val) != 2:
-                raise ValueError(
-                    f"The last two dimensions of the input tensors should contain H and W, instead got {img.shape[-2:]}"
-                )
+            torch._assert(
+                len(val) == 2,
+                f"expecting the last two dimensions of the Tensor to be H and W instead got {img.shape[-2:]}",
+            )
            original_image_sizes.append((val[0], val[1]))
...
@@ -357,9 +357,10 @@ class SSD(nn.Module):
                if degenerate_boxes.any():
                    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                    degen_bb: List[float] = boxes[bb_idx].tolist()
-                    raise ValueError(
+                    torch._assert(
+                        False,
                        "All bounding boxes should have positive height and width."
-                        f" Found invalid box {degen_bb} for target at index {target_idx}."
+                        f" Found invalid box {degen_bb} for target at index {target_idx}.",
                    )

        # get the features from the backbone
...
@@ -378,14 +379,16 @@ class SSD(nn.Module):
        losses = {}
        detections: List[Dict[str, Tensor]] = []
        if self.training:
-            if targets is None:
-                raise ValueError("targets should not be None when in training mode")
-
            matched_idxs = []
-            for anchors_per_image, targets_per_image in zip(anchors, targets):
-                if targets_per_image["boxes"].numel() == 0:
-                    matched_idxs.append(
-                        torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device)
-                    )
-                    continue
+            if targets is None:
+                torch._assert(False, "targets should not be none when in training mode")
+            else:
+                for anchors_per_image, targets_per_image in zip(anchors, targets):
+                    if targets_per_image["boxes"].numel() == 0:
+                        matched_idxs.append(
+                            torch.full(
+                                (anchors_per_image.size(0),), -1, dtype=torch.int64, device=anchors_per_image.device
+                            )
+                        )
+                        continue
...
@@ -550,8 +553,10 @@ def _vgg_extractor(backbone: VGG, highres: bool, trainable_layers: int):
    num_stages = len(stage_indices)

    # find the index of the layer from which we wont freeze
-    if not 0 <= trainable_layers <= num_stages:
-        raise ValueError(f"trainable_layers should be in the range [0, {num_stages}]. Instead got {trainable_layers}")
+    torch._assert(
+        0 <= trainable_layers <= num_stages,
+        f"trainable_layers should be in the range [0, {num_stages}]. Instead got {trainable_layers}",
+    )
    freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]

    for b in backbone[:freeze_before]:
...
@@ -134,9 +134,9 @@ class GeneralizedRCNNTransform(nn.Module):
        images = self.batch_images(images, size_divisible=self.size_divisible)
        image_sizes_list: List[Tuple[int, int]] = []
        for image_size in image_sizes:
-            if len(image_size) != 2:
-                raise ValueError(
-                    f"Input tensors expected to have in the last two elements H and W, instead got {image_size}"
-                )
+            torch._assert(
+                len(image_size) == 2,
+                f"Input tensors expected to have in the last two elements H and W, instead got {image_size}",
+            )
            image_sizes_list.append((image_size[0], image_size[1]))
...
@@ -28,13 +28,13 @@ def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor:
 def check_roi_boxes_shape(boxes: Union[Tensor, List[Tensor]]):
    if isinstance(boxes, (list, tuple)):
        for _tensor in boxes:
-            if _tensor.size(1) != 4:
-                raise ValueError("The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]].")
+            torch._assert(
+                _tensor.size(1) == 4, "The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]"
+            )
    elif isinstance(boxes, torch.Tensor):
-        if boxes.size(1) != 5:
-            raise ValueError("The boxes tensor shape is not correct as Tensor[K, 5]/")
+        torch._assert(boxes.size(1) == 5, "The boxes tensor shape is not correct as Tensor[K, 5]")
    else:
-        raise TypeError(f"boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]], instead got {type(boxes)}")
+        torch._assert(False, "boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]")
    return
...
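On the test side, the expected exception type flips to `AssertionError` throughout because that is what `torch._assert` raises in eager mode when its condition is false. A quick sanity check of that behavior (a hypothetical test, not part of this PR's suite):

```python
import pytest
import torch


def test_torch_assert_raises_assertion_error():
    # torch._assert raises AssertionError, not ValueError/TypeError,
    # which is why the pytest.raises(...) expectations change above.
    with pytest.raises(AssertionError):
        torch._assert(False, "condition failed")
```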