Unverified Commit b8e93084 authored by vfdev's avatar vfdev Committed by GitHub
Browse files

Added rois shape check in C++ (#2794)

* Added rois shape check in C++

* Fixes code formatting

* Remove accidental include

* Updated code according to the review:
  replaced old AT_ASSERT/AT_ERROR with new TORCH_CHECK
parent 5bb81c8e
...@@ -32,7 +32,7 @@ at::Tensor DeformConv2d_forward( ...@@ -32,7 +32,7 @@ at::Tensor DeformConv2d_forward(
groups, groups,
offset_groups); offset_groups);
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
} }
return DeformConv2d_forward_cpu( return DeformConv2d_forward_cpu(
...@@ -72,7 +72,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> DeformConv2d_backward ...@@ -72,7 +72,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> DeformConv2d_backward
groups, groups,
offset_groups); offset_groups);
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
} }
return DeformConv2d_backward_cpu( return DeformConv2d_backward_cpu(
......
...@@ -28,7 +28,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward( ...@@ -28,7 +28,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward(
pooled_width, pooled_width,
sampling_ratio); sampling_ratio);
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
} }
return PSROIAlign_forward_cpu( return PSROIAlign_forward_cpu(
...@@ -62,7 +62,7 @@ at::Tensor PSROIAlign_backward( ...@@ -62,7 +62,7 @@ at::Tensor PSROIAlign_backward(
height, height,
width); width);
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
} }
return PSROIAlign_backward_cpu( return PSROIAlign_backward_cpu(
......
...@@ -20,7 +20,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward( ...@@ -20,7 +20,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward(
return PSROIPool_forward_cuda( return PSROIPool_forward_cuda(
input, rois, spatial_scale, pooled_height, pooled_width); input, rois, spatial_scale, pooled_height, pooled_width);
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
} }
return PSROIPool_forward_cpu( return PSROIPool_forward_cpu(
...@@ -52,7 +52,7 @@ at::Tensor PSROIPool_backward( ...@@ -52,7 +52,7 @@ at::Tensor PSROIPool_backward(
height, height,
width); width);
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
} }
return PSROIPool_backward_cpu( return PSROIPool_backward_cpu(
......
...@@ -20,7 +20,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward( ...@@ -20,7 +20,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward(
return ROIPool_forward_cuda( return ROIPool_forward_cuda(
input, rois, spatial_scale, pooled_height, pooled_width); input, rois, spatial_scale, pooled_height, pooled_width);
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
} }
return ROIPool_forward_cpu( return ROIPool_forward_cpu(
...@@ -52,7 +52,7 @@ at::Tensor ROIPool_backward( ...@@ -52,7 +52,7 @@ at::Tensor ROIPool_backward(
height, height,
width); width);
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
} }
return ROIPool_backward_cpu( return ROIPool_backward_cpu(
......
...@@ -308,8 +308,10 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cpu( ...@@ -308,8 +308,10 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cpu(
const int pooled_width, const int pooled_width,
const int sampling_ratio) { const int sampling_ratio) {
// Check if input tensors are CPU tensors // Check if input tensors are CPU tensors
AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor");
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
...@@ -321,7 +323,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cpu( ...@@ -321,7 +323,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cpu(
int height = input.size(2); int height = input.size(2);
int width = input.size(3); int width = input.size(3);
AT_ASSERTM( TORCH_CHECK(
channels % (pooled_height * pooled_width) == 0, channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width"); "input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width); int channels_out = channels / (pooled_height * pooled_width);
...@@ -370,9 +372,9 @@ at::Tensor PSROIAlign_backward_cpu( ...@@ -370,9 +372,9 @@ at::Tensor PSROIAlign_backward_cpu(
const int height, const int height,
const int width) { const int width) {
// Check if input tensors are CPU tensors // Check if input tensors are CPU tensors
AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor");
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
AT_ASSERTM( TORCH_CHECK(
channel_mapping.device().is_cpu(), channel_mapping.device().is_cpu(),
"channel_mapping must be a CPU tensor"); "channel_mapping must be a CPU tensor");
......
...@@ -150,8 +150,10 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cpu( ...@@ -150,8 +150,10 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cpu(
const int pooled_height, const int pooled_height,
const int pooled_width) { const int pooled_width) {
// Check if input tensors are CPU tensors // Check if input tensors are CPU tensors
AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor");
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
...@@ -163,7 +165,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cpu( ...@@ -163,7 +165,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cpu(
int height = input.size(2); int height = input.size(2);
int width = input.size(3); int width = input.size(3);
AT_ASSERTM( TORCH_CHECK(
channels % (pooled_height * pooled_width) == 0, channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width"); "input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width); int channels_out = channels / (pooled_height * pooled_width);
...@@ -210,9 +212,9 @@ at::Tensor PSROIPool_backward_cpu( ...@@ -210,9 +212,9 @@ at::Tensor PSROIPool_backward_cpu(
const int height, const int height,
const int width) { const int width) {
// Check if input tensors are CPU tensors // Check if input tensors are CPU tensors
AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor");
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
AT_ASSERTM( TORCH_CHECK(
channel_mapping.device().is_cpu(), channel_mapping.device().is_cpu(),
"channel_mapping must be a CPU tensor"); "channel_mapping must be a CPU tensor");
......
...@@ -394,8 +394,9 @@ at::Tensor ROIAlign_forward_cpu( ...@@ -394,8 +394,9 @@ at::Tensor ROIAlign_forward_cpu(
const int64_t pooled_width, const int64_t pooled_width,
const int64_t sampling_ratio, const int64_t sampling_ratio,
const bool aligned) { const bool aligned) {
AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor");
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
TORCH_CHECK(rois.size(1) == 5, "rois must have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
...@@ -447,8 +448,8 @@ at::Tensor ROIAlign_backward_cpu( ...@@ -447,8 +448,8 @@ at::Tensor ROIAlign_backward_cpu(
const int64_t width, const int64_t width,
const int64_t sampling_ratio, const int64_t sampling_ratio,
const bool aligned) { const bool aligned) {
AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor");
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
......
...@@ -126,8 +126,8 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cpu( ...@@ -126,8 +126,8 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cpu(
const float spatial_scale, const float spatial_scale,
const int pooled_height, const int pooled_height,
const int pooled_width) { const int pooled_width) {
AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor");
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
...@@ -180,9 +180,11 @@ at::Tensor ROIPool_backward_cpu( ...@@ -180,9 +180,11 @@ at::Tensor ROIPool_backward_cpu(
const int height, const int height,
const int width) { const int width) {
// Check if input tensors are CPU tensors // Check if input tensors are CPU tensors
AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor");
AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor");
AT_ASSERTM(argmax.device().is_cpu(), "argmax must be a CPU tensor"); TORCH_CHECK(argmax.device().is_cpu(), "argmax must be a CPU tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#if !PNG_FOUND #if !PNG_FOUND
torch::Tensor decodePNG(const torch::Tensor& data) { torch::Tensor decodePNG(const torch::Tensor& data) {
AT_ERROR("decodePNG: torchvision not compiled with libPNG support"); TORCH_CHECK(false, "decodePNG: torchvision not compiled with libPNG support");
} }
#else #else
#include <png.h> #include <png.h>
......
...@@ -5,9 +5,9 @@ at::Tensor nms_cpu_kernel( ...@@ -5,9 +5,9 @@ at::Tensor nms_cpu_kernel(
const at::Tensor& dets, const at::Tensor& dets,
const at::Tensor& scores, const at::Tensor& scores,
const double iou_threshold) { const double iou_threshold) {
AT_ASSERTM(!dets.is_cuda(), "dets must be a CPU tensor"); TORCH_CHECK(!dets.is_cuda(), "dets must be a CPU tensor");
AT_ASSERTM(!scores.is_cuda(), "scores must be a CPU tensor"); TORCH_CHECK(!scores.is_cuda(), "scores must be a CPU tensor");
AT_ASSERTM( TORCH_CHECK(
dets.scalar_type() == scores.scalar_type(), dets.scalar_type() == scores.scalar_type(),
"dets should have the same type as scores"); "dets should have the same type as scores");
......
...@@ -66,7 +66,8 @@ std::pair<std::string, ffmpeg::MediaType> const* _parse_type( ...@@ -66,7 +66,8 @@ std::pair<std::string, ffmpeg::MediaType> const* _parse_type(
if (device != types.end()) { if (device != types.end()) {
return device; return device;
} }
AT_ERROR("Expected one of [audio, video, subtitle, cc] ", stream_string); TORCH_CHECK(
false, "Expected one of [audio, video, subtitle, cc] ", stream_string);
} }
std::string parse_type_to_string(const std::string& stream_string) { std::string parse_type_to_string(const std::string& stream_string) {
...@@ -97,7 +98,8 @@ std::tuple<std::string, long> _parseStream(const std::string& streamString) { ...@@ -97,7 +98,8 @@ std::tuple<std::string, long> _parseStream(const std::string& streamString) {
try { try {
index_ = c10::stoi(match[2].str()); index_ = c10::stoi(match[2].str());
} catch (const std::exception&) { } catch (const std::exception&) {
AT_ERROR( TORCH_CHECK(
false,
"Could not parse device index '", "Could not parse device index '",
match[2].str(), match[2].str(),
"' in device string '", "' in device string '",
......
...@@ -302,8 +302,10 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda( ...@@ -302,8 +302,10 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda(
const int pooled_width, const int pooled_width,
const int sampling_ratio) { const int sampling_ratio) {
// Check if input tensors are CUDA tensors // Check if input tensors are CUDA tensors
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
...@@ -318,7 +320,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda( ...@@ -318,7 +320,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda(
auto height = input.size(2); auto height = input.size(2);
auto width = input.size(3); auto width = input.size(3);
AT_ASSERTM( TORCH_CHECK(
channels % (pooled_height * pooled_width) == 0, channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width"); "input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width); int channels_out = channels / (pooled_height * pooled_width);
...@@ -378,9 +380,9 @@ at::Tensor PSROIAlign_backward_cuda( ...@@ -378,9 +380,9 @@ at::Tensor PSROIAlign_backward_cuda(
const int height, const int height,
const int width) { const int width) {
// Check if input tensors are CUDA tensors // Check if input tensors are CUDA tensors
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM( TORCH_CHECK(
channel_mapping.is_cuda(), channel_mapping.is_cuda(),
"channel_mapping must be a CUDA tensor"); "channel_mapping must be a CUDA tensor");
......
...@@ -139,8 +139,10 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda( ...@@ -139,8 +139,10 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
const int pooled_height, const int pooled_height,
const int pooled_width) { const int pooled_width) {
// Check if input tensors are CUDA tensors // Check if input tensors are CUDA tensors
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
...@@ -155,7 +157,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda( ...@@ -155,7 +157,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
auto height = input.size(2); auto height = input.size(2);
auto width = input.size(3); auto width = input.size(3);
AT_ASSERTM( TORCH_CHECK(
channels % (pooled_height * pooled_width) == 0, channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width"); "input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width); int channels_out = channels / (pooled_height * pooled_width);
...@@ -212,9 +214,9 @@ at::Tensor PSROIPool_backward_cuda( ...@@ -212,9 +214,9 @@ at::Tensor PSROIPool_backward_cuda(
const int height, const int height,
const int width) { const int width) {
// Check if input tensors are CUDA tensors // Check if input tensors are CUDA tensors
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM( TORCH_CHECK(
channel_mapping.is_cuda(), channel_mapping.is_cuda(),
"channel_mapping must be a CUDA tensor"); "channel_mapping must be a CUDA tensor");
......
...@@ -320,8 +320,10 @@ at::Tensor ROIAlign_forward_cuda( ...@@ -320,8 +320,10 @@ at::Tensor ROIAlign_forward_cuda(
const int64_t pooled_width, const int64_t pooled_width,
const int64_t sampling_ratio, const int64_t sampling_ratio,
const bool aligned) { const bool aligned) {
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "rois must have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
...@@ -385,8 +387,8 @@ at::Tensor ROIAlign_backward_cuda( ...@@ -385,8 +387,8 @@ at::Tensor ROIAlign_backward_cuda(
const int64_t width, const int64_t width,
const int64_t sampling_ratio, const int64_t sampling_ratio,
const bool aligned) { const bool aligned) {
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
......
...@@ -121,8 +121,10 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda( ...@@ -121,8 +121,10 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(
const float spatial_scale, const float spatial_scale,
const int pooled_height, const int pooled_height,
const int pooled_width) { const int pooled_width) {
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
...@@ -188,9 +190,9 @@ at::Tensor ROIPool_backward_cuda( ...@@ -188,9 +190,9 @@ at::Tensor ROIPool_backward_cuda(
const int height, const int height,
const int width) { const int width) {
// Check if input tensors are CUDA tensors // Check if input tensors are CUDA tensors
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(argmax.is_cuda(), "argmax must be a CUDA tensor"); TORCH_CHECK(argmax.is_cuda(), "argmax must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
argmax_t{argmax, "argmax", 3}; argmax_t{argmax, "argmax", 3};
......
...@@ -71,8 +71,8 @@ __global__ void nms_kernel( ...@@ -71,8 +71,8 @@ __global__ void nms_kernel(
at::Tensor nms_cuda(const at::Tensor& dets, at::Tensor nms_cuda(const at::Tensor& dets,
const at::Tensor& scores, const at::Tensor& scores,
const double iou_threshold) { const double iou_threshold) {
AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); TORCH_CHECK(dets.is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); TORCH_CHECK(scores.is_cuda(), "scores must be a CUDA tensor");
TORCH_CHECK( TORCH_CHECK(
dets.dim() == 2, "boxes should be a 2d tensor, got ", dets.dim(), "D"); dets.dim() == 2, "boxes should be a 2d tensor, got ", dets.dim(), "D");
...@@ -96,7 +96,7 @@ at::Tensor nms_cuda(const at::Tensor& dets, ...@@ -96,7 +96,7 @@ at::Tensor nms_cuda(const at::Tensor& dets,
#if defined(WITH_CUDA) || defined(WITH_HIP) #if defined(WITH_CUDA) || defined(WITH_HIP)
at::cuda::CUDAGuard device_guard(dets.device()); at::cuda::CUDAGuard device_guard(dets.device());
#else #else
AT_ERROR("Not compiled with GPU support"); TORCH_CHECK(false, "Not compiled with GPU support");
#endif #endif
if (dets.numel() == 0) { if (dets.numel() == 0) {
......
import torch import torch
from torch import Tensor from torch import Tensor
from torch.jit.annotations import List, Tuple from torch.jit.annotations import List
def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor: def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment