Unverified commit 57c789f8, authored by AhnDW and committed by GitHub

Remove warnings about deprecated APIs (#2064)

* Replace `.type().is_cuda()` / `.device().is_cuda()` with `.is_cuda()` (illustrated in the sketch below)

* Replace `.type()` with `.scalar_type()` in the dispatch macros

* Fix lint, clang-format

* Fix lint, clang-format
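For context, the pattern applied throughout the diff below is the same in every file: device checks call `Tensor::is_cuda()` directly instead of the deprecated `type().is_cuda()`, `device().is_cuda()`, or `options().device().is_cuda()` chains, and the `AT_DISPATCH_*` macros receive `scalar_type()` instead of `type()`. A minimal CPU-only sketch of the resulting style follows; `copy_like` is a made-up example function, not code from this repository.

```cpp
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>

// Hypothetical operator illustrating the non-deprecated API usage.
at::Tensor copy_like(const at::Tensor& input) {
  // Before this commit the checks looked like:
  //   if (input.type().is_cuda()) { ... }
  //   AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "copy_like", [&] { ... });
  // which emits deprecation warnings on recent PyTorch versions.
  TORCH_CHECK(!input.is_cuda(), "this sketch only handles CPU tensors");

  auto input_c = input.contiguous(); // keep the contiguous tensor alive
  at::Tensor output = at::empty_like(input_c);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      input_c.scalar_type(), "copy_like", [&] {
        // scalar_t is the concrete element type chosen by the dispatch macro.
        const scalar_t* in = input_c.data_ptr<scalar_t>();
        scalar_t* out = output.data_ptr<scalar_t>();
        for (int64_t i = 0; i < input_c.numel(); ++i) {
          out[i] = in[i];
        }
      });
  return output;
}
```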
parent 74679cc5
@@ -19,7 +19,7 @@ at::Tensor DeformConv2d_forward(
     const std::pair<int, int>& dilation,
     const int groups,
     const int offset_groups) {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return DeformConv2d_forward_cuda(
         input.contiguous(),
@@ -58,7 +58,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> DeformConv2d_backward
     const std::pair<int, int>& dilation,
     const int groups,
     const int offset_groups) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return DeformConv2d_backward_cuda(
         grad.contiguous(),
...
@@ -18,7 +18,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward(
     const int pooled_height,
     const int pooled_width,
     const int sampling_ratio) {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return PSROIAlign_forward_cuda(
         input,
@@ -47,7 +47,7 @@ at::Tensor PSROIAlign_backward(
     const int channels,
     const int height,
     const int width) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return PSROIAlign_backward_cuda(
         grad,
...
@@ -15,7 +15,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward(
     const float spatial_scale,
     const int pooled_height,
     const int pooled_width) {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return PSROIPool_forward_cuda(
         input, rois, spatial_scale, pooled_height, pooled_width);
@@ -38,7 +38,7 @@ at::Tensor PSROIPool_backward(
     const int channels,
     const int height,
     const int width) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return PSROIPool_backward_cuda(
         grad,
...
@@ -21,7 +21,7 @@ at::Tensor ROIAlign_forward(
     const bool aligned) // The flag for pixel shift
                         // along each axis.
 {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return ROIAlign_forward_cuda(
         input,
@@ -57,7 +57,7 @@ at::Tensor ROIAlign_backward(
     const int width,
     const int sampling_ratio,
     const bool aligned) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return ROIAlign_backward_cuda(
         grad,
...
@@ -15,7 +15,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward(
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width) {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return ROIPool_forward_cuda(
         input, rois, spatial_scale, pooled_height, pooled_width);
@@ -38,7 +38,7 @@ at::Tensor ROIPool_backward(
     const int channels,
     const int height,
     const int width) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return ROIPool_backward_cuda(
         grad,
...
@@ -407,7 +407,8 @@ at::Tensor ROIAlign_forward_cpu(
   if (output.numel() == 0)
     return output;
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "ROIAlign_forward", [&] {
     ROIAlignForward<scalar_t>(
         output_size,
         input.contiguous().data_ptr<scalar_t>(),
@@ -459,7 +460,8 @@ at::Tensor ROIAlign_backward_cpu(
   int h_stride = grad.stride(2);
   int w_stride = grad.stride(3);
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad.scalar_type(), "ROIAlign_forward", [&] {
     ROIAlignBackward<scalar_t>(
         grad.numel(),
         grad.data_ptr<scalar_t>(),
...
@@ -149,7 +149,8 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cpu(
     return std::make_tuple(output, argmax);
   }
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIPool_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "ROIPool_forward", [&] {
     RoIPoolForward<scalar_t>(
         input.contiguous().data_ptr<scalar_t>(),
         spatial_scale,
@@ -203,7 +204,8 @@ at::Tensor ROIPool_backward_cpu(
   int h_stride = grad.stride(2);
   int w_stride = grad.stride(3);
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad.scalar_type(), "ROIPool_backward", [&] {
     RoIPoolBackward<scalar_t>(
         grad.data_ptr<scalar_t>(),
         argmax.data_ptr<int>(),
...
@@ -5,9 +5,8 @@ at::Tensor nms_cpu_kernel(
     const at::Tensor& dets,
     const at::Tensor& scores,
     const float iou_threshold) {
-  AT_ASSERTM(!dets.options().device().is_cuda(), "dets must be a CPU tensor");
-  AT_ASSERTM(
-      !scores.options().device().is_cuda(), "scores must be a CPU tensor");
+  AT_ASSERTM(!dets.is_cuda(), "dets must be a CPU tensor");
+  AT_ASSERTM(!scores.is_cuda(), "scores must be a CPU tensor");
   AT_ASSERTM(
       dets.scalar_type() == scores.scalar_type(),
       "dets should have the same type as scores");
...
@@ -267,7 +267,7 @@ at::Tensor DeformConv2d_forward_cuda(
   TORCH_CHECK(input.is_contiguous());
   TORCH_CHECK(offset.is_contiguous());
   TORCH_CHECK(weight.is_contiguous());
-  TORCH_CHECK(input.device().is_cuda(), "input must be a CUDA tensor");
+  TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
   at::DeviceGuard guard(input.device());
...
@@ -302,8 +302,8 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda(
     const int pooled_width,
     const int sampling_ratio) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
@@ -377,10 +377,10 @@ at::Tensor PSROIAlign_backward_cuda(
     const int height,
     const int width) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   AT_ASSERTM(
-      channel_mapping.type().is_cuda(),
+      channel_mapping.is_cuda(),
       "channel_mapping must be a CUDA tensor");
   at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
...
@@ -139,8 +139,8 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
     const int pooled_height,
     const int pooled_width) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
@@ -211,10 +211,10 @@ at::Tensor PSROIPool_backward_cuda(
     const int height,
     const int width) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   AT_ASSERTM(
-      channel_mapping.type().is_cuda(),
+      channel_mapping.is_cuda(),
       "channel_mapping must be a CUDA tensor");
   at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
...
@@ -312,8 +312,8 @@ at::Tensor ROIAlign_forward_cuda(
     const int pooled_width,
     const int sampling_ratio,
     const bool aligned) {
-  AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
@@ -345,7 +345,7 @@ at::Tensor ROIAlign_forward_cuda(
     return output;
   }
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
     RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
         output_size,
         input.contiguous().data_ptr<scalar_t>(),
@@ -376,8 +376,8 @@ at::Tensor ROIAlign_backward_cuda(
     const int width,
     const int sampling_ratio,
     const bool aligned) {
-  AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
@@ -409,7 +409,7 @@ at::Tensor ROIAlign_backward_cuda(
   int h_stride = grad.stride(2);
   int w_stride = grad.stride(3);
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
     RoIAlignBackward<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
         grad.data_ptr<scalar_t>(),
...
@@ -121,8 +121,8 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(
     const float spatial_scale,
     const int pooled_height,
     const int pooled_width) {
-  AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
@@ -157,7 +157,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(
     return std::make_tuple(output, argmax);
   }
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIPool_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIPool_forward", [&] {
     RoIPoolForward<scalar_t><<<grid, block, 0, stream>>>(
         output_size,
         input.contiguous().data_ptr<scalar_t>(),
@@ -187,9 +187,9 @@ at::Tensor ROIPool_backward_cuda(
     const int height,
     const int width) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
-  AT_ASSERTM(argmax.device().is_cuda(), "argmax must be a CUDA tensor");
+  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(argmax.is_cuda(), "argmax must be a CUDA tensor");
   at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
       argmax_t{argmax, "argmax", 3};
@@ -224,7 +224,7 @@ at::Tensor ROIPool_backward_cuda(
   int h_stride = grad.stride(2);
   int w_stride = grad.stride(3);
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIPool_backward", [&] {
     RoIPoolBackward<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
         grad.data_ptr<scalar_t>(),
...
@@ -72,8 +72,8 @@ __global__ void nms_kernel(
 at::Tensor nms_cuda(const at::Tensor& dets,
                     const at::Tensor& scores,
                     float iou_threshold) {
-  AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
-  AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");
+  AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor");
+  AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor");
   at::cuda::CUDAGuard device_guard(dets.device());
   auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
@@ -91,7 +91,7 @@ at::Tensor nms_cuda(const at::Tensor& dets,
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      dets_sorted.type(), "nms_kernel_cuda", [&] {
+      dets_sorted.scalar_type(), "nms_kernel_cuda", [&] {
         nms_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
             dets_num,
             iou_threshold,
...
@@ -12,7 +12,7 @@ at::Tensor nms(
     const at::Tensor& dets,
     const at::Tensor& scores,
     const double iou_threshold) {
-  if (dets.device().is_cuda()) {
+  if (dets.is_cuda()) {
 #if defined(WITH_CUDA)
     if (dets.numel() == 0) {
       at::cuda::CUDAGuard device_guard(dets.device());
...