Unverified commit 57c789f8, authored by AhnDW, committed by GitHub

Remove warnings about deprecated calls (#2064)

* Replace `.type().is_cuda()` with just `.is_cuda()` (sketched below)

* Replace `.type()` with `.scalar_type()` in the dispatch macros

* Fix lint, clang-format
parent 74679cc5
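In short, the commit swaps two deprecated ATen idioms for their modern equivalents. A minimal sketch of both patterns (the function names `check_device` and `dispatch_example` are hypothetical, for illustration only, not part of this commit):

```cpp
#include <ATen/ATen.h>

// Hypothetical helper illustrating the device-query migration.
void check_device(const at::Tensor& t) {
  // Deprecated: if (t.type().is_cuda()) { ... }
  // Current: ask the tensor directly.
  if (t.is_cuda()) {
    // ... route to a CUDA implementation ...
  }
}

// Hypothetical helper illustrating the dispatch-macro migration.
void dispatch_example(const at::Tensor& t) {
  // Deprecated: AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.type(), ...)
  // Current: pass the ScalarType instead of the deprecated Type object.
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.scalar_type(), "dispatch_example", [&] {
    auto tc = t.contiguous();
    scalar_t* data = tc.data_ptr<scalar_t>();
    (void)data; // a real kernel would consume the typed pointer here
  });
}
```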
@@ -19,7 +19,7 @@ at::Tensor DeformConv2d_forward(
     const std::pair<int, int>& dilation,
     const int groups,
     const int offset_groups) {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return DeformConv2d_forward_cuda(
         input.contiguous(),
@@ -58,7 +58,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> DeformConv2d_backward
     const std::pair<int, int>& dilation,
     const int groups,
     const int offset_groups) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return DeformConv2d_backward_cuda(
         grad.contiguous(),
......
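The same guard appears in every operator header touched by this commit: query the tensor's device with `is_cuda()` and route to the CUDA build only when one exists. A self-contained sketch of the pattern (assumed shape; `my_op`, `my_op_cpu`, and `my_op_cuda` are hypothetical names, not torchvision functions):

```cpp
#include <ATen/ATen.h>

at::Tensor my_op_cpu(const at::Tensor& input);  // hypothetical CPU kernel
#if defined(WITH_CUDA) || defined(WITH_HIP)
at::Tensor my_op_cuda(const at::Tensor& input); // hypothetical GPU kernel
#endif

at::Tensor my_op(const at::Tensor& input) {
  if (input.is_cuda()) {
#if defined(WITH_CUDA) || defined(WITH_HIP)
    // Only reachable in builds compiled with GPU support.
    return my_op_cuda(input.contiguous());
#else
    AT_ERROR("my_op is not compiled with GPU support");
#endif
  }
  return my_op_cpu(input.contiguous());
}
```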
@@ -18,7 +18,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward(
     const int pooled_height,
     const int pooled_width,
     const int sampling_ratio) {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return PSROIAlign_forward_cuda(
         input,
@@ -47,7 +47,7 @@ at::Tensor PSROIAlign_backward(
     const int channels,
     const int height,
     const int width) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return PSROIAlign_backward_cuda(
         grad,
......
@@ -15,7 +15,7 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward(
     const float spatial_scale,
     const int pooled_height,
     const int pooled_width) {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return PSROIPool_forward_cuda(
         input, rois, spatial_scale, pooled_height, pooled_width);
@@ -38,7 +38,7 @@ at::Tensor PSROIPool_backward(
     const int channels,
     const int height,
     const int width) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return PSROIPool_backward_cuda(
         grad,
......
@@ -21,7 +21,7 @@ at::Tensor ROIAlign_forward(
     const bool aligned) // The flag for pixel shift
                         // along each axis.
 {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return ROIAlign_forward_cuda(
         input,
@@ -57,7 +57,7 @@ at::Tensor ROIAlign_backward(
     const int width,
     const int sampling_ratio,
     const bool aligned) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return ROIAlign_backward_cuda(
         grad,
......
@@ -15,7 +15,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward(
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width) {
-  if (input.type().is_cuda()) {
+  if (input.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return ROIPool_forward_cuda(
         input, rois, spatial_scale, pooled_height, pooled_width);
@@ -38,7 +38,7 @@ at::Tensor ROIPool_backward(
     const int channels,
     const int height,
     const int width) {
-  if (grad.type().is_cuda()) {
+  if (grad.is_cuda()) {
 #if defined(WITH_CUDA) || defined(WITH_HIP)
     return ROIPool_backward_cuda(
         grad,
......
@@ -407,7 +407,8 @@ at::Tensor ROIAlign_forward_cpu(
   if (output.numel() == 0)
     return output;
 
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "ROIAlign_forward", [&] {
     ROIAlignForward<scalar_t>(
         output_size,
         input.contiguous().data_ptr<scalar_t>(),
@@ -459,7 +460,8 @@ at::Tensor ROIAlign_backward_cpu(
   int h_stride = grad.stride(2);
   int w_stride = grad.stride(3);
 
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad.scalar_type(), "ROIAlign_forward", [&] {
     ROIAlignBackward<scalar_t>(
         grad.numel(),
         grad.data_ptr<scalar_t>(),
......
@@ -149,7 +149,8 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cpu(
     return std::make_tuple(output, argmax);
   }
 
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIPool_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "ROIPool_forward", [&] {
     RoIPoolForward<scalar_t>(
         input.contiguous().data_ptr<scalar_t>(),
         spatial_scale,
@@ -203,7 +204,8 @@ at::Tensor ROIPool_backward_cpu(
   int h_stride = grad.stride(2);
   int w_stride = grad.stride(3);
 
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad.scalar_type(), "ROIPool_backward", [&] {
     RoIPoolBackward<scalar_t>(
         grad.data_ptr<scalar_t>(),
         argmax.data_ptr<int>(),
......
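For context on the hunks above: `AT_DISPATCH_FLOATING_TYPES_AND_HALF` instantiates its lambda once per floating dtype and binds `scalar_t` to the matching C++ type, which is why it only needs the `ScalarType` (hence `scalar_type()`) rather than the full deprecated `Type` object. A minimal sketch, assuming a hypothetical element-wise kernel `scale_cpu` (not part of this commit):

```cpp
#include <ATen/ATen.h>

at::Tensor scale_cpu(const at::Tensor& input, double factor) {
  at::Tensor output = at::empty(input.sizes(), input.options());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      input.scalar_type(), "scale_cpu", [&] {
        // Inside the lambda, scalar_t is the concrete C++ type
        // (float, double, or at::Half) matching input.scalar_type().
        auto in = input.contiguous();
        const scalar_t* src = in.data_ptr<scalar_t>();
        scalar_t* dst = output.data_ptr<scalar_t>();
        for (int64_t i = 0; i < in.numel(); ++i) {
          dst[i] = src[i] * static_cast<scalar_t>(factor);
        }
      });
  return output;
}
```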
@@ -5,9 +5,8 @@ at::Tensor nms_cpu_kernel(
     const at::Tensor& dets,
     const at::Tensor& scores,
     const float iou_threshold) {
-  AT_ASSERTM(!dets.options().device().is_cuda(), "dets must be a CPU tensor");
-  AT_ASSERTM(
-      !scores.options().device().is_cuda(), "scores must be a CPU tensor");
+  AT_ASSERTM(!dets.is_cuda(), "dets must be a CPU tensor");
+  AT_ASSERTM(!scores.is_cuda(), "scores must be a CPU tensor");
   AT_ASSERTM(
       dets.scalar_type() == scores.scalar_type(),
       "dets should have the same type as scores");
......
......@@ -267,7 +267,7 @@ at::Tensor DeformConv2d_forward_cuda(
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(offset.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(input.device().is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
at::DeviceGuard guard(input.device());
......
@@ -302,8 +302,8 @@ std::tuple<at::Tensor, at::Tensor> PSROIAlign_forward_cuda(
     const int pooled_width,
     const int sampling_ratio) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
 
   at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
@@ -377,10 +377,10 @@ at::Tensor PSROIAlign_backward_cuda(
     const int height,
     const int width) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   AT_ASSERTM(
-      channel_mapping.type().is_cuda(),
+      channel_mapping.is_cuda(),
       "channel_mapping must be a CUDA tensor");
 
   at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
......
@@ -139,8 +139,8 @@ std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
     const int pooled_height,
     const int pooled_width) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
 
   at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
@@ -211,10 +211,10 @@ at::Tensor PSROIPool_backward_cuda(
     const int height,
     const int width) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
   AT_ASSERTM(
-      channel_mapping.type().is_cuda(),
+      channel_mapping.is_cuda(),
       "channel_mapping must be a CUDA tensor");
 
   at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
......
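The `at::TensorArg` lines left as context in these hunks feed ATen's argument-checking helpers, a second layer of validation after the `AT_ASSERTM` device asserts. A sketch of the assumed usage (the wrapper `check_roi_inputs` is hypothetical; the helpers come from ATen's TensorUtils.h):

```cpp
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>

// Hypothetical wrapper showing how TensorArg-based checks compose.
void check_roi_inputs(const at::Tensor& input, const at::Tensor& rois) {
  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "check_roi_inputs"; // name reported in error messages
  at::checkAllSameGPU(c, {input_t, rois_t});  // all on the same CUDA device
  at::checkAllSameType(c, {input_t, rois_t}); // all the same dtype
}
```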
@@ -312,8 +312,8 @@ at::Tensor ROIAlign_forward_cuda(
     const int pooled_width,
     const int sampling_ratio,
     const bool aligned) {
-  AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
 
   at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
@@ -345,7 +345,7 @@ at::Tensor ROIAlign_forward_cuda(
     return output;
   }
 
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIAlign_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIAlign_forward", [&] {
     RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
         output_size,
         input.contiguous().data_ptr<scalar_t>(),
@@ -376,8 +376,8 @@ at::Tensor ROIAlign_backward_cuda(
     const int width,
     const int sampling_ratio,
     const bool aligned) {
-  AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
 
   at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
@@ -409,7 +409,7 @@ at::Tensor ROIAlign_backward_cuda(
   int h_stride = grad.stride(2);
   int w_stride = grad.stride(3);
 
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIAlign_backward", [&] {
     RoIAlignBackward<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
         grad.data_ptr<scalar_t>(),
......
@@ -121,8 +121,8 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(
     const float spatial_scale,
     const int pooled_height,
     const int pooled_width) {
-  AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
-  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
 
   at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
@@ -157,7 +157,7 @@ std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(
     return std::make_tuple(output, argmax);
   }
 
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIPool_forward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "ROIPool_forward", [&] {
     RoIPoolForward<scalar_t><<<grid, block, 0, stream>>>(
         output_size,
         input.contiguous().data_ptr<scalar_t>(),
@@ -187,9 +187,9 @@ at::Tensor ROIPool_backward_cuda(
     const int height,
     const int width) {
   // Check if input tensors are CUDA tensors
-  AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
-  AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
-  AT_ASSERTM(argmax.device().is_cuda(), "argmax must be a CUDA tensor");
+  AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
+  AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
+  AT_ASSERTM(argmax.is_cuda(), "argmax must be a CUDA tensor");
 
   at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
       argmax_t{argmax, "argmax", 3};
@@ -224,7 +224,7 @@ at::Tensor ROIPool_backward_cuda(
   int h_stride = grad.stride(2);
   int w_stride = grad.stride(3);
 
-  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] {
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "ROIPool_backward", [&] {
     RoIPoolBackward<scalar_t><<<grid, block, 0, stream>>>(
         grad.numel(),
         grad.data_ptr<scalar_t>(),
......
@@ -72,8 +72,8 @@ __global__ void nms_kernel(
 at::Tensor nms_cuda(const at::Tensor& dets,
                     const at::Tensor& scores,
                     float iou_threshold) {
-  AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
-  AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");
+  AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor");
+  AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor");
 
   at::cuda::CUDAGuard device_guard(dets.device());
   auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
@@ -91,7 +91,7 @@ at::Tensor nms_cuda(const at::Tensor& dets,
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      dets_sorted.type(), "nms_kernel_cuda", [&] {
+      dets_sorted.scalar_type(), "nms_kernel_cuda", [&] {
         nms_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
             dets_num,
             iou_threshold,
......
@@ -12,7 +12,7 @@ at::Tensor nms(
     const at::Tensor& dets,
     const at::Tensor& scores,
     const double iou_threshold) {
-  if (dets.device().is_cuda()) {
+  if (dets.is_cuda()) {
 #if defined(WITH_CUDA)
     if (dets.numel() == 0) {
       at::cuda::CUDAGuard device_guard(dets.device());
......