Commit f4ac1567 authored by Nikita Shulga's avatar Nikita Shulga Committed by Facebook GitHub Bot
Browse files

Do not use deprecated `at::Tensor::type()`

Summary:
X-link: https://github.com/facebookexternal/vizard/pull/5

X-link: https://github.com/fairinternal/egohowto/pull/72

Pull Request resolved: https://github.com/facebookresearch/d2go/pull/680

Replace `tensor.type().scalarType()` with `tensor.scalar_type()` (this makes it possible to get rid of the cast function in https://github.com/pytorch/pytorch/pull/139358 )

Remove extraneous braces around lambdas

Reviewed By: huydhn, r-barnes

Differential Revision: D65308547

fbshipit-source-id: d04c62cfa7361c0f69a2eaf1fd331befa9df4395
parent 5b37512b
Pipeline #2711 failed with stages
in 0 seconds
...@@ -61,7 +61,7 @@ at::Tensor ms_deform_attn_cuda_forward( ...@@ -61,7 +61,7 @@ at::Tensor ms_deform_attn_cuda_forward(
for (int n = 0; n < batch/im2col_step_; ++n) for (int n = 0; n < batch/im2col_step_; ++n)
{ {
auto columns = output_n.select(0, n); auto columns = output_n.select(0, n);
AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] { AT_DISPATCH_FLOATING_TYPES(value.scalar_type(), "ms_deform_attn_forward_cuda", [&] {
ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
value.data<scalar_t>() + n * im2col_step_ * per_value_size, value.data<scalar_t>() + n * im2col_step_ * per_value_size,
spatial_shapes.data<int64_t>(), spatial_shapes.data<int64_t>(),
...@@ -71,7 +71,7 @@ at::Tensor ms_deform_attn_cuda_forward( ...@@ -71,7 +71,7 @@ at::Tensor ms_deform_attn_cuda_forward(
batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
columns.data<scalar_t>()); columns.data<scalar_t>());
})); });
} }
output = output.view({batch, num_query, num_heads*channels}); output = output.view({batch, num_query, num_heads*channels});
...@@ -131,7 +131,7 @@ std::vector<at::Tensor> ms_deform_attn_cuda_backward( ...@@ -131,7 +131,7 @@ std::vector<at::Tensor> ms_deform_attn_cuda_backward(
for (int n = 0; n < batch/im2col_step_; ++n) for (int n = 0; n < batch/im2col_step_; ++n)
{ {
auto grad_output_g = grad_output_n.select(0, n); auto grad_output_g = grad_output_n.select(0, n);
AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] { AT_DISPATCH_FLOATING_TYPES(value.scalar_type(), "ms_deform_attn_backward_cuda", [&] {
ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
grad_output_g.data<scalar_t>(), grad_output_g.data<scalar_t>(),
value.data<scalar_t>() + n * im2col_step_ * per_value_size, value.data<scalar_t>() + n * im2col_step_ * per_value_size,
...@@ -144,7 +144,7 @@ std::vector<at::Tensor> ms_deform_attn_cuda_backward( ...@@ -144,7 +144,7 @@ std::vector<at::Tensor> ms_deform_attn_cuda_backward(
grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size, grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size); grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
})); });
} }
return { return {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment