Commit ed3e1b77 authored by zhangwenwei's avatar zhangwenwei
Browse files

Fix compilation failure in PyTorch1.5

parent b5f1f52f
...@@ -33,7 +33,7 @@ int ball_query_wrapper(int b, int n, int m, float radius, int nsample, ...@@ -33,7 +33,7 @@ int ball_query_wrapper(int b, int n, int m, float radius, int nsample,
const float *xyz = xyz_tensor.data_ptr<float>(); const float *xyz = xyz_tensor.data_ptr<float>();
int *idx = idx_tensor.data_ptr<int>(); int *idx = idx_tensor.data_ptr<int>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
ball_query_kernel_launcher(b, n, m, radius, nsample, new_xyz, xyz, idx, ball_query_kernel_launcher(b, n, m, radius, nsample, new_xyz, xyz, idx,
stream); stream);
return 1; return 1;
......
...@@ -24,7 +24,7 @@ int furthest_point_sampling_wrapper(int b, int n, int m, ...@@ -24,7 +24,7 @@ int furthest_point_sampling_wrapper(int b, int n, int m,
float *temp = temp_tensor.data_ptr<float>(); float *temp = temp_tensor.data_ptr<float>();
int *idx = idx_tensor.data_ptr<int>(); int *idx = idx_tensor.data_ptr<int>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx, stream); furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx, stream);
return 1; return 1;
} }
......
...@@ -32,7 +32,7 @@ int gather_points_wrapper(int b, int c, int n, int npoints, ...@@ -32,7 +32,7 @@ int gather_points_wrapper(int b, int c, int n, int npoints,
const int *idx = idx_tensor.data_ptr<int>(); const int *idx = idx_tensor.data_ptr<int>();
float *out = out_tensor.data_ptr<float>(); float *out = out_tensor.data_ptr<float>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
gather_points_kernel_launcher(b, c, n, npoints, points, idx, out, stream); gather_points_kernel_launcher(b, c, n, npoints, points, idx, out, stream);
return 1; return 1;
} }
...@@ -45,7 +45,7 @@ int gather_points_grad_wrapper(int b, int c, int n, int npoints, ...@@ -45,7 +45,7 @@ int gather_points_grad_wrapper(int b, int c, int n, int npoints,
const int *idx = idx_tensor.data_ptr<int>(); const int *idx = idx_tensor.data_ptr<int>();
float *grad_points = grad_points_tensor.data_ptr<float>(); float *grad_points = grad_points_tensor.data_ptr<float>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
gather_points_grad_kernel_launcher(b, c, n, npoints, grad_out, idx, gather_points_grad_kernel_launcher(b, c, n, npoints, grad_out, idx,
grad_points, stream); grad_points, stream);
return 1; return 1;
......
...@@ -32,7 +32,7 @@ int group_points_grad_wrapper(int b, int c, int n, int npoints, int nsample, ...@@ -32,7 +32,7 @@ int group_points_grad_wrapper(int b, int c, int n, int npoints, int nsample,
const int *idx = idx_tensor.data_ptr<int>(); const int *idx = idx_tensor.data_ptr<int>();
const float *grad_out = grad_out_tensor.data_ptr<float>(); const float *grad_out = grad_out_tensor.data_ptr<float>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
group_points_grad_kernel_launcher(b, c, n, npoints, nsample, grad_out, idx, group_points_grad_kernel_launcher(b, c, n, npoints, nsample, grad_out, idx,
grad_points, stream); grad_points, stream);
...@@ -46,7 +46,7 @@ int group_points_wrapper(int b, int c, int n, int npoints, int nsample, ...@@ -46,7 +46,7 @@ int group_points_wrapper(int b, int c, int n, int npoints, int nsample,
const int *idx = idx_tensor.data_ptr<int>(); const int *idx = idx_tensor.data_ptr<int>();
float *out = out_tensor.data_ptr<float>(); float *out = out_tensor.data_ptr<float>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
group_points_kernel_launcher(b, c, n, npoints, nsample, points, idx, out, group_points_kernel_launcher(b, c, n, npoints, nsample, points, idx, out,
stream); stream);
......
...@@ -48,7 +48,7 @@ void three_nn_wrapper(int b, int n, int m, at::Tensor unknown_tensor, ...@@ -48,7 +48,7 @@ void three_nn_wrapper(int b, int n, int m, at::Tensor unknown_tensor,
float *dist2 = dist2_tensor.data_ptr<float>(); float *dist2 = dist2_tensor.data_ptr<float>();
int *idx = idx_tensor.data_ptr<int>(); int *idx = idx_tensor.data_ptr<int>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
three_nn_kernel_launcher(b, n, m, unknown, known, dist2, idx, stream); three_nn_kernel_launcher(b, n, m, unknown, known, dist2, idx, stream);
} }
...@@ -61,7 +61,7 @@ void three_interpolate_wrapper(int b, int c, int m, int n, ...@@ -61,7 +61,7 @@ void three_interpolate_wrapper(int b, int c, int m, int n,
float *out = out_tensor.data_ptr<float>(); float *out = out_tensor.data_ptr<float>();
const int *idx = idx_tensor.data_ptr<int>(); const int *idx = idx_tensor.data_ptr<int>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
three_interpolate_kernel_launcher(b, c, m, n, points, idx, weight, out, three_interpolate_kernel_launcher(b, c, m, n, points, idx, weight, out,
stream); stream);
} }
...@@ -76,7 +76,7 @@ void three_interpolate_grad_wrapper(int b, int c, int n, int m, ...@@ -76,7 +76,7 @@ void three_interpolate_grad_wrapper(int b, int c, int n, int m,
float *grad_points = grad_points_tensor.data_ptr<float>(); float *grad_points = grad_points_tensor.data_ptr<float>();
const int *idx = idx_tensor.data_ptr<int>(); const int *idx = idx_tensor.data_ptr<int>();
cudaStream_t stream = THCState_getCurrentStream(state); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
three_interpolate_grad_kernel_launcher(b, c, n, m, grad_out, idx, weight, three_interpolate_grad_kernel_launcher(b, c, n, m, grad_out, idx, weight,
grad_points, stream); grad_points, stream);
} }
......
...@@ -229,7 +229,11 @@ if __name__ == '__main__': ...@@ -229,7 +229,11 @@ if __name__ == '__main__':
name='sparse_conv_ext', name='sparse_conv_ext',
module='mmdet3d.ops.spconv', module='mmdet3d.ops.spconv',
extra_include_path=[ extra_include_path=[
os.path.join(*'mmdet3d.ops.spconv'.split('.'), 'include/') # PyTorch 1.5 uses ninja, which requires absolute path
# of included files, relative path will cause failure.
os.path.abspath(
os.path.join(*'mmdet3d.ops.spconv'.split('.'),
'include/'))
], ],
sources=[ sources=[
'src/all.cc', 'src/all.cc',
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment