"vscode:/vscode.git/clone" did not exist on "a6314a8d4e1c301bce4e45c10f325f594220617f"
Unverified Commit b586cc2f authored by Jiazhen Wang, committed by GitHub

[Refactor] Replace DIVUP with GET_BLOCKS (#1586)

* [Improve] migrate DIVUP to GET_BLOCKS

* [Fix] use GET_BLOCKS only for block allocation and delete useless statements

* [Fix] add kernel loop for nms and delete useless statements
parent cf754db9
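Every file below applies the same pattern: the launch-side DIVUP block count is replaced by GET_BLOCKS (which caps the grid), and the kernel-side "compute one index, early-return if out of range" body is wrapped in a grid-stride loop macro. A minimal sketch of the before/after, assuming a hypothetical fill_ones kernel and launcher (not part of this commit); the CUDA_1D_KERNEL_LOOP, GET_BLOCKS and THREADS_PER_BLOCK definitions come from common_cuda_helper.hpp, whose diff appears below:

// Before: one thread per element, grid sized with DIVUP, early-return guard.
//   dim3 blocks(DIVUP(n, THREADS_PER_BLOCK));
//   __global__ void fill_ones(int n, float *out) {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i >= n) return;
//     out[i] = 1.f;
//   }
//
// After: grid sized with GET_BLOCKS (capped at 4096 blocks) plus a
// grid-stride loop, so each thread may process several elements.
__global__ void fill_ones(int n, float *out) {
  CUDA_1D_KERNEL_LOOP(i, n) {  // expands to a grid-stride for loop over [0, n)
    out[i] = 1.f;
  }
}

void fill_ones_launcher(int n, float *out, cudaStream_t stream) {
  dim3 blocks(GET_BLOCKS(n, THREADS_PER_BLOCK));
  dim3 threads(THREADS_PER_BLOCK);
  fill_ones<<<blocks, threads, 0, stream>>>(n, out);
}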
@@ -22,8 +22,7 @@ __global__ void assign_score_withk_forward_cuda_kernel(
     const int O, const int aggregate, const T* points, const T* centers,
     const T* scores, const int64_t* knn_idx, T* output) {
   // ----- parallel loop for B, N1, K and O ---------
-  long i = blockIdx.x * blockDim.x + threadIdx.x;
-  if (i >= B * N1 * K * O) return;
+  CUDA_1D_KERNEL_LOOP(i, B * O * N1 * K) {
   // ------- loop for M ----------
   const int b = (int)(i / (O * N1 * K));
   const int o = (int)(i % (O * N1 * K) / (N1 * K));
@@ -50,6 +49,7 @@ __global__ void assign_score_withk_forward_cuda_kernel(
            scores[b * N1 * K * M + n * K * M + k * M + m];
     }
     output[out_idx] = val;
+  }
 }

 template <typename T>
@@ -58,8 +58,7 @@ __global__ void assign_score_withk_points_backward_cuda_kernel(
     const int O, const int aggregate, const T* grad_out, const T* scores,
     const int64_t* knn_idx, T* grad_points, T* grad_centers) {
   // ----- parallel loop for B, M, O ---------
-  long i = blockIdx.x * blockDim.x + threadIdx.x;
-  if (i >= B * M * O) return;
+  CUDA_1D_KERNEL_LOOP(i, B * M * O) {
   int b = (int)(i / (M * O));
   int m = (int)(i % (M * O) / O);
   int o = (int)(i % O);
@@ -69,8 +68,8 @@ __global__ void assign_score_withk_points_backward_cuda_kernel(
   for (int k = 0; k < K; k++) {
     int kn = knn_idx[b * N * K + n * K + k];
     int cn = knn_idx[b * N * K + n * K + 0];
-    if (kn >= N0 ||
-        kn < 0) {  // if index overflows, it is out of the neighborhood range
+    if (kn >= N0 || kn < 0) {  // if index overflows, it is out of the
+                               // neighborhood range
       continue;
     }
     atomicAdd(grad_points + b * N0 * M * O + kn * M * O + m * O + o,
@@ -81,6 +80,7 @@ __global__ void assign_score_withk_points_backward_cuda_kernel(
               grad_out[b * O * N * K + o * N * K + n * K + k]);
     }
   }
+  }
 }

 template <typename T>
@@ -89,8 +89,7 @@ __global__ void assign_score_withk_scores_backward_cuda_kernel(
     const int O, const int aggregate, const T* grad_out, const T* points,
     const T* centers, const int64_t* knn_idx, T* grad_scores) {
   // ----- parallel loop for B, N, K, M ---------
-  long i = blockIdx.x * blockDim.x + threadIdx.x;
-  if (i >= B * N * K * M) return;
+  CUDA_1D_KERNEL_LOOP(i, B * N * K * M) {
   const int b = (int)(i / (N * M * K));
   const int n = (int)(i % (N * M * K) / M / K);
   const int k = (int)(i % (M * K) / M);
@@ -111,6 +110,7 @@ __global__ void assign_score_withk_scores_backward_cuda_kernel(
            grad_out[b * O * N * K + o * N * K + n * K + k];
     }
     grad_scores[out_idx] = val;
+  }
 }

 #endif  // ASSIGN_SCORE_WITHK_CUDA_KERNEL_CUH

@@ -21,8 +21,8 @@ __global__ void ball_query_forward_cuda_kernel(int b, int n, int m,
   // output:
   // idx: (B, M, nsample)
   int bs_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= b || pt_idx >= m) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, m) {
+    if (bs_idx >= b) return;
     new_xyz += bs_idx * m * 3 + pt_idx * 3;
     xyz += bs_idx * n * 3;
@@ -52,6 +52,7 @@ __global__ void ball_query_forward_cuda_kernel(int b, int n, int m,
       if (cnt >= nsample) break;
     }
   }
+  }
 }

 #endif  // BALL_QUERY_CUDA_KERNEL_CUH

@@ -7,12 +7,20 @@
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
        i += blockDim.x * gridDim.x)

+#define CUDA_2D_KERNEL_LOOP(i, n, j, m)                             \
+  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);   \
+       i += blockDim.x * gridDim.x)                                 \
+    for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); \
+         j += blockDim.y * gridDim.y)
+
+#define CUDA_2D_KERNEL_BLOCK_LOOP(i, n, j, m)          \
+  for (size_t i = blockIdx.x; i < (n); i += gridDim.x) \
+    for (size_t j = blockIdx.y; j < (m); j += gridDim.y)
+
 #define THREADS_PER_BLOCK 512
-#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

-inline int GET_BLOCKS(const int N) {
-  int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
+inline int GET_BLOCKS(const int N, const int num_threads = THREADS_PER_BLOCK) {
+  int optimal_block_num = (N + num_threads - 1) / num_threads;
   int max_block_num = 4096;
   return min(optimal_block_num, max_block_num);
 }

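Because GET_BLOCKS clamps the grid at 4096 blocks, the grid-stride loop is what keeps large inputs fully covered. A worked example with assumed numbers (not from the commit): with THREADS_PER_BLOCK = 512 and N = 3,000,000 elements, the uncapped block count would be 5860, but GET_BLOCKS clamps it to 4096, i.e. 4096 * 512 = 2,097,152 threads. A kernel that only guards with "if (i >= N) return;" would then silently skip the last ~900,000 elements, while CUDA_1D_KERNEL_LOOP covers them because each thread strides by the total thread count:

for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
     i += blockDim.x * gridDim.x) {
  // stride = 512 * 4096 = 2,097,152; thread 0 visits i = 0 and i = 2,097,152,
  // thread 1 visits i = 1 and i = 2,097,153, and so on until i >= N
}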
@@ -22,13 +22,14 @@ __global__ void gather_points_forward_cuda_kernel(int b, int c, int n, int m,
   int bs_idx = blockIdx.z;
   int c_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, m) {
+    if (bs_idx >= b || c_idx >= c) return;
     out += bs_idx * c * m + c_idx * m + pt_idx;
     idx += bs_idx * m + pt_idx;
     points += bs_idx * c * n + c_idx * n;
     out[0] = points[idx[0]];
+  }
 }

 template <typename T>
@@ -43,14 +44,15 @@ __global__ void gather_points_backward_cuda_kernel(int b, int c, int n, int m,
   int bs_idx = blockIdx.z;
   int c_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, m) {
+    if (bs_idx >= b || c_idx >= c) return;
     grad_out += bs_idx * c * m + c_idx * m + pt_idx;
     idx += bs_idx * m + pt_idx;
     grad_points += bs_idx * c * n + c_idx * n;
     atomicAdd(grad_points + idx[0], grad_out[0]);
+  }
 }

 #endif  // GATHER_POINTS_CUDA_KERNEL_CUH

@@ -22,10 +22,10 @@ __global__ void group_points_forward_cuda_kernel(int b, int c, int n,
   // out: (B, C, npoints, nsample)
   int bs_idx = blockIdx.z;
   int c_idx = blockIdx.y;
-  int index = blockIdx.x * blockDim.x + threadIdx.x;
-  int pt_idx = index / nsample;
-  if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
+  CUDA_1D_KERNEL_LOOP(index, npoints * nsample) {
+    if (bs_idx >= b || c_idx >= c) return;
+    int pt_idx = index / nsample;
     int sample_idx = index % nsample;
     idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
@@ -34,6 +34,7 @@ __global__ void group_points_forward_cuda_kernel(int b, int c, int n,
                   pt_idx * nsample + sample_idx;
     out[out_idx] = points[in_idx];
+  }
 }

 template <typename T>
@@ -48,9 +49,9 @@ __global__ void group_points_backward_cuda_kernel(int b, int c, int n,
   // grad_points: (B, C, N)
   int bs_idx = blockIdx.z;
   int c_idx = blockIdx.y;
-  int index = blockIdx.x * blockDim.x + threadIdx.x;
-  int pt_idx = index / nsample;
-  if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
+  CUDA_1D_KERNEL_LOOP(index, npoints * nsample) {
+    int pt_idx = index / nsample;
+    if (bs_idx >= b || c_idx >= c) return;
     int sample_idx = index % nsample;
     grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample +
@@ -58,6 +59,7 @@ __global__ void group_points_backward_cuda_kernel(int b, int c, int n,
     idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
     atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0], grad_out[0]);
+  }
 }

 #endif  // GROUP_POINTS_CUDA_KERNEL_CUH

@@ -220,9 +220,7 @@ __device__ inline float iou_bev(const float *box_a, const float *box_b) {
 __global__ void iou3d_boxes_overlap_bev_forward_cuda_kernel(
     const int num_a, const float *boxes_a, const int num_b,
     const float *boxes_b, float *ans_overlap) {
-  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
-  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
+  CUDA_2D_KERNEL_LOOP(b_idx, num_b, a_idx, num_a) {
     if (a_idx >= num_a || b_idx >= num_b) {
       return;
     }
@@ -230,6 +228,7 @@ __global__ void iou3d_boxes_overlap_bev_forward_cuda_kernel(
     const float *cur_box_b = boxes_b + b_idx * 5;
     float s_overlap = box_overlap(cur_box_a, cur_box_b);
     ans_overlap[a_idx * num_b + b_idx] = s_overlap;
+  }
 }

 __global__ void iou3d_boxes_iou_bev_forward_cuda_kernel(const int num_a,
@@ -237,9 +236,7 @@ __global__ void iou3d_boxes_iou_bev_forward_cuda_kernel(const int num_a,
                                                         const int num_b,
                                                         const float *boxes_b,
                                                         float *ans_iou) {
-  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
-  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
+  CUDA_2D_KERNEL_LOOP(b_idx, num_b, a_idx, num_a) {
     if (a_idx >= num_a || b_idx >= num_b) {
       return;
     }
@@ -248,6 +245,7 @@ __global__ void iou3d_boxes_iou_bev_forward_cuda_kernel(const int num_a,
     const float *cur_box_b = boxes_b + b_idx * 5;
     float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
     ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
+  }
 }

 __global__ void nms_forward_cuda_kernel(const int boxes_num,
@@ -256,10 +254,9 @@ __global__ void nms_forward_cuda_kernel(const int boxes_num,
                                         unsigned long long *mask) {
   // params: boxes (N, 5) [x1, y1, x2, y2, ry]
   // params: mask (N, N/THREADS_PER_BLOCK_NMS)
-  const int row_start = blockIdx.y;
-  const int col_start = blockIdx.x;
+  const int blocks =
+      (boxes_num + THREADS_PER_BLOCK_NMS - 1) / THREADS_PER_BLOCK_NMS;
+  CUDA_2D_KERNEL_BLOCK_LOOP(col_start, blocks, row_start, blocks) {
     // if (row_start > col_start) return;
     const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
@@ -298,9 +295,11 @@ __global__ void nms_forward_cuda_kernel(const int boxes_num,
         t |= 1ULL << i;
       }
     }
-    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+    const int col_blocks =
+        (boxes_num + THREADS_PER_BLOCK_NMS - 1) / THREADS_PER_BLOCK_NMS;
     mask[cur_box_idx * col_blocks + col_start] = t;
   }
+  }
 }

 __device__ inline float iou_normal(float const *const a, float const *const b) {
@@ -320,9 +319,9 @@ __global__ void nms_normal_forward_cuda_kernel(const int boxes_num,
   // params: boxes (N, 5) [x1, y1, x2, y2, ry]
   // params: mask (N, N/THREADS_PER_BLOCK_NMS)
-  const int row_start = blockIdx.y;
-  const int col_start = blockIdx.x;
+  const int blocks =
+      (boxes_num + THREADS_PER_BLOCK_NMS - 1) / THREADS_PER_BLOCK_NMS;
+  CUDA_2D_KERNEL_BLOCK_LOOP(col_start, blocks, row_start, blocks) {
     // if (row_start > col_start) return;
     const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
@@ -361,9 +360,11 @@ __global__ void nms_normal_forward_cuda_kernel(const int boxes_num,
         t |= 1ULL << i;
       }
     }
-    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+    const int col_blocks =
+        (boxes_num + THREADS_PER_BLOCK_NMS - 1) / THREADS_PER_BLOCK_NMS;
     mask[cur_box_idx * col_blocks + col_start] = t;
   }
+  }
 }

 #endif  // IOU3D_CUDA_KERNEL_CUH

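In the two NMS kernels above, the tile indices (col_start, row_start) used to come straight from blockIdx; with CUDA_2D_KERNEL_BLOCK_LOOP each block now walks the tile grid with a block-stride, so the bitmask stays complete even when the launch grid is smaller than blocks x blocks. A sketch (not from the commit) of what the macro expands to here, assuming THREADS_PER_BLOCK_NMS as defined in this header:

__global__ void nms_tile_loop_sketch(const int boxes_num) {
  const int blocks =
      (boxes_num + THREADS_PER_BLOCK_NMS - 1) / THREADS_PER_BLOCK_NMS;
  // CUDA_2D_KERNEL_BLOCK_LOOP(col_start, blocks, row_start, blocks) { ... }
  // expands to two nested block-stride loops:
  for (size_t col_start = blockIdx.x; col_start < blocks;
       col_start += gridDim.x) {
    for (size_t row_start = blockIdx.y; row_start < blocks;
         row_start += gridDim.y) {
      // one tile of up to THREADS_PER_BLOCK_NMS x THREADS_PER_BLOCK_NMS box
      // pairs is handled per iteration, exactly as in nms_forward_cuda_kernel
    }
  }
}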
@@ -51,8 +51,8 @@ __global__ void knn_forward_cuda_kernel(int b, int n, int m, int nsample,
                                         const T *xyz, const T *new_xyz,
                                         int *__restrict__ idx, T *dist2) {
   int bs_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= b || pt_idx >= m) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, m) {
+    if (bs_idx >= b) return;
     new_xyz += bs_idx * m * 3 + pt_idx * 3;
     xyz += bs_idx * n * 3;
@@ -86,6 +86,7 @@ __global__ void knn_forward_cuda_kernel(int b, int n, int m, int nsample,
       idx[i] = best_idx[i];
       dist2[i] = best_dist[i];
     }
+  }
 }

 #endif  // KNN_CUDA_KERNEL_CUH

@@ -15,9 +15,6 @@
 #include "pytorch_cuda_helper.hpp"

 const int CUDA_NUM_THREADS = 1024;
-inline int GET_BLOCKS(const int N, const int num_threads) {
-  return (N + num_threads - 1) / num_threads;
-}

 template <typename scalar_t>
 __device__ scalar_t ms_deform_attn_im2col_bilinear(

@@ -30,8 +30,8 @@ __device__ inline bool devIoU(float const *const a, float const *const b,
 __global__ void nms_cuda(const int n_boxes, const float iou_threshold,
                          const int offset, const float *dev_boxes,
                          unsigned long long *dev_mask) {
-  const int row_start = blockIdx.y;
-  const int col_start = blockIdx.x;
+  int blocks = (n_boxes + threadsPerBlock - 1) / threadsPerBlock;
+  CUDA_2D_KERNEL_BLOCK_LOOP(col_start, blocks, row_start, blocks) {
     const int tid = threadIdx.x;
     if (row_start > col_start) return;
@@ -70,5 +70,6 @@ __global__ void nms_cuda(const int n_boxes, const float iou_threshold,
     }
     dev_mask[cur_box_idx * gridDim.y + col_start] = t;
   }
+  }
 }

 #endif  // NMS_CUDA_KERNEL_CUH

@@ -45,8 +45,8 @@ __global__ void points_in_boxes_part_forward_cuda_kernel(
   // (B, npoints), default -1
   int bs_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= batch_size || pt_idx >= pts_num) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, pts_num) {
+    if (bs_idx >= batch_size) return;
     boxes += bs_idx * boxes_num * 7;
     pts += bs_idx * pts_num * 3 + pt_idx * 3;
@@ -61,6 +61,7 @@ __global__ void points_in_boxes_part_forward_cuda_kernel(
         break;
       }
     }
+  }
 }

 template <typename T>
@@ -73,8 +74,8 @@ __global__ void points_in_boxes_all_forward_cuda_kernel(
   // (B, npoints), default -1
   int bs_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= batch_size || pt_idx >= pts_num) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, pts_num) {
+    if (bs_idx >= batch_size) return;
     boxes += bs_idx * boxes_num * 7;
     pts += bs_idx * pts_num * 3 + pt_idx * 3;
@@ -88,6 +89,7 @@ __global__ void points_in_boxes_all_forward_cuda_kernel(
         box_idx_of_points[k] = 1;
       }
     }
+  }
 }

 #endif  // POINT_IN_BOXES_CUDA_KERNEL_CUH

@@ -44,9 +44,9 @@ __global__ void generate_pts_mask_for_box3d(int boxes_num, int pts_num,
   // coordinate params pts: (npoints, 3) [x, y, z] params pts_mask: (N,
   // npoints): -1 means point does not in this box, otherwise: encode (x_idxs,
   // y_idxs, z_idxs) by binary bit
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
   int box_idx = blockIdx.y;
-  if (pt_idx >= pts_num || box_idx >= boxes_num) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, pts_num) {
+    if (box_idx >= boxes_num) return;
     pts += pt_idx * 3;
     rois += box_idx * 7;
@@ -76,6 +76,7 @@ __global__ void generate_pts_mask_for_box3d(int boxes_num, int pts_num,
       pts_mask[0] = idx_encoding;
     }
+  }
 }

 template <typename T>
@@ -86,10 +87,7 @@ __global__ void collect_inside_pts_for_box3d(int boxes_num, int pts_num,
                                              T *pts_idx_of_voxels) {
   // params pts_mask: (N, npoints) 0 or 1
   // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
-  int box_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (box_idx >= boxes_num) return;
+  CUDA_1D_KERNEL_LOOP(box_idx, boxes_num) {
     int max_num_pts = max_pts_each_voxel - 1;  // index 0 is the counter
     pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel;
@@ -109,6 +107,7 @@ __global__ void collect_inside_pts_for_box3d(int boxes_num, int pts_num,
       }
     }
   }
+  }
 }

 template <typename T>
@@ -124,14 +123,11 @@ __global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels,
   int box_idx = blockIdx.z;
   int channel_idx = blockIdx.y;
-  int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
+  CUDA_1D_KERNEL_LOOP(voxel_idx_flat, out_x * out_y * out_z) {
     int x_idx = voxel_idx_flat / (out_y * out_z);
     int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
     int z_idx = voxel_idx_flat % out_z;
-    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x ||
-        y_idx >= out_y || z_idx >= out_z)
-      return;
+    if (box_idx >= boxes_num || channel_idx >= channels) return;
     int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
     pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel +
@@ -147,7 +143,8 @@ __global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels,
     int total_pts = pts_idx_of_voxels[0];
     for (int k = 1; k <= total_pts; k++) {
-      if (pts_feature[pts_idx_of_voxels[k] * channels + channel_idx] > max_val) {
+      if (pts_feature[pts_idx_of_voxels[k] * channels + channel_idx] >
+          max_val) {
         max_val = pts_feature[pts_idx_of_voxels[k] * channels + channel_idx];
         argmax_idx = pts_idx_of_voxels[k];
       }
@@ -157,6 +154,7 @@ __global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels,
       pooled_features[0] = max_val;
     }
     argmax[0] = argmax_idx;
+  }
 }

 template <typename T>
@@ -172,14 +170,11 @@ __global__ void roiaware_avgpool3d(int boxes_num, int pts_num, int channels,
   int box_idx = blockIdx.z;
   int channel_idx = blockIdx.y;
-  int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
+  CUDA_1D_KERNEL_LOOP(voxel_idx_flat, out_x * out_y * out_z) {
    int x_idx = voxel_idx_flat / (out_y * out_z);
    int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
    int z_idx = voxel_idx_flat % out_z;
-    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x ||
-        y_idx >= out_y || z_idx >= out_z)
-      return;
+    if (box_idx >= boxes_num || channel_idx >= channels) return;
     int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
     pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel +
@@ -197,6 +192,7 @@ __global__ void roiaware_avgpool3d(int boxes_num, int pts_num, int channels,
     if (total_pts > 0) {
       pooled_features[0] = sum_val / total_pts;
     }
+  }
 }

 template <typename T>
@@ -210,14 +206,11 @@ __global__ void roiaware_maxpool3d_backward(int boxes_num, int channels,
   int box_idx = blockIdx.z;
   int channel_idx = blockIdx.y;
-  int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
+  CUDA_1D_KERNEL_LOOP(voxel_idx_flat, out_x * out_y * out_z) {
    int x_idx = voxel_idx_flat / (out_y * out_z);
    int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
    int z_idx = voxel_idx_flat % out_z;
-    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x ||
-        y_idx >= out_y || z_idx >= out_z)
-      return;
+    if (box_idx >= boxes_num || channel_idx >= channels) return;
     int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
     argmax += box_idx * out_x * out_y * out_z * channels +
@@ -228,6 +221,7 @@ __global__ void roiaware_maxpool3d_backward(int boxes_num, int channels,
     if (argmax[0] == -1) return;
     atomicAdd(grad_in + argmax[0] * channels + channel_idx, grad_out[0] * 1);
+  }
 }

 template <typename T>
@@ -242,14 +236,11 @@ __global__ void roiaware_avgpool3d_backward(int boxes_num, int channels,
   int box_idx = blockIdx.z;
   int channel_idx = blockIdx.y;
-  int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
+  CUDA_1D_KERNEL_LOOP(voxel_idx_flat, out_x * out_y * out_z) {
    int x_idx = voxel_idx_flat / (out_y * out_z);
    int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
    int z_idx = voxel_idx_flat % out_z;
-    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x ||
-        y_idx >= out_y || z_idx >= out_z)
-      return;
+    if (box_idx >= boxes_num || channel_idx >= channels) return;
     int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
     pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel +
@@ -263,6 +254,7 @@ __global__ void roiaware_avgpool3d_backward(int boxes_num, int channels,
       atomicAdd(grad_in + pts_idx_of_voxels[k] * channels + channel_idx,
                 grad_out[0] * cur_grad);
     }
+  }
 }

 #endif  // ROIAWARE_POOL3D_CUDA_KERNEL_CUH

@@ -42,14 +42,13 @@ __global__ void assign_pts_to_box3d(int batch_size, int pts_num, int boxes_num,
   // params boxes3d: (B, M, 7)
   // params pts_assign: (B, N, M): idx of the corresponding box3d, -1 means
   // background points
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
   int box_idx = blockIdx.y;
   int bs_idx = blockIdx.z;
-  if (pt_idx >= pts_num || box_idx >= boxes_num || bs_idx >= batch_size) {
-    return;
-  }
-  int assign_idx = bs_idx * pts_num * boxes_num + pt_idx * boxes_num + box_idx;
+  CUDA_1D_KERNEL_LOOP(pt_idx, pts_num) {
+    if (box_idx >= boxes_num || bs_idx >= batch_size) return;
+    int assign_idx =
+        bs_idx * pts_num * boxes_num + pt_idx * boxes_num + box_idx;
     pts_assign[assign_idx] = 0;
     int box_offset = bs_idx * boxes_num * 7 + box_idx * 7;
@@ -59,6 +58,7 @@ __global__ void assign_pts_to_box3d(int batch_size, int pts_num, int boxes_num,
     int cur_in_flag = check_pt_in_box3d(xyz + pt_offset, boxes3d + box_offset,
                                         local_x, local_y);
     pts_assign[assign_idx] = cur_in_flag;
+  }
 }

 __global__ void get_pooled_idx(int batch_size, int pts_num, int boxes_num,
@@ -69,17 +69,13 @@ __global__ void get_pooled_idx(int batch_size, int pts_num, int boxes_num,
   // params pts_assign: (B, N)
   // params pts_idx: (B, M, 512)
   // params pooled_empty_flag: (B, M)
-  int boxes_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (boxes_idx >= boxes_num) {
-    return;
-  }
+  CUDA_1D_KERNEL_LOOP(boxes_idx, boxes_num) {
     int bs_idx = blockIdx.y;
     int cnt = 0;
     for (int k = 0; k < pts_num; k++) {
-      if (pts_assign[bs_idx * pts_num * boxes_num + k * boxes_num + boxes_idx]) {
+      if (pts_assign[bs_idx * pts_num * boxes_num + k * boxes_num +
+                     boxes_idx]) {
         if (cnt < sampled_pts_num) {
           pts_idx[bs_idx * boxes_num * sampled_pts_num +
                   boxes_idx * sampled_pts_num + cnt] = k;
@@ -100,6 +96,7 @@ __global__ void get_pooled_idx(int batch_size, int pts_num, int boxes_num,
       pts_idx[base_offset + k] = pts_idx[base_offset + duplicate_idx];
     }
   }
+  }
 }

 template <typename T>
@@ -112,19 +109,11 @@ __global__ void roipoint_pool3d_forward(
   // params pts_feature: (B, N, C)
   // params pooled_features: (B, M, 512, 3+C)
   // params pooled_empty_flag: (B, M)
-  int sample_pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
   int box_idx = blockIdx.y;
   int bs_idx = blockIdx.z;
-  if (sample_pt_idx >= sampled_pts_num || box_idx >= boxes_num ||
-      bs_idx >= batch_size) {
-    return;
-  }
-  if (pooled_empty_flag[bs_idx * boxes_num + box_idx]) {
-    return;
-  }
+  CUDA_1D_KERNEL_LOOP(sample_pt_idx, sampled_pts_num) {
+    if (box_idx >= boxes_num || bs_idx >= batch_size) return;
+    if (pooled_empty_flag[bs_idx * boxes_num + box_idx]) return;
     int temp_idx = bs_idx * boxes_num * sampled_pts_num +
                    box_idx * sampled_pts_num + sample_pt_idx;
@@ -139,6 +128,7 @@ __global__ void roipoint_pool3d_forward(
         bs_idx * pts_num * feature_in_len + src_pt_idx * feature_in_len;
     memcpy(pooled_features + dst_feature_offset + 3,
            pts_feature + src_feature_offset, feature_in_len * sizeof(T));
+  }
 }

 #endif  // ROIPOINT_POOL3D_CUDA_KERNEL_CUH

@@ -20,9 +20,8 @@ __global__ void three_interpolate_forward_cuda_kernel(
   int bs_idx = blockIdx.z;
   int c_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, n) {
+    if (bs_idx >= b || c_idx >= c) return;
     weight += bs_idx * n * 3 + pt_idx * 3;
     points += bs_idx * c * m + c_idx * m;
@@ -31,6 +30,7 @@ __global__ void three_interpolate_forward_cuda_kernel(
     out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] +
                   weight[2] * points[idx[2]];
+  }
 }

 template <typename T>
@@ -44,9 +44,8 @@ __global__ void three_interpolate_backward_cuda_kernel(
   int bs_idx = blockIdx.z;
   int c_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, n) {
+    if (bs_idx >= b || c_idx >= c) return;
     grad_out += bs_idx * c * n + c_idx * n + pt_idx;
     weight += bs_idx * n * 3 + pt_idx * 3;
@@ -56,6 +55,7 @@ __global__ void three_interpolate_backward_cuda_kernel(
     atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]);
     atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]);
     atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]);
+  }
 }

 #endif  // THREE_INTERPOLATE_CUDA_KERNEL_CUH

@@ -19,8 +19,8 @@ __global__ void three_nn_forward_cuda_kernel(int b, int n, int m,
   // idx: (B, N, 3)
   int bs_idx = blockIdx.y;
-  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
-  if (bs_idx >= b || pt_idx >= n) return;
+  CUDA_1D_KERNEL_LOOP(pt_idx, n) {
+    if (bs_idx >= b) return;
     unknown += bs_idx * n * 3 + pt_idx * 3;
     known += bs_idx * m * 3;
@@ -61,6 +61,7 @@ __global__ void three_nn_forward_cuda_kernel(int b, int n, int m,
     idx[0] = besti1;
     idx[1] = besti2;
     idx[2] = besti3;
+  }
 }

 #endif  // THREE_NN_CUDA_KERNEL_CUH

@@ -101,7 +101,7 @@ __global__ void point_to_voxelidx_kernel(const T_int* coor,
   CUDA_1D_KERNEL_LOOP(index, num_points) {
     auto coor_offset = coor + index * NDim;
     // skip invalid points
-    if ((index >= num_points) || (coor_offset[0] == -1)) return;
+    if (coor_offset[0] == -1) return;
     int num = 0;
     int coor_x = coor_offset[0];

@@ -6,8 +6,6 @@
 using namespace at;

-#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
-
 #define CHECK_CUDA(x) \
   TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
 #define CHECK_CPU(x) \

@@ -73,8 +73,8 @@ void iou3d_nms_forward(Tensor boxes, Tensor keep, Tensor keep_num,
   int64_t *keep_data = keep.data_ptr<int64_t>();
   int64_t *keep_num_data = keep_num.data_ptr<int64_t>();

-  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+  const int col_blocks =
+      (boxes_num + THREADS_PER_BLOCK_NMS - 1) / THREADS_PER_BLOCK_NMS;
   Tensor mask =
       at::empty({boxes_num, col_blocks}, boxes.options().dtype(at::kLong));
   unsigned long long *mask_data =

@@ -13,7 +13,7 @@ void AssignScoreWithKForwardCUDAKernelLauncher(
   at::cuda::CUDAGuard device_guard(points.device());
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();

-  dim3 blocks(DIVUP(B * O * N1 * K, THREADS_PER_BLOCK));
+  dim3 blocks(GET_BLOCKS(B * O * N1 * K, THREADS_PER_BLOCK));
   dim3 threads(THREADS_PER_BLOCK);

   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
@@ -36,9 +36,9 @@ void AssignScoreWithKBackwardCUDAKernelLauncher(
   at::cuda::CUDAGuard device_guard(grad_out.device());
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();

-  dim3 blocks1(DIVUP(B * M * O, THREADS_PER_BLOCK));
+  dim3 blocks1(GET_BLOCKS(B * M * O, THREADS_PER_BLOCK));
   dim3 threads1(THREADS_PER_BLOCK);
-  dim3 blocks2(DIVUP(B * N1 * K * M, THREADS_PER_BLOCK));
+  dim3 blocks2(GET_BLOCKS(B * N1 * K * M, THREADS_PER_BLOCK));
   dim3 threads2(THREADS_PER_BLOCK);

   AT_DISPATCH_FLOATING_TYPES_AND_HALF(

@@ -22,7 +22,7 @@ void BallQueryForwardCUDAKernelLauncher(int b, int n, int m, float min_radius,
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   // blockIdx.x(col), blockIdx.y(row)
-  dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b);
+  dim3 blocks(GET_BLOCKS(m, THREADS_PER_BLOCK), b);
   dim3 threads(THREADS_PER_BLOCK);

   AT_DISPATCH_FLOATING_TYPES_AND_HALF(

@@ -16,7 +16,7 @@ void GatherPointsForwardCUDAKernelLauncher(int b, int c, int n, int npoints,
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   // blockIdx.x(col), blockIdx.y(row)
-  dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b);
+  dim3 blocks(GET_BLOCKS(npoints, THREADS_PER_BLOCK), c, b);
   dim3 threads(THREADS_PER_BLOCK);

   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
@@ -43,7 +43,7 @@ void GatherPointsBackwardCUDAKernelLauncher(int b, int c, int n, int npoints,
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   // blockIdx.x(col), blockIdx.y(row)
-  dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b);
+  dim3 blocks(GET_BLOCKS(npoints, THREADS_PER_BLOCK), c, b);
   dim3 threads(THREADS_PER_BLOCK);

   AT_DISPATCH_FLOATING_TYPES_AND_HALF(

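The launcher-side half of the pattern, shown as a hedged sketch with a hypothetical copy_per_point kernel (names and signature are illustrative, not part of this commit; the macros come from common_cuda_helper.hpp): only the x dimension of the grid goes through GET_BLOCKS and is therefore capped, while batch and channel stay on blockIdx.z / blockIdx.y, matching the gather_points launchers above.

// Hypothetical kernel: strides over points on x, reads batch/channel from the
// y/z block indices, exactly like the gather_points kernels above.
template <typename T>
__global__ void copy_per_point(int b, int c, int npoints, const T *in, T *out) {
  int bs_idx = blockIdx.z;
  int c_idx = blockIdx.y;
  CUDA_1D_KERNEL_LOOP(pt_idx, npoints) {
    if (bs_idx >= b || c_idx >= c) return;
    int i = bs_idx * c * npoints + c_idx * npoints + pt_idx;
    out[i] = in[i];
  }
}

void copy_per_point_launcher(int b, int c, int npoints, const float *in,
                             float *out, cudaStream_t stream) {
  // x: points (clamped to at most 4096 blocks by GET_BLOCKS),
  // y: channels, z: batch
  dim3 blocks(GET_BLOCKS(npoints, THREADS_PER_BLOCK), c, b);
  dim3 threads(THREADS_PER_BLOCK);
  copy_per_point<float><<<blocks, threads, 0, stream>>>(b, c, npoints, in, out);
}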