#include <stdio.h>
#include <stdlib.h>

#define THREADS_PER_BLOCK 256
// DIVUP(m, n): integer ceiling division, used to size the grid along blockIdx.x.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

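// CUDA kernels for the group_points operation.
// Forward  (group_points_kernel):      out[b][c][pt][s] = points[b][c][idx[b][pt][s]]
// Backward (group_points_grad_kernel): grad_points[b][c][idx[b][pt][s]] += grad_out[b][c][pt][s]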
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
                                         int nsample,
                                         const float *__restrict__ grad_out,
                                         const int *__restrict__ idx,
                                         float *__restrict__ grad_points) {
  // grad_out: (B, C, npoints, nsample)
  // idx: (B, npoints, nsample)
  // output:
  //      grad_points: (B, C, N)
  int bs_idx = blockIdx.z;
  int c_idx = blockIdx.y;
  // One thread per (pt_idx, sample_idx) pair of the current (batch, channel) slice.
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int pt_idx = index / nsample;
  if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;

  int sample_idx = index % nsample;
  grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample +
              pt_idx * nsample + sample_idx;
  idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;

  // Different samples may reference the same source point, so gradient
  // contributions are accumulated atomically into grad_points.
  atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0], grad_out[0]);
}

void group_points_grad_kernel_launcher(int b, int c, int n, int npoints,
                                       int nsample, const float *grad_out,
                                       const int *idx, float *grad_points,
                                       cudaStream_t stream) {
  // grad_out: (B, C, npoints, nsample)
  // idx: (B, npoints, nsample)
  // output:
  //      grad_points: (B, C, N)
  cudaError_t err;
  // Grid layout: blockIdx.x over npoints * nsample, blockIdx.y over channels,
  // blockIdx.z over batches.
  dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
  dim3 threads(THREADS_PER_BLOCK);

  group_points_grad_kernel<<<blocks, threads, 0, stream>>>(
      b, c, n, npoints, nsample, grad_out, idx, grad_points);

  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}

__global__ void group_points_kernel(int b, int c, int n, int npoints,
                                    int nsample,
                                    const float *__restrict__ points,
                                    const int *__restrict__ idx,
                                    float *__restrict__ out) {
  // points: (B, C, N)
  // idx: (B, npoints, nsample)
  // output:
  //      out: (B, C, npoints, nsample)
  int bs_idx = blockIdx.z;
  int c_idx = blockIdx.y;
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int pt_idx = index / nsample;
  if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;

  int sample_idx = index % nsample;

  idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
  int in_idx = bs_idx * c * n + c_idx * n + idx[0];
  int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample +
                pt_idx * nsample + sample_idx;

  // Gather: out[b][c][pt][s] = points[b][c][idx[b][pt][s]].
  out[out_idx] = points[in_idx];
}
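
// Illustrative trace of group_points_kernel above: with nsample = 16, the
// thread with index = 35 in its (bs_idx, c_idx) slice gets pt_idx = 35 / 16 = 2
// and sample_idx = 35 % 16 = 3, so it copies
// points[bs_idx][c_idx][idx[bs_idx][2][3]] into out[bs_idx][c_idx][2][3].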

void group_points_kernel_launcher(int b, int c, int n, int npoints, int nsample,
                                  const float *points, const int *idx,
                                  float *out, cudaStream_t stream) {
  // points: (B, C, N)
  // idx: (B, npoints, nsample)
  // output:
  //      out: (B, C, npoints, nsample)
  cudaError_t err;
  // Grid layout: blockIdx.x over npoints * nsample, blockIdx.y over channels,
  // blockIdx.z over batches.
  dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
  dim3 threads(THREADS_PER_BLOCK);

  group_points_kernel<<<blocks, threads, 0, stream>>>(b, c, n, npoints, nsample,
                                                      points, idx, out);
  // cudaDeviceSynchronize();  // for using printf in kernel function
  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
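
// Illustrative host-side usage (a minimal sketch, not part of this file's
// exported API; buffer names, sizes, and the use of the default stream are
// assumptions made for this example):
//
//   int B = 2, C = 3, N = 1024, npoints = 128, nsample = 16;
//   float *points_d, *out_d;
//   int *idx_d;
//   cudaMalloc(&points_d, B * C * N * sizeof(float));
//   cudaMalloc(&idx_d, B * npoints * nsample * sizeof(int));
//   cudaMalloc(&out_d, B * C * npoints * nsample * sizeof(float));
//   // ... fill points_d and idx_d on the device, then launch:
//   group_points_kernel_launcher(B, C, N, npoints, nsample,
//                                points_d, idx_d, out_d, 0);
//   cudaStreamSynchronize(0);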