// sampling.cpp
#include <torch/serialize/tensor.h>
#include <ATen/cuda/CUDAContext.h>
#include <vector>
#include "sampling_gpu.h"

// Argument-validation macros for tensor inputs: print a diagnostic and
// abort the process if a tensor is not on the GPU or not contiguous.
// NOTE: x.is_cuda() replaces the deprecated x.type().is_cuda() accessor,
// which was removed from the PyTorch C++ API.
#define CHECK_CUDA(x) do { \
  if (!x.is_cuda()) { \
    fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
    exit(-1); \
  } \
} while (0)
#define CHECK_CONTIGUOUS(x) do { \
  if (!x.is_contiguous()) { \
    fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
    exit(-1); \
  } \
} while (0)
// Wrapped in do/while(0) so CHECK_INPUT(x); is a single statement and is
// safe inside un-braced if/else — the original two-statement expansion
// (CHECK_CUDA(x);CHECK_CONTIGUOUS(x)) silently breaks in that context.
#define CHECK_INPUT(x) do { CHECK_CUDA(x); CHECK_CONTIGUOUS(x); } while (0)
int farthest_point_sampling_wrapper(int b, int n, int m,
22
23
    at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) {

24
25
26
27
    CHECK_INPUT(points_tensor);
    CHECK_INPUT(temp_tensor);
    CHECK_INPUT(idx_tensor);

28
29
30
31
    const float *points = points_tensor.data<float>();
    float *temp = temp_tensor.data<float>();
    int *idx = idx_tensor.data<int>();

32
    farthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx);
33
34
    return 1;
}
/**
 * Host-side entry point for FPS on stacked (variable-batch) point sets,
 * where all batches' points are concatenated along dim 0.
 *
 * @param points_tensor            stacked point coordinates; CUDA float
 *                                 tensor with N total points (rows)
 * @param temp_tensor              scratch distance buffer; CUDA float tensor
 * @param xyz_batch_cnt_tensor     per-batch point counts; CUDA int tensor
 *                                 of shape (batch_size)
 * @param idx_tensor               output sampled indices; CUDA int tensor
 * @param num_sampled_points_tensor per-batch number of points to sample;
 *                                 CUDA int tensor
 * @return                         always 1 (legacy success flag)
 */
int stack_farthest_point_sampling_wrapper(at::Tensor points_tensor,
  at::Tensor temp_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor,
  at::Tensor num_sampled_points_tensor) {

    CHECK_INPUT(points_tensor);
    CHECK_INPUT(temp_tensor);
    CHECK_INPUT(idx_tensor);
    CHECK_INPUT(xyz_batch_cnt_tensor);
    CHECK_INPUT(num_sampled_points_tensor);

    int batch_size = xyz_batch_cnt_tensor.size(0);
    int N = points_tensor.size(0);
    // data_ptr<T>() replaces the deprecated (and since-removed) data<T>().
    const float *points = points_tensor.data_ptr<float>();
    float *temp = temp_tensor.data_ptr<float>();
    int *xyz_batch_cnt = xyz_batch_cnt_tensor.data_ptr<int>();
    int *idx = idx_tensor.data_ptr<int>();
    int *num_sampled_points = num_sampled_points_tensor.data_ptr<int>();

    stack_farthest_point_sampling_kernel_launcher(N, batch_size, points, temp, xyz_batch_cnt, idx, num_sampled_points);
    return 1;
}