graclus.cpp
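// graclus.cpp: TorchScript entry point for the graclus operator. Dispatches
// between the CPU and CUDA implementations based on the device of the input.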
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>

#include "cpu/graclus_cpu.h"

#ifdef WITH_CUDA
#include "cuda/graclus_cuda.h"
#endif

#ifdef _WIN32
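// On Windows the library is also built and loaded as a Python extension
// module, which requires an exported PyInit_* symbol. These stubs presumably
// exist only to satisfy that requirement; all functionality is exposed via
// the TorchScript operator registry below, so they simply return NULL.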
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__graclus_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__graclus_cpu(void) { return NULL; }
#endif
#endif
#endif

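// graclus: greedy node clustering (matching each node with a neighbor,
// preferring heavy edges when weights are given). `rowptr` and `col` describe
// the graph in CSR form; `optional_weight` optionally holds edge weights.
// Returns a tensor assigning a cluster id to every node.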
CLUSTER_API torch::Tensor graclus(torch::Tensor rowptr, torch::Tensor col,
                                  torch::optional<torch::Tensor> optional_weight) {
  if (rowptr.device().is_cuda()) {
#ifdef WITH_CUDA
    return graclus_cuda(rowptr, col, optional_weight);
#else
    AT_ERROR("Not compiled with CUDA support");
#endif
  } else {
    return graclus_cpu(rowptr, col, optional_weight);
  }
}

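// Register the operator with TorchScript under the name
// "torch_cluster::graclus" so it is callable from Python and scripted code.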
static auto registry =
    torch::RegisterOperators().op("torch_cluster::graclus", &graclus);
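
// Usage sketch (hypothetical call site, not part of this file): once the
// shared library has been loaded, the op can be invoked directly,
//   auto cluster = graclus(rowptr, col, torch::nullopt);
// or from Python as torch.ops.torch_cluster.graclus(rowptr, col, None).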