// spspmm.cpp — TorchScript operator for sparse-sparse matrix multiplication.
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>

#include "cpu/spspmm_cpu.h"

#ifdef WITH_CUDA
#include "cuda/spspmm_cuda.h"
#endif

// On Windows, a Python extension DLL must export a PyInit_<name> symbol even
// when the module's functionality is registered through TorchScript rather
// than a classic Python module table.  These stubs satisfy the loader; the
// real operator registration happens via torch::RegisterOperators below.
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__spspmm_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__spspmm_cpu(void) { return NULL; }
#endif
#endif
#endif

// Sparse-sparse matrix multiplication with "sum" reduction: C = A @ B, where
// A and B are given in CSR form (row pointers + column indices + optional
// non-zero values).
//
// Parameters:
//   rowptrA, colA      - CSR structure of A.
//   optional_valueA    - non-zero values of A; if absent, A is treated as a
//                        pattern-only (binary) matrix.
//   rowptrB, colB      - CSR structure of B.
//   optional_valueB    - non-zero values of B (same convention as A).
//   K                  - number of columns of B (and of the result C).
//
// Returns the CSR triple (rowptrC, colC, optional valueC) of the product.
// Dispatches on the device of rowptrA: CUDA tensors go to the CUDA kernel
// (an error is raised if the extension was built without CUDA support),
// everything else goes to the CPU implementation.
SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
spspmm_sum(torch::Tensor rowptrA, torch::Tensor colA,
           torch::optional<torch::Tensor> optional_valueA,
           torch::Tensor rowptrB, torch::Tensor colB,
           torch::optional<torch::Tensor> optional_valueB, int64_t K) {
  if (rowptrA.device().is_cuda()) {
#ifdef WITH_CUDA
    return spspmm_cuda(rowptrA, colA, optional_valueA, rowptrB, colB,
                       optional_valueB, K, "sum");
#else
    AT_ERROR("Not compiled with CUDA support");
#endif
  } else {
    return spspmm_cpu(rowptrA, colA, optional_valueA, rowptrB, colB,
                      optional_valueB, K, "sum");
  }
}

// Registers spspmm_sum as the TorchScript operator "torch_sparse::spspmm_sum"
// as a side effect of static initialization when this translation unit loads.
static auto registry =
    torch::RegisterOperators().op("torch_sparse::spspmm_sum", &spspmm_sum);