Unverified Commit 66e70ae4 authored by Matthias Fey's avatar Matthias Fey Committed by GitHub
Browse files

cmake fixes (#212)

parent 3c64bb0d
#pragma once
#include <torch/library.h>
#include <torch/extension.h>
#ifdef _WIN32
#if defined(torchsparse_EXPORTS)
......@@ -12,61 +12,79 @@
#define SPARSE_API
#endif
// SPARSE_INLINE_VARIABLE: portable substitute for a C++17 "inline variable"
// storage specifier, so a variable defined in this header has exactly one
// definition across all translation units that include it.
//   - C++17 mode (or any compiler advertising __cpp_inline_variables): use
//     the standard `inline` keyword directly.
//   - older MSVC: __declspec(selectany) lets the linker pick one definition.
//   - older GCC/Clang: a weak symbol achieves the same effect.
#if (defined __cpp_inline_variables) || __cplusplus >= 201703L
#define SPARSE_INLINE_VARIABLE inline
#else
#ifdef _MSC_VER
#define SPARSE_INLINE_VARIABLE __declspec(selectany)
#else
#define SPARSE_INLINE_VARIABLE __attribute__((weak))
#endif
#endif
namespace sparse {

/// Returns the CUDA toolkit version the extension was compiled against,
/// or -1 when built without CUDA support (see definition in version.cpp).
SPARSE_API int64_t cuda_version() noexcept;

namespace detail {
// Evaluated at library-load time so that referencing the library is enough
// to pull in (and sanity-check) the CUDA version symbol. The
// SPARSE_INLINE_VARIABLE specifier guarantees a single definition even
// though this header is included from many translation units.
SPARSE_INLINE_VARIABLE int64_t _cuda_version = cuda_version();
} // namespace detail

} // namespace sparse
// Converts a sorted row-index vector `ind` into a compressed row pointer of
// length M + 1 (CSR conversion).
SPARSE_API torch::Tensor ind2ptr(torch::Tensor ind, int64_t M);

// Inverse of ind2ptr: expands a row pointer `ptr` back into E row indices.
SPARSE_API torch::Tensor ptr2ind(torch::Tensor ptr, int64_t E);

// Graph partitioning of a CSR graph (rowptr/col) into `num_parts` parts.
// `optional_value` carries optional edge weights; `recursive` selects
// recursive bisection over k-way partitioning. Returns the per-node
// partition assignment.
SPARSE_API torch::Tensor
partition(torch::Tensor rowptr, torch::Tensor col,
          torch::optional<torch::Tensor> optional_value, int64_t num_parts,
          bool recursive);

// Like partition(), but additionally accepts optional per-node weights.
SPARSE_API torch::Tensor
partition2(torch::Tensor rowptr, torch::Tensor col,
           torch::optional<torch::Tensor> optional_value,
           torch::optional<torch::Tensor> optional_node_weight,
           int64_t num_parts, bool recursive);

// Multi-threaded variant of partition2(), parallelized over `num_workers`.
SPARSE_API torch::Tensor
mt_partition(torch::Tensor rowptr, torch::Tensor col,
             torch::optional<torch::Tensor> optional_value,
             torch::optional<torch::Tensor> optional_node_weight,
             int64_t num_parts, bool recursive, int64_t num_workers);
// Relabels the column indices selected by `idx` to consecutive ids.
// Returns the relabeled columns and the mapping of new ids to original ids.
SPARSE_API std::tuple<torch::Tensor, torch::Tensor> relabel(torch::Tensor col,
                                                            torch::Tensor idx);

// Extracts and relabels the one-hop neighborhood of the nodes in `idx` from
// the CSR graph (rowptr/col), carrying optional edge values along.
// `bipartite` controls whether source and target node sets are kept
// distinct. Returns (rowptr, col, optional value, node mapping).
SPARSE_API std::tuple<torch::Tensor, torch::Tensor,
                      torch::optional<torch::Tensor>, torch::Tensor>
relabel_one_hop(torch::Tensor rowptr, torch::Tensor col,
                torch::optional<torch::Tensor> optional_value,
                torch::Tensor idx, bool bipartite);
// Samples random walks of length `walk_length` from each node in `start`
// over the CSR graph (rowptr/col).
SPARSE_API torch::Tensor random_walk(torch::Tensor rowptr, torch::Tensor col,
                                     torch::Tensor start, int64_t walk_length);

// Extracts the subgraph induced by the nodes in `idx` from the graph given
// in both CSR (rowptr) and COO (row/col) form.
SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
subgraph(torch::Tensor idx, torch::Tensor rowptr, torch::Tensor row,
         torch::Tensor col);

// Samples up to `num_neighbors` adjacent nodes for each node in `idx`
// (with or without replacement), returning the sampled adjacency.
SPARSE_API
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
sample_adj(torch::Tensor rowptr, torch::Tensor col, torch::Tensor idx,
           int64_t num_neighbors, bool replace);
// Sparse-dense matrix multiplication with sum reduction. The sparse matrix
// is given in CSR form (rowptr/col) with optional COO rows, values, and
// precomputed CSC auxiliaries (colptr, csr2csc) used by the backward pass.
SPARSE_API torch::Tensor spmm_sum(torch::optional<torch::Tensor> opt_row,
                                  torch::Tensor rowptr, torch::Tensor col,
                                  torch::optional<torch::Tensor> opt_value,
                                  torch::optional<torch::Tensor> opt_colptr,
                                  torch::optional<torch::Tensor> opt_csr2csc,
                                  torch::Tensor mat);

// Sparse-dense matrix multiplication with mean reduction; additionally
// takes an optional per-row count (`opt_rowcount`) for the normalization.
SPARSE_API torch::Tensor spmm_mean(torch::optional<torch::Tensor> opt_row,
                                   torch::Tensor rowptr, torch::Tensor col,
                                   torch::optional<torch::Tensor> opt_value,
                                   torch::optional<torch::Tensor> opt_rowcount,
                                   torch::optional<torch::Tensor> opt_colptr,
                                   torch::optional<torch::Tensor> opt_csr2csc,
                                   torch::Tensor mat);
SPARSE_API std::tuple<torch::Tensor, torch::Tensor>
spmm_min(torch::Tensor rowptr, torch::Tensor col,
......@@ -76,8 +94,9 @@ SPARSE_API std::tuple<torch::Tensor, torch::Tensor>
// Sparse-dense matrix multiplication with max reduction; also returns the
// argmax indices (return type declared on the preceding line).
spmm_max(torch::Tensor rowptr, torch::Tensor col,
         torch::optional<torch::Tensor> opt_value, torch::Tensor mat);

// Sparse-sparse matrix multiplication (A @ B) with sum reduction. Both
// operands are CSR (rowptr/col with optional values); K is the number of
// columns of B. Returns the result's (rowptr, col, optional value).
SPARSE_API
std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
spspmm_sum(torch::Tensor rowptrA, torch::Tensor colA,
           torch::optional<torch::Tensor> optional_valueA,
           torch::Tensor rowptrB, torch::Tensor colB,
           torch::optional<torch::Tensor> optional_valueB, int64_t K);
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "sparse.h"
#include <torch/script.h>
#ifdef WITH_CUDA
#include <cuda.h>
......@@ -18,13 +18,15 @@ PyMODINIT_FUNC PyInit__version_cpu(void) { return NULL; }
#endif
#endif
namespace sparse {

// Returns the CUDA toolkit version this extension was compiled against
// (the CUDA_VERSION macro from <cuda.h>), or -1 for a CPU-only build.
SPARSE_API int64_t cuda_version() noexcept {
#ifdef WITH_CUDA
  return CUDA_VERSION;
#else
  return -1;
#endif
}

} // namespace sparse
// Registers cuda_version() as the custom operator "torch_sparse::cuda_version"
// so Python can query the build's CUDA version at import time.
static auto registry = torch::RegisterOperators().op(
    "torch_sparse::cuda_version", &sparse::cuda_version);
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment