Commit 1cb25232 authored by limm

push 0.6.15 version

parent e8309f27
csrc/macros.h (new file)

#pragma once
#ifdef _WIN32
#if defined(torchsparse_EXPORTS)
#define SPARSE_API __declspec(dllexport)
#else
#define SPARSE_API __declspec(dllimport)
#endif
#else
#define SPARSE_API
#endif
#if (defined __cpp_inline_variables) || __cplusplus >= 201703L
#define SPARSE_INLINE_VARIABLE inline
#else
#ifdef _MSC_VER
#define SPARSE_INLINE_VARIABLE __declspec(selectany)
#else
#define SPARSE_INLINE_VARIABLE __attribute__((weak))
#endif
#endif
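For context, a minimal sketch (not part of this commit) of what the new header provides: SPARSE_API marks symbols for DLL export/import on Windows and expands to nothing elsewhere, while SPARSE_INLINE_VARIABLE gives a header-defined variable a single shared definition per program (C++17 "inline", or compiler-specific attributes before that). The names value() and cached_value are hypothetical, and the snippet is compiled as a single file only for illustration:

#include <cstdint>
#include <iostream>

#include "macros.h" // the header added above (path assumed)

int64_t value() { return 42; }

// Would normally live in a header: even if many translation units include
// that header, exactly one definition of cached_value is kept.
SPARSE_INLINE_VARIABLE int64_t cached_value = value();

int main() { std::cout << cached_value << std::endl; }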
csrc/metis.cpp
@@ -7,7 +7,7 @@
 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__metis_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__metis_cpu(void) { return NULL; }
@@ -19,7 +19,7 @@ SPARSE_API torch::Tensor partition(torch::Tensor rowptr, torch::Tensor col,
                                    torch::optional<torch::Tensor> optional_value,
                                    int64_t num_parts, bool recursive) {
   if (rowptr.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     AT_ERROR("No CUDA version supported");
 #else
     AT_ERROR("Not compiled with CUDA support");
@@ -35,7 +35,7 @@ SPARSE_API torch::Tensor partition2(torch::Tensor rowptr, torch::Tensor col,
                                     torch::optional<torch::Tensor> optional_node_weight,
                                     int64_t num_parts, bool recursive) {
   if (rowptr.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     AT_ERROR("No CUDA version supported");
 #else
     AT_ERROR("Not compiled with CUDA support");
@@ -52,7 +52,7 @@ SPARSE_API torch::Tensor mt_partition(torch::Tensor rowptr, torch::Tensor col,
                                       int64_t num_parts, bool recursive,
                                       int64_t num_workers) {
   if (rowptr.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     AT_ERROR("No CUDA version supported");
 #else
     AT_ERROR("Not compiled with CUDA support");
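Every file in this commit repeats the same dispatch guard, retargeted here from HIP to CUDA: route on the tensor's device at runtime, but only compile the device branch when the extension was built with WITH_CUDA. A minimal sketch of the pattern (not part of the commit; dispatch_example is a hypothetical name):

#include <torch/torch.h>

torch::Tensor dispatch_example(torch::Tensor t) {
  if (t.device().is_cuda()) {
#ifdef WITH_CUDA
    return t + 1; // a real operator would call its CUDA kernel here
#else
    AT_ERROR("Not compiled with CUDA support");
#endif
  }
  return t + 1; // CPU fallback path
}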
csrc/neighbor_sample.cpp
@@ -7,7 +7,7 @@
 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__neighbor_sample_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__neighbor_sample_cpu(void) { return NULL; }
@@ -16,7 +16,8 @@ PyMODINIT_FUNC PyInit__neighbor_sample_cpu(void) { return NULL; }
 #endif

 // Returns 'output_node', 'row', 'col', 'output_edge'
-SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
+SPARSE_API
+std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
 neighbor_sample(const torch::Tensor &colptr, const torch::Tensor &row,
                 const torch::Tensor &input_node,
                 const std::vector<int64_t> num_neighbors, const bool replace,
@@ -25,7 +26,8 @@ neighbor_sample(const torch::Tensor &colptr, const torch::Tensor &row,
                          directed);
 }

-SPARSE_API std::tuple<c10::Dict<node_t, torch::Tensor>, c10::Dict<rel_t, torch::Tensor>,
+SPARSE_API
+std::tuple<c10::Dict<node_t, torch::Tensor>, c10::Dict<rel_t, torch::Tensor>,
            c10::Dict<rel_t, torch::Tensor>, c10::Dict<rel_t, torch::Tensor>>
 hetero_neighbor_sample(
     const std::vector<node_t> &node_types,
@@ -40,7 +42,25 @@ hetero_neighbor_sample(
       num_neighbors_dict, num_hops, replace, directed);
 }

+std::tuple<c10::Dict<node_t, torch::Tensor>, c10::Dict<rel_t, torch::Tensor>,
+           c10::Dict<rel_t, torch::Tensor>, c10::Dict<rel_t, torch::Tensor>>
+hetero_temporal_neighbor_sample(
+    const std::vector<node_t> &node_types,
+    const std::vector<edge_t> &edge_types,
+    const c10::Dict<rel_t, torch::Tensor> &colptr_dict,
+    const c10::Dict<rel_t, torch::Tensor> &row_dict,
+    const c10::Dict<node_t, torch::Tensor> &input_node_dict,
+    const c10::Dict<rel_t, std::vector<int64_t>> &num_neighbors_dict,
+    const c10::Dict<node_t, torch::Tensor> &node_time_dict,
+    const int64_t num_hops, const bool replace, const bool directed) {
+  return hetero_temporal_neighbor_sample_cpu(
+      node_types, edge_types, colptr_dict, row_dict, input_node_dict,
+      num_neighbors_dict, node_time_dict, num_hops, replace, directed);
+}
+
 static auto registry =
     torch::RegisterOperators()
         .op("torch_sparse::neighbor_sample", &neighbor_sample)
-        .op("torch_sparse::hetero_neighbor_sample", &hetero_neighbor_sample);
+        .op("torch_sparse::hetero_neighbor_sample", &hetero_neighbor_sample)
+        .op("torch_sparse::hetero_temporal_neighbor_sample",
+            &hetero_temporal_neighbor_sample);
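Once the extension library is loaded, the operator registered above is reachable by its schema name through the PyTorch dispatcher. A minimal sketch (not part of the commit), using the public c10::Dispatcher API:

#include <ATen/core/dispatch/Dispatcher.h>
#include <torch/script.h>

#include <iostream>

int main() {
  auto handle = c10::Dispatcher::singleton().findSchemaOrThrow(
      "torch_sparse::hetero_temporal_neighbor_sample", /*overload_name=*/"");
  // The schema (argument and return types) was derived automatically from
  // the C++ signature passed to .op(...) above.
  std::cout << handle.schema() << std::endl;
}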
csrc/relabel.cpp
@@ -7,7 +7,7 @@
 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__relabel_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__relabel_cpu(void) { return NULL; }
@@ -18,7 +18,7 @@ PyMODINIT_FUNC PyInit__relabel_cpu(void) { return NULL; }
 SPARSE_API std::tuple<torch::Tensor, torch::Tensor> relabel(torch::Tensor col,
                                                             torch::Tensor idx) {
   if (col.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     AT_ERROR("No CUDA version supported");
 #else
     AT_ERROR("Not compiled with CUDA support");
@@ -34,7 +34,7 @@ relabel_one_hop(torch::Tensor rowptr, torch::Tensor col,
                 torch::optional<torch::Tensor> optional_value,
                 torch::Tensor idx, bool bipartite) {
   if (rowptr.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     AT_ERROR("No CUDA version supported");
 #else
     AT_ERROR("Not compiled with CUDA support");
csrc/rw.cpp
@@ -5,13 +5,13 @@
 #include "cpu/rw_cpu.h"

-#ifdef WITH_HIP
-#include "hip/rw_hip.h"
+#ifdef WITH_CUDA
+#include "cuda/rw_cuda.h"
 #endif

 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__rw_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__rw_cpu(void) { return NULL; }
@@ -22,7 +22,7 @@ PyMODINIT_FUNC PyInit__rw_cpu(void) { return NULL; }
 SPARSE_API torch::Tensor random_walk(torch::Tensor rowptr, torch::Tensor col,
                                      torch::Tensor start, int64_t walk_length) {
   if (rowptr.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     return random_walk_cuda(rowptr, col, start, walk_length);
 #else
     AT_ERROR("Not compiled with CUDA support");
csrc/saint.cpp
@@ -7,7 +7,7 @@
 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__saint_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__saint_cpu(void) { return NULL; }
@@ -19,7 +19,7 @@ std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
 subgraph(torch::Tensor idx, torch::Tensor rowptr, torch::Tensor row,
          torch::Tensor col) {
   if (idx.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     AT_ERROR("No CUDA version supported");
 #else
     AT_ERROR("Not compiled with CUDA support");
csrc/sample.cpp
@@ -7,7 +7,7 @@
 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__sample_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__sample_cpu(void) { return NULL; }
@@ -19,7 +19,7 @@ SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor
 sample_adj(torch::Tensor rowptr, torch::Tensor col, torch::Tensor idx,
            int64_t num_neighbors, bool replace) {
   if (rowptr.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     AT_ERROR("No CUDA version supported");
 #else
     AT_ERROR("Not compiled with CUDA support");
csrc/sparse.h
 #pragma once

-#include <torch/library.h>
+#include "extensions.h"
+#include "macros.h"

-#ifdef _WIN32
-#if defined(torchsparse_EXPORTS)
-#define SPARSE_API __declspec(dllexport)
-#else
-#define SPARSE_API __declspec(dllimport)
-#endif
-#else
-#define SPARSE_API
-#endif
+namespace sparse {
+SPARSE_API int64_t cuda_version() noexcept;

-SPARSE_API int64_t cuda_version();
+namespace detail {
+SPARSE_INLINE_VARIABLE int64_t _cuda_version = cuda_version();
+} // namespace detail
+} // namespace sparse

 SPARSE_API torch::Tensor ind2ptr(torch::Tensor ind, int64_t M);
 SPARSE_API torch::Tensor ptr2ind(torch::Tensor ptr, int64_t E);

-SPARSE_API torch::Tensor partition(torch::Tensor rowptr, torch::Tensor col,
-                                   torch::optional<torch::Tensor> optional_value,
-                                   int64_t num_parts, bool recursive);
+SPARSE_API torch::Tensor
+partition(torch::Tensor rowptr, torch::Tensor col,
+          torch::optional<torch::Tensor> optional_value, int64_t num_parts,
+          bool recursive);

-SPARSE_API torch::Tensor partition2(torch::Tensor rowptr, torch::Tensor col,
-                                    torch::optional<torch::Tensor> optional_value,
-                                    torch::optional<torch::Tensor> optional_node_weight,
-                                    int64_t num_parts, bool recursive);
+SPARSE_API torch::Tensor
+partition2(torch::Tensor rowptr, torch::Tensor col,
+           torch::optional<torch::Tensor> optional_value,
+           torch::optional<torch::Tensor> optional_node_weight,
+           int64_t num_parts, bool recursive);

-SPARSE_API torch::Tensor mt_partition(torch::Tensor rowptr, torch::Tensor col,
-                                      torch::optional<torch::Tensor> optional_value,
-                                      torch::optional<torch::Tensor> optional_node_weight,
-                                      int64_t num_parts, bool recursive,
-                                      int64_t num_workers);
+SPARSE_API torch::Tensor
+mt_partition(torch::Tensor rowptr, torch::Tensor col,
+             torch::optional<torch::Tensor> optional_value,
+             torch::optional<torch::Tensor> optional_node_weight,
+             int64_t num_parts, bool recursive, int64_t num_workers);

 SPARSE_API std::tuple<torch::Tensor, torch::Tensor> relabel(torch::Tensor col,
                                                             torch::Tensor idx);

-SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>,
-                      torch::Tensor>
+SPARSE_API std::tuple<torch::Tensor, torch::Tensor,
+                      torch::optional<torch::Tensor>, torch::Tensor>
 relabel_one_hop(torch::Tensor rowptr, torch::Tensor col,
                 torch::optional<torch::Tensor> optional_value,
                 torch::Tensor idx, bool bipartite);

 SPARSE_API torch::Tensor random_walk(torch::Tensor rowptr, torch::Tensor col,
                                      torch::Tensor start, int64_t walk_length);

 SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
 subgraph(torch::Tensor idx, torch::Tensor rowptr, torch::Tensor row,
          torch::Tensor col);

-SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
+SPARSE_API
+std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
 sample_adj(torch::Tensor rowptr, torch::Tensor col, torch::Tensor idx,
            int64_t num_neighbors, bool replace);

 SPARSE_API torch::Tensor spmm_sum(torch::optional<torch::Tensor> opt_row,
                                   torch::Tensor rowptr, torch::Tensor col,
                                   torch::optional<torch::Tensor> opt_value,
                                   torch::optional<torch::Tensor> opt_colptr,
                                   torch::optional<torch::Tensor> opt_csr2csc,
                                   torch::Tensor mat);

 SPARSE_API torch::Tensor spmm_mean(torch::optional<torch::Tensor> opt_row,
                                    torch::Tensor rowptr, torch::Tensor col,
                                    torch::optional<torch::Tensor> opt_value,
                                    torch::optional<torch::Tensor> opt_rowcount,
                                    torch::optional<torch::Tensor> opt_colptr,
                                    torch::optional<torch::Tensor> opt_csr2csc,
                                    torch::Tensor mat);

 SPARSE_API std::tuple<torch::Tensor, torch::Tensor>
 spmm_min(torch::Tensor rowptr, torch::Tensor col,
@@ -76,8 +75,9 @@ SPARSE_API std::tuple<torch::Tensor, torch::Tensor>
 spmm_max(torch::Tensor rowptr, torch::Tensor col,
          torch::optional<torch::Tensor> opt_value, torch::Tensor mat);

-SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
+SPARSE_API
+std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
 spspmm_sum(torch::Tensor rowptrA, torch::Tensor colA,
            torch::optional<torch::Tensor> optional_valueA,
            torch::Tensor rowptrB, torch::Tensor colB,
            torch::optional<torch::Tensor> optional_valueB, int64_t K);
csrc/spmm.cpp
@@ -5,13 +5,13 @@
 #include "cpu/spmm_cpu.h"

-#ifdef WITH_HIP
-#include "hip/spmm_hip.h"
+#ifdef WITH_CUDA
+#include "cuda/spmm_cuda.h"
 #endif

 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__spmm_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__spmm_cpu(void) { return NULL; }
@@ -24,7 +24,7 @@ spmm_fw(torch::Tensor rowptr, torch::Tensor col,
         torch::optional<torch::Tensor> optional_value, torch::Tensor mat,
         std::string reduce) {
   if (rowptr.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     return spmm_cuda(rowptr, col, optional_value, mat, reduce);
 #else
     AT_ERROR("Not compiled with CUDA support");
@@ -38,7 +38,7 @@ torch::Tensor spmm_value_bw(torch::Tensor row, torch::Tensor rowptr,
                             torch::Tensor col, torch::Tensor mat,
                             torch::Tensor grad, std::string reduce) {
   if (row.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     return spmm_value_bw_cuda(row, rowptr, col, mat, grad, reduce);
 #else
     AT_ERROR("Not compiled with CUDA support");
csrc/spspmm.cpp
@@ -5,13 +5,13 @@
 #include "cpu/spspmm_cpu.h"

-#ifdef WITH_HIP
-#include "hip/spspmm_hip.h"
+#ifdef WITH_CUDA
+#include "cuda/spspmm_cuda.h"
 #endif

 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__spspmm_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__spspmm_cpu(void) { return NULL; }
@@ -25,7 +25,7 @@ spspmm_sum(torch::Tensor rowptrA, torch::Tensor colA,
            torch::Tensor rowptrB, torch::Tensor colB,
            torch::optional<torch::Tensor> optional_valueB, int64_t K) {
   if (rowptrA.device().is_cuda()) {
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
     return spspmm_cuda(rowptrA, colA, optional_valueA, rowptrB, colB,
                        optional_valueB, K, "sum");
 #else
csrc/version.cpp
@@ -2,15 +2,16 @@
 #include <Python.h>
 #endif

 #include <torch/script.h>
+#include "sparse.h"

-#ifdef WITH_HIP
-#include <hip/hip_runtime.h>
+#ifdef WITH_CUDA
+#include <cuda.h>
 #endif

+#include "macros.h"

 #ifdef _WIN32
 #ifdef WITH_PYTHON
-#ifdef WITH_HIP
+#ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__version_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__version_cpu(void) { return NULL; }
@@ -18,13 +19,15 @@ PyMODINIT_FUNC PyInit__version_cpu(void) { return NULL; }
 #endif
 #endif

-SPARSE_API int64_t cuda_version() {
-#ifdef WITH_HIP
-  return TORCH_HIP_VERSION;
+namespace sparse {
+SPARSE_API int64_t cuda_version() noexcept {
+#ifdef WITH_CUDA
+  return CUDA_VERSION;
 #else
   return -1;
 #endif
 }
+} // namespace sparse

-static auto registry =
-    torch::RegisterOperators().op("torch_sparse::cuda_version", &cuda_version);
+static auto registry = torch::RegisterOperators().op(
+    "torch_sparse::cuda_version", &sparse::cuda_version);
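The new detail::_cuda_version inline variable in sparse.h means every binary that includes the header calls cuda_version() once during static initialization, which effectively forces the symbol to resolve at load time instead of at first use. A minimal sketch (not part of the commit) of a C++ consumer querying the value, which encodes major*1000 + minor*10 like the CUDA_VERSION macro and is -1 for CPU-only builds:

#include <cstdint>
#include <iostream>

#include "sparse.h" // header from this commit (path assumed)

int main() {
  int64_t v = sparse::cuda_version();
  if (v == -1) {
    std::cout << "torch-sparse was built without CUDA" << std::endl;
  } else {
    // e.g. 11030 -> "11.3"
    std::cout << "built against CUDA " << v / 1000 << "." << (v % 1000) / 10
              << std::endl;
  }
}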
setup.cfg
 [metadata]
-long_description = file: README.md
-long_description_content_type = text/markdown
+long_description=file: README.md
+long_description_content_type=text/markdown
+
 classifiers =
     Development Status :: 5 - Production/Stable
     License :: OSI Approved :: MIT License
     Programming Language :: Python
     Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
     Programming Language :: Python :: 3.10
     Programming Language :: Python :: 3 :: Only

 [aliases]
 test = pytest
@@ -17,7 +18,7 @@ test = pytest
 [tool:pytest]
 addopts = --capture=no

-[egg_info]
-tag_build =
-tag_date = 0
+[isort]
+multi_line_output=3
+include_trailing_comma = True
+skip=.gitignore,__init__.py
setup.py
@@ -8,23 +8,24 @@ from itertools import product
 import torch
 from setuptools import find_packages, setup
 from torch.__config__ import parallel_info
-from torch.utils.cpp_extension import (CUDA_HOME, BuildExtension, CppExtension,
-                                       CUDAExtension)
+from torch.utils.cpp_extension import (
+    CUDA_HOME,
+    BuildExtension,
+    CppExtension,
+    CUDAExtension,
+)

-__version__ = '0.6.13'
+__version__ = '0.6.15'
 URL = 'https://github.com/rusty1s/pytorch_sparse'

-WITH_HIP = torch.cuda.is_available() and CUDA_HOME is not None
-suffices = ['cpu', 'cuda'] if WITH_HIP else ['cpu']
+WITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
+suffices = ['cpu', 'cuda'] if WITH_CUDA else ['cpu']
 if os.getenv('FORCE_CUDA', '0') == '1':
     suffices = ['cuda', 'cpu']
-if os.getenv('FORCE_ONLY_HIP', '0') == '1':
-    suffices = ['hip']
+if os.getenv('FORCE_ONLY_CUDA', '0') == '1':
+    suffices = ['cuda']
 if os.getenv('FORCE_ONLY_CPU', '0') == '1':
     suffices = ['cpu']

-ROCM_PATH = os.getenv('ROCM_PATH')
-HIPLIB2 = osp.join(ROCM_PATH, 'hiprand', 'include')
-HIPLIB1 = osp.join(ROCM_PATH, 'hipsparse', 'include')
-
 BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'
@@ -79,18 +80,17 @@ def get_extensions():
             extra_compile_args['cxx'] += ['-arch', 'arm64']
             extra_link_args += ['-arch', 'arm64']

-        if suffix == 'hip':
-            define_macros += [('WITH_HIP', None)]
-            hipcc_flags = os.getenv('HIPCC_FLAGS', '')
-            hipcc_flags = [] if hipcc_flags == '' else hipcc_flags.split(' ')
-            hipcc_flags += ['--expt-relaxed-constexpr', '-O2']
-            extra_compile_args['hipcc'] = hipcc_flags
+        if suffix == 'cuda':
+            define_macros += [('WITH_CUDA', None)]
+            nvcc_flags = os.getenv('NVCC_FLAGS', '')
+            nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
+            nvcc_flags += ['--expt-relaxed-constexpr', '-O2']
+            extra_compile_args['nvcc'] = nvcc_flags
             if sys.platform == 'win32':
-                extra_link_args += ['hipsparse.lib']
+                extra_link_args += ['cusparse.lib']
             else:
-                extra_link_args += ['-lhipsparse', '-l', 'hipsparse']
-                extra_link_args += ['-fopenmp','-lomp']
+                extra_link_args += ['-lcusparse', '-l', 'cusparse']

         name = main.split(os.sep)[-1][:-4]
         sources = [main]
@@ -99,16 +99,17 @@ def get_extensions():
         if osp.exists(path):
             sources += [path]

-        path = osp.join(extensions_dir, 'hip', f'{name}_hip.hip')
-        if suffix == 'hip' and osp.exists(path):
+        path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
+        if suffix == 'cuda' and osp.exists(path):
             sources += [path]

+        phmap_dir = "third_party/parallel-hashmap"
         Extension = CppExtension if suffix == 'cpu' else CUDAExtension
-        define_macros += [('TORCH_HIP_VERSION', 10000), ('__HIP__', None), ('__HCC__', None)]
         extension = Extension(
             f'torch_sparse._{name}_{suffix}',
             sources,
-            include_dirs=[extensions_dir, HIPLIB1, HIPLIB2],
+            include_dirs=[extensions_dir, phmap_dir],
             define_macros=define_macros,
             extra_compile_args=extra_compile_args,
             extra_link_args=extra_link_args,
@@ -154,5 +155,5 @@ setup(
         BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False)
     },
     packages=find_packages(),
-    include_package_data=False,
+    include_package_data=True,
 )