Unverified commit 8cc819d5, authored by Daniel Falbel and committed by GitHub

Add options to conditionally include Python (#196)

* Add `WITH_PYTHON` to conditionally link to Python.

* Only include `Python.h` when `WITH_PYTHON` is set.

* Avoid including `torch/extension.h` directly, since it pulls in `Python.h`; the C++ headers now include the local `extensions.h` wrapper instead (a sketch of such a wrapper follows the header diffs below).

* Use a better way to include `getpid()` (see the sketch after this commit message).

* Define `WITH_PYTHON` when building with setup.py.

* Only include `PyInit` when building with Python.

* Only include `PyInit` when building with Python.
parent fe8c3ce3
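Note that the setup.py and `getpid()` changes mentioned in the commit message are not part of the hunks shown in this excerpt. As a rough point of reference only, the usual cross-platform way to pull in `getpid()` without relying on `Python.h` looks like the sketch below; the exact change made in this commit may differ, and the `process_id` helper here is purely illustrative.

```cpp
// Hedged sketch, not the commit's actual hunk: include the owning system
// header for getpid() directly, so the symbol is still available when the
// library is built without Python (WITH_PYTHON off).
#include <cstdio>

#ifdef _WIN32
#include <process.h>  // declares _getpid()
static int process_id() { return _getpid(); }
#else
#include <unistd.h>   // declares getpid()
static int process_id() { return static_cast<int>(getpid()); }
#endif

int main() {
  // Typical use in this kind of library: seeding a per-process RNG.
  std::printf("pid: %d\n", process_id());
  return 0;
}
```

The hunks below cover the parts of the change that are visible in this excerpt: the new CMake option, the guarded `Python.h` include and `PyInit` stubs, and the header switch to `../extensions.h`.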
@@ -4,6 +4,7 @@ set(CMAKE_CXX_STANDARD 14)
 set(TORCHSPARSE_VERSION 0.6.12)
 
 option(WITH_CUDA "Enable CUDA support" OFF)
+option(WITH_PYTHON "Link to Python when building" ON)
 
 if(WITH_CUDA)
   enable_language(CUDA)
@@ -12,7 +13,10 @@ if(WITH_CUDA)
   set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr")
 endif()
 
-find_package(Python3 COMPONENTS Development)
+if (WITH_PYTHON)
+  add_definitions(-DWITH_PYTHON)
+  find_package(Python3 COMPONENTS Development)
+endif()
 find_package(Torch REQUIRED)
 
 file(GLOB HEADERS csrc/sparse.h)
@@ -22,7 +26,10 @@ if(WITH_CUDA)
 endif()
 
 add_library(${PROJECT_NAME} SHARED ${OPERATOR_SOURCES})
-target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES} Python3::Python)
+target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES})
+if (WITH_PYTHON)
+  target_link_libraries(${PROJECT_NAME} PRIVATE Python3::Python)
+endif()
 set_target_properties(${PROJECT_NAME} PROPERTIES EXPORT_NAME TorchSparse)
 
 target_include_directories(${PROJECT_NAME} INTERFACE
......
+#ifdef WITH_PYTHON
 #include <Python.h>
+#endif
 #include <torch/script.h>
 
 #include "cpu/convert_cpu.h"
@@ -8,12 +10,14 @@
 #endif
 
 #ifdef _WIN32
+#ifdef WITH_PYTHON
 #ifdef WITH_CUDA
 PyMODINIT_FUNC PyInit__convert_cuda(void) { return NULL; }
 #else
 PyMODINIT_FUNC PyInit__convert_cpu(void) { return NULL; }
 #endif
 #endif
+#endif
 
 torch::Tensor ind2ptr(torch::Tensor ind, int64_t M) {
   if (ind.device().is_cuda()) {
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 torch::Tensor ind2ptr_cpu(torch::Tensor ind, int64_t M);
 torch::Tensor ptr2ind_cpu(torch::Tensor ptr, int64_t E);

 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 torch::Tensor non_diag_mask_cpu(torch::Tensor row, torch::Tensor col, int64_t M,
                                 int64_t N, int64_t k);

 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor,
            torch::Tensor, torch::Tensor>
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 typedef std::string node_t;
 typedef std::string rel_t;
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 torch::Tensor partition_cpu(torch::Tensor rowptr, torch::Tensor col,
                             torch::optional<torch::Tensor> optional_value,
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 typedef std::string node_t;
 typedef std::tuple<std::string, std::string, std::string> edge_t;
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 std::tuple<torch::Tensor, torch::Tensor> relabel_cpu(torch::Tensor col,
                                                      torch::Tensor idx);
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 torch::Tensor random_walk_cpu(torch::Tensor rowptr, torch::Tensor col,
                               torch::Tensor start, int64_t walk_length);

 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
 subgraph_cpu(torch::Tensor idx, torch::Tensor rowptr, torch::Tensor row,
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
 sample_adj_cpu(torch::Tensor rowptr, torch::Tensor col, torch::Tensor idx,
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
 spmm_cpu(torch::Tensor rowptr, torch::Tensor col,
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
 spspmm_cpu(torch::Tensor rowptrA, torch::Tensor colA,
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 #define CHECK_CPU(x) AT_ASSERTM(x.device().is_cpu(), #x " must be CPU tensor")
 #define CHECK_INPUT(x) AT_ASSERTM(x, "Input mismatch")
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 torch::Tensor ind2ptr_cuda(torch::Tensor ind, int64_t M);
 torch::Tensor ptr2ind_cuda(torch::Tensor ptr, int64_t E);

 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 torch::Tensor non_diag_mask_cuda(torch::Tensor row, torch::Tensor col,
                                  int64_t M, int64_t N, int64_t k);

 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 torch::Tensor random_walk_cuda(torch::Tensor rowptr, torch::Tensor col,
                                torch::Tensor start, int64_t walk_length);
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
 spmm_cuda(torch::Tensor rowptr, torch::Tensor col,
......
 #pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
 std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
 spspmm_cuda(torch::Tensor rowptrA, torch::Tensor colA,
......
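Every header above now includes `"../extensions.h"` instead of `<torch/extension.h>`. The contents of that wrapper are not part of this diff; below is a minimal sketch of what it plausibly looks like, assuming it simply selects a Python-free Torch header when `WITH_PYTHON` is not defined.

```cpp
// Hypothetical sketch of csrc/extensions.h (the real file is not shown in
// this diff). <torch/extension.h> pulls in Python.h, so it can only be used
// when the build links against Python; <torch/script.h> provides the
// torch::Tensor / TorchScript declarations without any Python dependency.
#pragma once

#ifdef WITH_PYTHON
#include <torch/extension.h>
#else
#include <torch/script.h>
#endif
```

With an arrangement like this, only the translation units that actually need the Python C API (for example, the `PyInit` stubs in the hunk above) touch `Python.h`, and they do so behind the same `WITH_PYTHON` guard.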