Unverified commit 8cc819d5, authored by Daniel Falbel, committed by GitHub

Add options to conditionally include Python (#196)

* Add `WITH_PYTHON` to conditionally link to Python.

* Only include `Python.h` when WITH_PYTHON is set.

* Avoid including `torch/extension.h` directly, as it includes `Python.h`.

* Better way to include `getpid()`.

* Define `WITH_PYTHON` when building with setup.py.

* Only include `PyInit` when building with Python.
parent fe8c3ce3
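Every C++ source below gets the same treatment: `Python.h` and the Windows-only `PyInit_*` stubs are compiled only when `WITH_PYTHON` is defined, while the TorchScript operator registration stays unconditional. A minimal sketch of that guard pattern; the `example` file and operator names are illustrative and do not appear in this commit:

```cpp
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>

// On Windows the loader expects an exported PyInit_<module> symbol when the
// built extension is imported from Python, so an empty stub is kept -- but
// only when Python support is compiled in at all.
#ifdef _WIN32
#ifdef WITH_PYTHON
PyMODINIT_FUNC PyInit__example_cpu(void) { return NULL; }
#endif
#endif

torch::Tensor identity(torch::Tensor x) { return x; }

// Operator registration goes through TorchScript and works with or without
// a Python interpreter linked in.
static auto registry =
    torch::RegisterOperators().op("example::identity", &identity);
```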
#pragma once
-#include <torch/extension.h>
+#include "../extensions.h"
#define CHECK_CUDA(x) \
AT_ASSERTM(x.device().is_cuda(), #x " must be CUDA tensor")
......
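The hunk above swaps the direct `<torch/extension.h>` include (which unconditionally pulls in `Python.h`) for a project-local `../extensions.h`. The wrapper's exact contents are not shown in this excerpt; the following is only a plausible sketch of what such a header might do, offered as an assumption:

```cpp
// Hypothetical ../extensions.h: pull in the Python-dependent torch headers
// only when WITH_PYTHON is defined; otherwise fall back to the Python-free
// TorchScript header.
#pragma once

#ifdef WITH_PYTHON
#include <Python.h>
#include <torch/extension.h>
#else
#include <torch/script.h>
#endif
```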
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/diag_cpu.h"
......
@@ -8,12 +10,14 @@
#endif
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__diag_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__diag_cpu(void) { return NULL; }
#endif
#endif
#endif
torch::Tensor non_diag_mask(torch::Tensor row, torch::Tensor col, int64_t M,
int64_t N, int64_t k) {
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/ego_sample_cpu.h"
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__ego_sample_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__ego_sample_cpu(void) { return NULL; }
#endif
#endif
#endif
// Returns `rowptr`, `col`, `n_id`, `e_id`, `ptr`, `root_n_id`
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor,
......
#include <torch/torch.h>
// for getpid()
#ifdef _WIN32
#include <process.h>
#else
#include <unistd.h>
#endif
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/hgt_sample_cpu.h"
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__hgt_sample_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__hgt_sample_cpu(void) { return NULL; }
#endif
#endif
#endif
// Returns 'output_node_dict', 'row_dict', 'col_dict', 'output_edge_dict'
std::tuple<c10::Dict<node_t, torch::Tensor>, c10::Dict<rel_t, torch::Tensor>,
......
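The `getpid()` bullet corresponds to the platform-conditional include at the top of the hunk above: `<process.h>` on Windows, `<unistd.h>` elsewhere. How the pid is actually used is not shown in this excerpt; a common, purely illustrative use is mixing it into a random seed so concurrent worker processes do not sample identically:

```cpp
#include <cstdlib>
#include <ctime>

#ifdef _WIN32
#include <process.h> // getpid() on Windows
#else
#include <unistd.h> // getpid() on POSIX systems
#endif

// Illustrative only: combine wall-clock time with the process id so that
// several processes started at the same moment do not share a seed.
inline void seed_with_pid() {
  std::srand(static_cast<unsigned>(std::time(nullptr)) ^
             static_cast<unsigned>(getpid()));
}
```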
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/metis_cpu.h"
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__metis_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__metis_cpu(void) { return NULL; }
#endif
#endif
#endif
torch::Tensor partition(torch::Tensor rowptr, torch::Tensor col,
torch::optional<torch::Tensor> optional_value,
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/neighbor_sample_cpu.h"
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__neighbor_sample_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__neighbor_sample_cpu(void) { return NULL; }
#endif
#endif
#endif
// Returns 'output_node', 'row', 'col', 'output_edge'
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/relabel_cpu.h"
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__relabel_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__relabel_cpu(void) { return NULL; }
#endif
#endif
#endif
std::tuple<torch::Tensor, torch::Tensor> relabel(torch::Tensor col,
torch::Tensor idx) {
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/rw_cpu.h"
......
@@ -8,12 +10,14 @@
#endif
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__rw_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__rw_cpu(void) { return NULL; }
#endif
#endif
#endif
torch::Tensor random_walk(torch::Tensor rowptr, torch::Tensor col,
torch::Tensor start, int64_t walk_length) {
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/saint_cpu.h"
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__saint_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__saint_cpu(void) { return NULL; }
#endif
#endif
#endif
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
subgraph(torch::Tensor idx, torch::Tensor rowptr, torch::Tensor row,
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/sample_cpu.h"
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__sample_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__sample_cpu(void) { return NULL; }
#endif
#endif
#endif
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
sample_adj(torch::Tensor rowptr, torch::Tensor col, torch::Tensor idx,
......
#pragma once
#include <torch/extension.h>
#include <torch/library.h>
int64_t cuda_version();
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/spmm_cpu.h"
......
@@ -8,12 +10,14 @@
#endif
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__spmm_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__spmm_cpu(void) { return NULL; }
#endif
#endif
#endif
std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
spmm_fw(torch::Tensor rowptr, torch::Tensor col,
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/spspmm_cpu.h"
......
@@ -8,12 +10,14 @@
#endif
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__spspmm_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__spspmm_cpu(void) { return NULL; }
#endif
#endif
#endif
std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
spspmm_sum(torch::Tensor rowptrA, torch::Tensor colA,
......
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#ifdef WITH_CUDA
......
@@ -6,12 +8,14 @@
#endif
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__version_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__version_cpu(void) { return NULL; }
#endif
#endif
#endif
int64_t cuda_version() {
#ifdef WITH_CUDA
......
......
@@ -34,7 +34,7 @@ def get_extensions():
    main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))
    for main, suffix in product(main_files, suffices):
-        define_macros = []
+        define_macros = [('WITH_PYTHON', None)]
        libraries = []
        if WITH_METIS:
            define_macros += [('WITH_METIS', None)]
......
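On the build side, adding `('WITH_PYTHON', None)` to `define_macros` makes the compiler see `-DWITH_PYTHON` for every translation unit, which is what enables the guarded `Python.h` includes and `PyInit_*` stubs above. A minimal, self-contained sketch of how such a macro flows through `torch.utils.cpp_extension`; the module and file names are illustrative and not taken from this repository:

```python
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension

# Illustrative only: each ('NAME', None) pair in define_macros becomes a
# bare -DNAME flag on the compiler command line.
ext = CppExtension(
    name='example._example_cpu',           # hypothetical extension module
    sources=['csrc/example.cpp'],          # hypothetical source file
    define_macros=[('WITH_PYTHON', None)],
)

setup(
    name='example',
    ext_modules=[ext],
    cmdclass={'build_ext': BuildExtension},
)
```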