Commit 6ab5c332 authored by rusty1s

build separate cpu and cuda images

parent 39d463b8
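The commit builds every operator extension twice, once per compute variant (_<name>_cpu and _<name>_cuda), uploads the wheels into per-index directories (whl/torch-${TORCH_VERSION}+${IDX}), and lets torch_sparse pick the matching binaries at import time. A minimal sketch of that import-time selection, simplified from the torch_sparse __init__ hunk at the end of this diff (the shortened library list here is illustrative only):

import importlib.machinery
import os.path as osp

import torch

# Each operator library now ships in a CPU and a CUDA flavour; the matching
# one is picked once, based on CUDA availability at import time.
suffix = 'cuda' if torch.cuda.is_available() else 'cpu'

for library in ['_version', '_convert', '_spmm']:  # shortened list
    spec = importlib.machinery.PathFinder().find_spec(
        f'{library}_{suffix}', [osp.dirname(__file__)])
    torch.ops.load_library(spec.origin)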
@@ -112,15 +112,16 @@ install:
- conda install pytorch=${TORCH_VERSION} ${TOOLKIT} -c pytorch --yes
- source script/torch.sh
- pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-${TORCH_VERSION}+${IDX}.html
- pip install flake8 codecov
- pip install flake8
- pip install codecov
- pip install scipy==1.4.1
- source script/install.sh
- travis_wait 30 pip install -e .
script:
- flake8 .
- python setup.py test
after_success:
- python setup.py bdist_wheel --dist-dir=dist/torch-${TORCH_VERSION}
- python script/rename_wheel.py ${IDX}
- python setup.py bdist_wheel --dist-dir=dist
- ls -lah dist/
- codecov
deploy:
provider: s3
@@ -129,8 +130,8 @@ deploy:
access_key_id: ${S3_ACCESS_KEY}
secret_access_key: ${S3_SECRET_ACCESS_KEY}
bucket: pytorch-geometric.com
local_dir: dist/torch-${TORCH_VERSION}
upload_dir: whl/torch-${TORCH_VERSION}
local_dir: dist
upload_dir: whl/torch-${TORCH_VERSION}+${IDX}
acl: public_read
on:
all_branches: true
......
cmake_minimum_required(VERSION 3.0)
project(torchsparse)
set(CMAKE_CXX_STANDARD 14)
set(TORCHSPARSE_VERSION 0.6.8)
set(TORCHSPARSE_VERSION 0.6.9)
option(WITH_CUDA "Enable CUDA support" OFF)
......
@@ -8,7 +8,11 @@
#endif
#ifdef _WIN32
PyMODINIT_FUNC PyInit__convert(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__convert_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__convert_cpu(void) { return NULL; }
#endif
#endif
torch::Tensor ind2ptr(torch::Tensor ind, int64_t M) {
......
@@ -8,7 +8,11 @@
#endif
#ifdef _WIN32
PyMODINIT_FUNC PyInit__diag(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__diag_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__diag_cpu(void) { return NULL; }
#endif
#endif
torch::Tensor non_diag_mask(torch::Tensor row, torch::Tensor col, int64_t M,
......
@@ -4,7 +4,11 @@
#include "cpu/metis_cpu.h"
#ifdef _WIN32
PyMODINIT_FUNC PyInit__metis(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__metis_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__metis_cpu(void) { return NULL; }
#endif
#endif
torch::Tensor partition(torch::Tensor rowptr, torch::Tensor col,
......
@@ -4,7 +4,11 @@
#include "cpu/relabel_cpu.h"
#ifdef _WIN32
PyMODINIT_FUNC PyInit__relabel(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__relabel_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__relabel_cpu(void) { return NULL; }
#endif
#endif
std::tuple<torch::Tensor, torch::Tensor> relabel(torch::Tensor col,
......
@@ -8,7 +8,11 @@
#endif
#ifdef _WIN32
PyMODINIT_FUNC PyInit__rw(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__rw_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__rw_cpu(void) { return NULL; }
#endif
#endif
torch::Tensor random_walk(torch::Tensor rowptr, torch::Tensor col,
......
@@ -4,7 +4,11 @@
#include "cpu/saint_cpu.h"
#ifdef _WIN32
PyMODINIT_FUNC PyInit__saint(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__saint_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__saint_cpu(void) { return NULL; }
#endif
#endif
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
......
@@ -4,7 +4,11 @@
#include "cpu/sample_cpu.h"
#ifdef _WIN32
PyMODINIT_FUNC PyInit__sample(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__sample_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__sample_cpu(void) { return NULL; }
#endif
#endif
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
......
@@ -8,7 +8,11 @@
#endif
#ifdef _WIN32
PyMODINIT_FUNC PyInit__spmm(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__spmm_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__spmm_cpu(void) { return NULL; }
#endif
#endif
std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
......
@@ -8,7 +8,11 @@
#endif
#ifdef _WIN32
PyMODINIT_FUNC PyInit__spspmm(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__spspmm_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__spspmm_cpu(void) { return NULL; }
#endif
#endif
std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
......
@@ -6,7 +6,11 @@
#endif
#ifdef _WIN32
PyMODINIT_FUNC PyInit__version(void) { return NULL; }
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__version_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__version_cpu(void) { return NULL; }
#endif
#endif
int64_t cuda_version() {
......
@@ -69,7 +69,7 @@ if [ "${TRAVIS_OS_NAME}" = "osx" ] && [ "$IDX" = "cpu" ]; then
fi
if [ "${IDX}" = "cpu" ]; then
export FORCE_CPU=1
export FORCE_ONLY_CPU=1
else
export FORCE_CUDA=1
fi
......
import sys
import os
import os.path as osp
import glob
import shutil
idx = sys.argv[1]
assert idx in ['cpu', 'cu92', 'cu101', 'cu102', 'cu110']
dist_dir = osp.join(osp.dirname(osp.abspath(__file__)), '..', 'dist')
wheels = glob.glob(osp.join('dist', '**', '*.whl'), recursive=True)
for wheel in wheels:
if idx in wheel:
continue
paths = wheel.split(osp.sep)
names = paths[-1].split('-')
name = '-'.join(names[:-4] + ['latest+' + idx] + names[-3:])
shutil.copyfile(wheel, osp.join(*paths[:-1], name))
name = '-'.join(names[:-4] + [names[-4] + '+' + idx] + names[-3:])
os.rename(wheel, osp.join(*paths[:-1], name))
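For reference, the string surgery above tags a wheel file name with the given index and additionally drops a "latest" copy next to it. A self-contained illustration with a hypothetical wheel name (not taken from an actual build):

import os.path as osp

# Hypothetical wheel name, for illustration only.
wheel = osp.join('dist', 'torch_sparse-0.6.9-cp37-cp37m-linux_x86_64.whl')
idx = 'cu102'

names = osp.basename(wheel).split('-')
tagged = '-'.join(names[:-4] + [names[-4] + '+' + idx] + names[-3:])
latest = '-'.join(names[:-4] + ['latest+' + idx] + names[-3:])

print(tagged)  # torch_sparse-0.6.9+cu102-cp37-cp37m-linux_x86_64.whl
print(latest)  # torch_sparse-latest+cu102-cp37-cp37m-linux_x86_64.whl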
import os
import os.path as osp
import sys
import glob
import os.path as osp
from itertools import product
from setuptools import setup, find_packages
import torch
@@ -10,10 +11,13 @@ from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME
WITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
suffices = ['cpu', 'cuda'] if WITH_CUDA else ['cpu']
if os.getenv('FORCE_CUDA', '0') == '1':
WITH_CUDA = True
if os.getenv('FORCE_CPU', '0') == '1':
WITH_CUDA = False
suffices = ['cuda', 'cpu']
if os.getenv('FORCE_ONLY_CUDA', '0') == '1':
suffices = ['cuda']
if os.getenv('FORCE_ONLY_CPU', '0') == '1':
suffices = ['cpu']
BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'
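The FORCE_ONLY_CUDA / FORCE_ONLY_CPU switches (exported by the CI script earlier in this diff) narrow the default two-variant build down to a single flavour. A small standalone sketch of that selection, with the same logic as the lines above factored into a helper for illustration:

import os

def build_suffices():
    # Default: build both the CUDA and the CPU flavour of every extension.
    suffices = ['cuda', 'cpu']
    # The FORCE_ONLY_* switches restrict the build to one flavour, e.g. for
    # the CPU-only CI jobs.
    if os.getenv('FORCE_ONLY_CUDA', '0') == '1':
        suffices = ['cuda']
    if os.getenv('FORCE_ONLY_CPU', '0') == '1':
        suffices = ['cpu']
    return suffices

print(build_suffices())  # ['cuda', 'cpu'] unless a FORCE_ONLY_* variable is set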
@@ -22,51 +26,50 @@ WITH_MTMETIS = True if os.getenv('WITH_MTMETIS', '0') == '1' else False
def get_extensions():
Extension = CppExtension
define_macros = []
libraries = []
if WITH_METIS:
define_macros += [('WITH_METIS', None)]
libraries += ['metis']
if WITH_MTMETIS:
define_macros += [('WITH_MTMETIS', None)]
define_macros += [('MTMETIS_64BIT_VERTICES', None)]
define_macros += [('MTMETIS_64BIT_EDGES', None)]
define_macros += [('MTMETIS_64BIT_WEIGHTS', None)]
define_macros += [('MTMETIS_64BIT_PARTITIONS', None)]
libraries += ['mtmetis', 'wildriver']
extra_compile_args = {'cxx': ['-O2']}
extra_link_args = ['-s']
info = parallel_info()
if 'parallel backend: OpenMP' in info and 'OpenMP not found' not in info:
extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']
if sys.platform == 'win32':
extra_compile_args['cxx'] += ['/openmp']
else:
extra_compile_args['cxx'] += ['-fopenmp']
else:
print('Compiling without OpenMP...')
if WITH_CUDA:
Extension = CUDAExtension
define_macros += [('WITH_CUDA', None)]
nvcc_flags = os.getenv('NVCC_FLAGS', '')
nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
nvcc_flags += ['-arch=sm_35', '--expt-relaxed-constexpr', '-O2']
extra_compile_args['nvcc'] = nvcc_flags
if sys.platform == 'win32':
extra_link_args += ['cusparse.lib']
else:
extra_link_args += ['-lcusparse', '-l', 'cusparse']
extensions = []
extensions_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'csrc')
main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))
extensions = []
for main in main_files:
name = main.split(os.sep)[-1][:-4]
for main, suffix in product(main_files, suffices):
define_macros = []
libraries = []
if WITH_METIS:
define_macros += [('WITH_METIS', None)]
libraries += ['metis']
if WITH_MTMETIS:
define_macros += [('WITH_MTMETIS', None)]
define_macros += [('MTMETIS_64BIT_VERTICES', None)]
define_macros += [('MTMETIS_64BIT_EDGES', None)]
define_macros += [('MTMETIS_64BIT_WEIGHTS', None)]
define_macros += [('MTMETIS_64BIT_PARTITIONS', None)]
libraries += ['mtmetis', 'wildriver']
extra_compile_args = {'cxx': ['-O2']}
extra_link_args = ['-s']
info = parallel_info()
if 'backend: OpenMP' in info and 'OpenMP not found' not in info:
extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']
if sys.platform == 'win32':
extra_compile_args['cxx'] += ['/openmp']
else:
extra_compile_args['cxx'] += ['-fopenmp']
else:
print('Compiling without OpenMP...')
if suffix == 'cuda':
define_macros += [('WITH_CUDA', None)]
nvcc_flags = os.getenv('NVCC_FLAGS', '')
nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
nvcc_flags += ['-arch=sm_35', '--expt-relaxed-constexpr', '-O2']
extra_compile_args['nvcc'] = nvcc_flags
if sys.platform == 'win32':
extra_link_args += ['cusparse.lib']
else:
extra_link_args += ['-lcusparse', '-l', 'cusparse']
name = main.split(os.sep)[-1][:-4]
sources = [main]
path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
@@ -74,11 +77,12 @@ def get_extensions():
sources += [path]
path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
if WITH_CUDA and osp.exists(path):
if suffix == 'cuda' and osp.exists(path):
sources += [path]
Extension = CppExtension if suffix == 'cpu' else CUDAExtension
extension = Extension(
'torch_sparse._' + name,
f'torch_sparse._{name}_{suffix}',
sources,
include_dirs=[extensions_dir],
define_macros=define_macros,
@@ -97,7 +101,7 @@ tests_require = ['pytest', 'pytest-cov']
setup(
name='torch_sparse',
version='0.6.8',
version='0.6.9',
author='Matthias Fey',
author_email='matthias.fey@tu-dortmund.de',
url='https://github.com/rusty1s/pytorch_sparse',
......
@@ -3,16 +3,18 @@ import os.path as osp
import torch
__version__ = '0.6.8'
__version__ = '0.6.9'
suffix = 'cuda' if torch.cuda.is_available() else 'cpu'
for library in [
'_version', '_convert', '_diag', '_spmm', '_spspmm', '_metis', '_rw',
'_saint', '_sample', '_relabel'
]:
torch.ops.load_library(importlib.machinery.PathFinder().find_spec(
library, [osp.dirname(__file__)]).origin)
f'{library}_{suffix}', [osp.dirname(__file__)]).origin)
if torch.cuda.is_available() and torch.version.cuda: # pragma: no cover
if torch.cuda.is_available(): # pragma: no cover
cuda_version = torch.ops.torch_sparse.cuda_version()
if cuda_version == -1:
......