Commit 8a07c869 authored by rusty1s

update

parent 19df6430
@@ -112,13 +112,13 @@ install:
   - source script/torch.sh
   - pip install flake8
   - pip install codecov
-  - pip install .[test]
+  - travis_wait 30 pip install -e .
 script:
   - flake8 .
   - python setup.py test
 after_success:
-  - python setup.py bdist_wheel --dist-dir=dist/torch-${TORCH_VERSION}
-  - python script/rename_wheel.py ${IDX}
+  - python setup.py bdist_wheel --dist-dir=dist
+  - ls -lah dist/
   - codecov
 deploy:
   provider: s3
@@ -127,8 +127,8 @@ deploy:
   access_key_id: ${S3_ACCESS_KEY}
   secret_access_key: ${S3_SECRET_ACCESS_KEY}
   bucket: pytorch-geometric.com
-  local_dir: dist/torch-${TORCH_VERSION}
-  upload_dir: whl/torch-${TORCH_VERSION}
+  local_dir: dist
+  upload_dir: whl/torch-${TORCH_VERSION}+${IDX}
   acl: public_read
   on:
     all_branches: true
...
 cmake_minimum_required(VERSION 3.0)
 project(torchscatter)
 set(CMAKE_CXX_STANDARD 14)
-set(TORCHSCATTER_VERSION 2.0.5)
+set(TORCHSCATTER_VERSION 2.0.6)
 option(WITH_CUDA "Enable CUDA support" OFF)
...
@@ -9,7 +9,11 @@
 #endif

 #ifdef _WIN32
-PyMODINIT_FUNC PyInit__scatter(void) { return NULL; }
+#ifdef WITH_CUDA
+PyMODINIT_FUNC PyInit__scatter_cuda(void) { return NULL; }
+#else
+PyMODINIT_FUNC PyInit__scatter_cpu(void) { return NULL; }
+#endif
 #endif

 torch::Tensor broadcast(torch::Tensor src, torch::Tensor other, int64_t dim) {
...
@@ -9,7 +9,11 @@
 #endif

 #ifdef _WIN32
-PyMODINIT_FUNC PyInit__segment_coo(void) { return NULL; }
+#ifdef WITH_CUDA
+PyMODINIT_FUNC PyInit__segment_coo_cuda(void) { return NULL; }
+#else
+PyMODINIT_FUNC PyInit__segment_coo_cpu(void) { return NULL; }
+#endif
 #endif

 std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
...
@@ -9,7 +9,11 @@
 #endif

 #ifdef _WIN32
-PyMODINIT_FUNC PyInit__segment_csr(void) { return NULL; }
+#ifdef WITH_CUDA
+PyMODINIT_FUNC PyInit__segment_csr_cuda(void) { return NULL; }
+#else
+PyMODINIT_FUNC PyInit__segment_csr_cpu(void) { return NULL; }
+#endif
 #endif

 std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
...
@@ -6,7 +6,11 @@
 #endif

 #ifdef _WIN32
-PyMODINIT_FUNC PyInit__version(void) { return NULL; }
+#ifdef WITH_CUDA
+PyMODINIT_FUNC PyInit__version_cuda(void) { return NULL; }
+#else
+PyMODINIT_FUNC PyInit__version_cpu(void) { return NULL; }
+#endif
 #endif

 int64_t cuda_version() {
...
@@ -69,7 +69,7 @@ if [ "${TRAVIS_OS_NAME}" = "osx" ] && [ "$IDX" = "cpu" ]; then
 fi

 if [ "${IDX}" = "cpu" ]; then
-  export FORCE_CPU=1
+  export FORCE_ONLY_CPU=1
 else
   export FORCE_CUDA=1
 fi
...
-import sys
-import os
-import os.path as osp
-import glob
-import shutil
-
-idx = sys.argv[1]
-assert idx in ['cpu', 'cu92', 'cu101', 'cu102', 'cu110']
-
-dist_dir = osp.join(osp.dirname(osp.abspath(__file__)), '..', 'dist')
-wheels = glob.glob(osp.join('dist', '**', '*.whl'), recursive=True)
-
-for wheel in wheels:
-    if idx in wheel:
-        continue
-
-    paths = wheel.split(osp.sep)
-    names = paths[-1].split('-')
-
-    name = '-'.join(names[:-4] + ['latest+' + idx] + names[-3:])
-    shutil.copyfile(wheel, osp.join(*paths[:-1], name))
-
-    name = '-'.join(names[:-4] + [names[-4] + '+' + idx] + names[-3:])
-    os.rename(wheel, osp.join(*paths[:-1], name))
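The deleted script above rewrote already-built wheels so that the CPU/CUDA variant appeared as a local version label in the filename; the new CI setup makes this unnecessary by uploading plain wheels into a per-variant directory (whl/torch-${TORCH_VERSION}+${IDX}). A minimal sketch of what the removed renaming produced, using a made-up wheel filename:

# Sketch of the removed renaming step; the filename below is a hypothetical example.
idx = 'cu102'
wheel = 'torch_scatter-2.0.6-cp38-cp38-linux_x86_64.whl'

names = wheel.split('-')  # ['torch_scatter', '2.0.6', 'cp38', 'cp38', 'linux_x86_64.whl']
latest = '-'.join(names[:-4] + ['latest+' + idx] + names[-3:])
tagged = '-'.join(names[:-4] + [names[-4] + '+' + idx] + names[-3:])

print(latest)   # torch_scatter-latest+cu102-cp38-cp38-linux_x86_64.whl
print(tagged)   # torch_scatter-2.0.6+cu102-cp38-cp38-linux_x86_64.whl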
 import os
-import os.path as osp
+import sys
 import glob
+import os.path as osp
+from itertools import product

 from setuptools import setup, find_packages

 import torch
+from torch.__config__ import parallel_info
 from torch.utils.cpp_extension import BuildExtension
 from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME

-WITH_CUDA = CUDA_HOME is not None
+WITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
+suffices = ['cpu', 'cuda'] if WITH_CUDA else ['cpu']
 if os.getenv('FORCE_CUDA', '0') == '1':
-    WITH_CUDA = True
-if os.getenv('FORCE_CPU', '0') == '1':
-    WITH_CUDA = False
+    suffices = ['cuda', 'cpu']
+if os.getenv('FORCE_ONLY_CUDA', '0') == '1':
+    suffices = ['cuda']
+if os.getenv('FORCE_ONLY_CPU', '0') == '1':
+    suffices = ['cpu']

 BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'


 def get_extensions():
     extensions = []
-    for with_cuda, supername in [
-        (False, "cpu"),
-        (True, "gpu"),
-    ]:
-        if with_cuda and not WITH_CUDA:
-            continue
-        Extension = CppExtension
-        define_macros = []
-        extra_compile_args = {'cxx': []}
-        if with_cuda:
-            Extension = CUDAExtension
-            define_macros += [('WITH_CUDA', None)]
-            nvcc_flags = os.getenv('NVCC_FLAGS', '')
-            nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
-            nvcc_flags += ['-arch=sm_35', '--expt-relaxed-constexpr']
-            extra_compile_args['nvcc'] = nvcc_flags
-        extensions_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'csrc')
-        main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))
-        for main in main_files:
-            name = main.split(os.sep)[-1][:-4]
-            sources = [main]
-            path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
-            if osp.exists(path):
-                sources += [path]
-            path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
-            if with_cuda and osp.exists(path):
-                sources += [path]
-            extension = Extension(
-                'torch_scatter._%s_%s' % (name, supername),
-                sources,
-                include_dirs=[extensions_dir],
-                define_macros=define_macros,
-                extra_compile_args=extra_compile_args,
-            )
-            extensions += [extension]
+
+    extensions_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'csrc')
+    main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))
+
+    for main, suffix in product(main_files, suffices):
+        define_macros = []
+        extra_compile_args = {'cxx': ['-O2']}
+        extra_link_args = ['-s']
+
+        info = parallel_info()
+        if 'backend: OpenMP' in info and 'OpenMP not found' not in info:
+            extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']
+            if sys.platform == 'win32':
+                extra_compile_args['cxx'] += ['/openmp']
+            else:
+                extra_compile_args['cxx'] += ['-fopenmp']
+        else:
+            print('Compiling without OpenMP...')
+
+        if suffix == 'cuda':
+            define_macros += [('WITH_CUDA', None)]
+            nvcc_flags = os.getenv('NVCC_FLAGS', '')
+            nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
+            nvcc_flags += ['-arch=sm_35', '--expt-relaxed-constexpr']
+            extra_compile_args['nvcc'] = nvcc_flags
+
+        name = main.split(os.sep)[-1][:-4]
+        sources = [main]
+
+        path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
+        if osp.exists(path):
+            sources += [path]
+
+        path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
+        if suffix == 'cuda' and osp.exists(path):
+            sources += [path]
+
+        Extension = CppExtension if suffix == 'cpu' else CUDAExtension
+        extension = Extension(
+            f'torch_scatter._{name}_{suffix}',
+            sources,
+            include_dirs=[extensions_dir],
+            define_macros=define_macros,
+            extra_compile_args=extra_compile_args,
+            extra_link_args=extra_link_args,
+        )
+        extensions += [extension]
+
     return extensions
@@ -69,7 +81,7 @@ tests_require = ['pytest', 'pytest-cov']
 setup(
     name='torch_scatter',
-    version='2.0.5',
+    version='2.0.6',
     author='Matthias Fey',
     author_email='matthias.fey@tu-dortmund.de',
     url='https://github.com/rusty1s/pytorch_scatter',
...
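The rewritten get_extensions() replaces the (False, "cpu") / (True, "gpu") loop with product(main_files, suffices), so each entry point in csrc/ is compiled once per enabled suffix and exposed as torch_scatter._<name>_<suffix>. A rough sketch of how the suffix list and the resulting module names come together, mirroring the environment-variable handling from the diff (the file list is a stand-in for the glob over csrc/*.cpp):

import os
from itertools import product


def resolve_suffices(cuda_available):
    # Mirrors the FORCE_* environment-variable logic of the new setup.py.
    suffices = ['cpu', 'cuda'] if cuda_available else ['cpu']
    if os.getenv('FORCE_CUDA', '0') == '1':
        suffices = ['cuda', 'cpu']
    if os.getenv('FORCE_ONLY_CUDA', '0') == '1':
        suffices = ['cuda']
    if os.getenv('FORCE_ONLY_CPU', '0') == '1':
        suffices = ['cpu']
    return suffices


# Stand-in for glob.glob(osp.join('csrc', '*.cpp')).
main_files = ['scatter.cpp', 'segment_coo.cpp', 'segment_csr.cpp', 'version.cpp']

for main, suffix in product(main_files, resolve_suffices(cuda_available=True)):
    name = main[:-4]  # strip the '.cpp' extension
    print(f'torch_scatter._{name}_{suffix}')
# -> _scatter_cpu, _scatter_cuda, _segment_coo_cpu, _segment_coo_cuda, ...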
@@ -4,18 +4,14 @@ import os.path as osp

 import torch

-__version__ = '2.0.5'
+__version__ = '2.0.6'

-if torch.cuda.is_available():
-    sublib = "gpu"
-else:
-    sublib = "cpu"
+suffix = 'cuda' if torch.cuda.is_available() else 'cpu'

 try:
     for library in ['_version', '_scatter', '_segment_csr', '_segment_coo']:
-        library = "%s_%s" % (library, sublib)
         torch.ops.load_library(importlib.machinery.PathFinder().find_spec(
-            library, [osp.dirname(__file__)]).origin)
+            f'{library}_{suffix}', [osp.dirname(__file__)]).origin)
 except AttributeError as e:
     if os.getenv('BUILD_DOCS', '0') != '1':
         raise AttributeError(e)
@@ -45,7 +41,7 @@ except AttributeError as e:
     torch.ops.torch_scatter.segment_max_coo = segment_coo_arg_placeholder
     torch.ops.torch_scatter.gather_coo = gather_coo_placeholder

-if torch.cuda.is_available() and torch.version.cuda:  # pragma: no cover
+if torch.cuda.is_available():  # pragma: no cover
     cuda_version = torch.ops.torch_scatter.cuda_version()

     if cuda_version == -1:
...
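With one shared library per operator set and suffix, __init__.py now resolves the suffix once from torch.cuda.is_available() and loads the matching _<library>_<suffix> files; the operators they register become reachable under torch.ops.torch_scatter, and the final hunk appears to rely on cuda_version() returning -1 when the loaded extension was built without CUDA. A small usage sketch, assuming an installed torch-scatter wheel:

import torch
import torch_scatter  # loads _version_<suffix>, _scatter_<suffix>, ... at import time

print(torch_scatter.__version__)               # '2.0.6'
# Registered by the loaded _version_<suffix> library; -1 on a CPU-only build.
print(torch.ops.torch_scatter.cuda_version())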