Commit c5f5be51 authored by rusty1s
Browse files

implementing convert

parent 3c6dbfa1
// C++ entry points for the `convert` CUDA operators (ind2ptr / ptr2ind).
#pragma once
#include <torch/extension.h>
// Assert that a tensor lives on a CUDA device before dispatching to a kernel.
#define CHECK_CUDA(x) \
AT_ASSERTM(x.device().is_cuda(), #x " must be CUDA tensor")
#define CHECK_INPUT(x) AT_ASSERTM(x, "Input mismatch")
#include <torch/script.h>
// NOTE(review): the include + macro pair below duplicates the one above —
// this looks like diff-merge residue (old torch/extension.h block vs. new
// torch/script.h block).  The redefinition is token-identical, so it is
// benign, but the duplicate should eventually be removed.
#define CHECK_CUDA(x) \
AT_ASSERTM(x.device().is_cuda(), #x " must be CUDA tensor")
// Implemented in the companion *_kernel.cu translation unit.
torch::Tensor ind2ptr_cuda(torch::Tensor ind, int64_t M);
torch::Tensor ptr2ind_cuda(torch::Tensor ptr, int64_t E);
// Validate that `ind` is a CUDA tensor, then forward to the CUDA
// implementation.  (Presumably converts a COO row-index vector into a
// CSR-style row-pointer vector for M rows — kernel not visible here,
// TODO confirm.)
torch::Tensor ind2ptr(torch::Tensor ind, int64_t M) {
CHECK_CUDA(ind);
return ind2ptr_cuda(ind, M);
}
// Validate that `ptr` is a CUDA tensor, then forward to the CUDA
// implementation.  (Presumably expands a row-pointer vector back into E
// row indices — kernel not visible here, TODO confirm.)
torch::Tensor ptr2ind(torch::Tensor ptr, int64_t E) {
CHECK_CUDA(ptr);
return ptr2ind_cuda(ptr, E);
}
// Register both wrappers under the `torch_sparse_cuda` namespace so Python
// can call them as torch.ops.torch_sparse_cuda.ind2ptr / .ptr2ind.
static auto registry =
torch::RegisterOperators("torch_sparse_cuda::ind2ptr", &ind2ptr)
.op("torch_sparse_cuda::ptr2ind", &ptr2ind);
import platform
import os
import os.path as osp
from glob import glob
import sys
import glob
from setuptools import setup, find_packages
from sys import argv
import torch
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME
# --- Legacy (pre-`csrc/`) build configuration --------------------------------
# NOTE(review): this section and `get_extensions()` below are two generations
# of the same build script merged by a bad diff; indentation is restored here
# without changing the original intent.

cxx_extra_compile_args = []
nvcc_extra_compile_args = ['-arch=sm_35', '--expt-relaxed-constexpr']

TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])

extra_compile_args = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    # Tell the C++/CUDA sources that a PyTorch >= 1.3 API may be assumed.
    cxx_extra_compile_args += ['-DVERSION_GE_1_3']
    nvcc_extra_compile_args += ['-DVERSION_GE_1_3']

cmdclass = {
    'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)
}

ext_modules = []
# One CPU extension per translation unit found in `cpu/`.  `glob.glob` is
# used instead of the bare `glob` name because `import glob` later in the
# import block shadows `from glob import glob` (module object is not callable).
exts = [e.split(osp.sep)[-1][:-4] for e in glob.glob(osp.join('cpu', '*.cpp'))]
ext_modules += [
    CppExtension(f'torch_sparse.{ext}_cpu', [f'cpu/{ext}.cpp'],
                 extra_compile_args=cxx_extra_compile_args) for ext in exts
]
# --- Legacy CUDA extensions + CUDA availability flags ------------------------
if CUDA_HOME is not None and '--cpu' not in argv:
    if platform.system() == 'Windows':
        extra_link_args = ['cusparse.lib']
    else:
        extra_link_args = ['-lcusparse', '-l', 'cusparse']

    # `glob.glob` (not the bare `glob` name) — see the note in the CPU
    # extension section above about the shadowed import.
    exts = [
        e.split(osp.sep)[-1][:-4] for e in glob.glob(osp.join('cuda', '*.cpp'))
    ]
    # NOTE(review): the closing of this CUDAExtension call (and the
    # `extra_link_args=` keyword) was reconstructed — the merged diff had
    # scattered those lines into the middle of get_extensions().  Verify
    # against the pre-commit setup.py.
    ext_modules += [
        CUDAExtension(
            f'torch_sparse.{ext}_cuda',
            [f'cuda/{ext}.cpp', f'cuda/{ext}_kernel.cu'],
            extra_compile_args={
                'cxx': cxx_extra_compile_args,
                'nvcc': nvcc_extra_compile_args,
            },
            extra_link_args=extra_link_args,
        ) for ext in exts
    ]

# Build CUDA kernels when both a CUDA runtime and a toolkit are present;
# the FORCE_CUDA / FORCE_NON_CUDA environment variables override detection.
WITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
if os.getenv('FORCE_CUDA', '0') == '1':
    WITH_CUDA = True
if os.getenv('FORCE_NON_CUDA', '0') == '1':
    WITH_CUDA = False
BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'
def get_extensions(with_cuda=None):
    """Collect the C++/CUDA extension modules found under ``csrc/``.

    Every ``csrc/<name>.cpp`` becomes an extension ``torch_sparse._<name>``
    built from the main file plus ``csrc/cpu/<name>_cpu.cpp`` and, when
    building with CUDA, ``csrc/cuda/<name>_cuda.cu``.

    Args:
        with_cuda: Whether to build CUDA variants.  ``None`` (the default,
            backward compatible) falls back to the module-level ``WITH_CUDA``
            flag.

    Returns:
        list: Extension objects suitable for ``setup(ext_modules=...)``.
    """
    # NOTE(review): the original merged diff had stray lines from the old
    # setup.py (`) for ext in exts`, `if '--cpu' in argv:` ...) spliced into
    # the Extension(...) call below — removed here, call closed properly.
    if with_cuda is None:
        with_cuda = WITH_CUDA

    Extension = CppExtension
    define_macros = []
    extra_compile_args = {'cxx': [], 'nvcc': []}
    extra_link_args = []

    # Windows users: Edit both of these to contain your VS include path, i.e.:
    # extra_compile_args['cxx'] += ['-I{VISUAL_STUDIO_DIR}\\include']
    # extra_compile_args['nvcc'] += ['-I{VISUAL_STUDIO_DIR}\\include']

    if with_cuda:
        Extension = CUDAExtension
        define_macros += [('WITH_CUDA', None)]
        nvcc_flags = os.getenv('NVCC_FLAGS', '')
        nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
        nvcc_flags += ['-arch=sm_35', '--expt-relaxed-constexpr']
        extra_compile_args['cxx'] += ['-O0']
        extra_compile_args['nvcc'] += nvcc_flags
        if sys.platform == 'win32':
            extra_link_args = ['cusparse.lib']
        else:
            extra_link_args = ['-lcusparse', '-l', 'cusparse']

    if sys.platform == 'win32':
        # Parallel compilation with MSVC.
        extra_compile_args['cxx'] += ['/MP']

    extensions_dir = osp.join(osp.dirname(osp.abspath(__file__)), 'csrc')
    main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))

    extensions = []
    for main in main_files:
        name = main.split(os.sep)[-1][:-4]

        sources = [main, osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')]
        if with_cuda:
            sources += [osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')]

        extension = Extension(
            f'torch_sparse._{name}',
            sources,
            include_dirs=[extensions_dir],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
        )
        extensions += [extension]

    return extensions
# Package metadata.
# NOTE(review): the merged diff carried both the old ('0.4.3') and new
# ('1.0.0') version strings; only the later (winning) assignment is kept.
__version__ = '1.0.0'
url = 'https://github.com/rusty1s/pytorch_sparse'

install_requires = ['scipy']
setup_requires = ['pytest-runner']
......@@ -58,18 +75,20 @@ tests_require = ['pytest', 'pytest-cov']
# NOTE(review): the merged diff repeated several keyword arguments (version,
# url, description, ext_modules, cmdclass), which is a SyntaxError.  The
# post-commit variants are kept; the old `download_url=` line appears to have
# been removed by this commit — verify against the repository history.
setup(
    name='torch_sparse',
    version='1.0.0',
    author='Matthias Fey',
    author_email='matthias.fey@tu-dortmund.de',
    url='https://github.com/rusty1s/pytorch_sparse',
    description=('PyTorch Extension Library of Optimized Autograd Sparse '
                 'Matrix Operations'),
    keywords=['pytorch', 'sparse', 'sparse-matrices', 'autograd'],
    license='MIT',
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    # Documentation builds do not need the compiled extensions.
    ext_modules=get_extensions() if not BUILD_DOCS else [],
    cmdclass={
        'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)
    },
    packages=find_packages(),
)
from typing import Optional
import torch
import torch_scatter
from torch_scatter import scatter, segment_csr
from torch_sparse.tensor import SparseTensor
......@@ -32,7 +30,6 @@ def reduction(src: SparseTensor, dim: Optional[int] = None,
return torch.tensor(1, dtype=src.dtype(), device=src.device())
else:
raise ValueError
else:
if dim < 0:
dim = src.dim() + dim
......@@ -67,7 +64,6 @@ def reduction(src: SparseTensor, dim: Optional[int] = None,
return value.max(dim=dim - 1)[0]
else:
raise ValueError
else:
raise ValueError
......
import warnings
import os.path as osp
from typing import Optional, List
import torch
from torch_scatter import segment_csr, scatter_add
from torch_sparse.utils import Final
try:
    # Load the compiled `convert` operators (ind2ptr / ptr2ind) into the
    # torch.ops.torch_sparse namespace.
    torch.ops.load_library(
        osp.join(osp.dirname(osp.abspath(__file__)), '_convert.so'))
except OSError:
    # The shared library is missing (e.g. docs build or a broken install).
    # Install placeholders that fail loudly on first use instead of at
    # import time.  NOTE(review): in the flattened diff these assignments
    # appeared unconditional, which would clobber successfully loaded ops —
    # they belong under this except branch.
    warnings.warn('Failed to load `convert` binaries.')

    def ind2ptr_placeholder(ind: torch.Tensor, M: int) -> torch.Tensor:
        """Stand-in for the native ``ind2ptr`` op; raises on use."""
        raise ImportError
        return ind  # unreachable; presumably satisfies the annotated return
        #             type for scripting — TODO confirm

    def ptr2ind_placeholder(ptr: torch.Tensor, E: int) -> torch.Tensor:
        """Stand-in for the native ``ptr2ind`` op; raises on use."""
        raise ImportError
        return ptr  # unreachable; see note above

    torch.ops.torch_sparse.ind2ptr = ind2ptr_placeholder
    torch.ops.torch_sparse.ptr2ind = ptr2ind_placeholder
# Identifiers of the sparse layouts handled by this module.
layouts: Final[List[str]] = ['coo', 'csr', 'csc']
......@@ -147,16 +165,7 @@ class SparseStorage(object):
rowptr = self._rowptr
if rowptr is not None:
if rowptr.is_cuda:
row = torch.ops.torch_sparse_cuda.ptr2ind(
rowptr, self._col.numel())
else:
if rowptr.is_cuda:
row = torch.ops.torch_sparse_cuda.ptr2ind(
rowptr, self._col.numel())
else:
row = torch.ops.torch_sparse_cpu.ptr2ind(
rowptr, self._col.numel())
row = torch.ops.torch_sparse.ptr2ind(rowptr, self._col.numel())
self._row = row
return row
......@@ -172,12 +181,7 @@ class SparseStorage(object):
row = self._row
if row is not None:
if row.is_cuda:
rowptr = torch.ops.torch_sparse_cuda.ind2ptr(
row, self._sparse_sizes[0])
else:
rowptr = torch.ops.torch_sparse_cpu.ind2ptr(
row, self._sparse_sizes[0])
rowptr = torch.ops.torch_sparse.ind2ptr(row, self._sparse_sizes[0])
self._rowptr = rowptr
return rowptr
......@@ -284,8 +288,8 @@ class SparseStorage(object):
csr2csc = self._csr2csc
if csr2csc is not None:
colptr = torch.ops.torch_sparse_cpu.ind2ptr(
self._col[csr2csc], self._sparse_sizes[1])
colptr = torch.ops.torch_sparse.ind2ptr(self._col[csr2csc],
self._sparse_sizes[1])
else:
colptr = self._col.new_zeros(self._sparse_sizes[1] + 1)
torch.cumsum(self.colcount(), dim=0, out=colptr[1:])
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment