Commit 97f2f4e9 authored by quyuanhao123

Initial commit

#ifdef WITH_PYTHON
#include <Python.h>
#endif

#include <torch/script.h>

#include "cpu/spspmm_cpu.h"

#ifdef WITH_HIP
#include "hip/spspmm_hip.h"
#endif

#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_HIP
PyMODINIT_FUNC PyInit__spspmm_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__spspmm_cpu(void) { return NULL; }
#endif
#endif
#endif

SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
spspmm_sum(torch::Tensor rowptrA, torch::Tensor colA,
           torch::optional<torch::Tensor> optional_valueA,
           torch::Tensor rowptrB, torch::Tensor colB,
           torch::optional<torch::Tensor> optional_valueB, int64_t K) {
  if (rowptrA.device().is_cuda()) {
#ifdef WITH_HIP
    return spspmm_cuda(rowptrA, colA, optional_valueA, rowptrB, colB,
                       optional_valueB, K, "sum");
#else
    AT_ERROR("Not compiled with CUDA support");
#endif
  } else {
    return spspmm_cpu(rowptrA, colA, optional_valueA, rowptrB, colB,
                      optional_valueB, K, "sum");
  }
}

static auto registry =
    torch::RegisterOperators().op("torch_sparse::spspmm_sum", &spspmm_sum);
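
Once this extension is loaded (e.g. via torch.ops.load_library in torch_sparse/__init__.py further down), the registered operator can be called directly from Python. A minimal sketch with illustrative CSR inputs, assuming the built _spspmm extension is importable:

import torch
import torch_sparse  # loads the _spspmm_{hip,cpu} extension and its ops

# A (2x3) and B (3x2) in CSR form; the value tensors are optional
# (passing None multiplies sparsity patterns only).
rowptrA = torch.tensor([0, 1, 3])
colA = torch.tensor([0, 1, 2])
valueA = torch.tensor([1., 2., 3.])
rowptrB = torch.tensor([0, 1, 2, 2])
colB = torch.tensor([1, 0])
valueB = torch.tensor([4., 5.])

# K is the number of columns of B; the "sum" reduction is fixed by this wrapper.
rowptrC, colC, valueC = torch.ops.torch_sparse.spspmm_sum(
    rowptrA, colA, valueA, rowptrB, colB, valueB, 2)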
#ifdef WITH_PYTHON
#include <Python.h>
#endif

#include <torch/script.h>

#include "sparse.h"

#ifdef WITH_HIP
#include <hip/hip_runtime.h>
#endif

#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_HIP
PyMODINIT_FUNC PyInit__version_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__version_cpu(void) { return NULL; }
#endif
#endif
#endif

SPARSE_API int64_t cuda_version() {
#ifdef WITH_HIP
  return TORCH_HIP_VERSION;
#else
  return -1;
#endif
}

static auto registry =
    torch::RegisterOperators().op("torch_sparse::cuda_version", &cuda_version);
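
After the library is loaded, this operator provides a quick way to tell whether the installed binaries were built against HIP/ROCm. A minimal check, assuming torch_sparse has been imported (the exact TORCH_HIP_VERSION value depends on the toolchain; setup.py below pins the macro to 10000):

import torch
import torch_sparse  # registers torch_sparse::cuda_version

version = torch.ops.torch_sparse.cuda_version()
if version == -1:
    print('torch_sparse was built without HIP support (CPU only)')
else:
    print(f'torch_sparse was built with TORCH_HIP_VERSION={version}')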
#!/bin/bash
source ~/miniconda3/etc/profile.d/conda.sh
conda activate torch1.10_py39_dtk22.10
module purge
module load compiler/devtoolset/7.3.1 mpi/hpcx/gcc-7.3.1 #compiler/dtk/22.10.1
module list
source ~/dtk-22.10.1/env.sh
export C_INCLUDE_PATH=/public/software/apps/DeepLearning/PyTorch_Lib/gflags-2.1.2-build/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=/public/software/apps/DeepLearning/PyTorch_Lib/gflags-2.1.2-build/include:$CPLUS_INCLUDE_PATH
export C_INCLUDE_PATH=/public/software/apps/DeepLearning/PyTorch_Lib/glog-build/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=/public/software/apps/DeepLearning/PyTorch_Lib/glog-build/include:$CPLUS_INCLUDE_PATH
export C_INCLUDE_PATH=$ROCM_PATH/rocrand/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=$ROCM_PATH/rocrand/include:$CPLUS_INCLUDE_PATH
export LD_LIBRARY_PATH=$ROCM_PATH/rocrand/lib:$LD_LIBRARY_PATH
export FORCE_ONLY_HIP=1
export CC=hipcc
export CXX=hipcc
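
The variables exported above are consumed by the build: FORCE_ONLY_HIP selects the 'hip' suffix in setup.py below, ROCM_PATH locates the hipSPARSE/hipRAND headers, and CC/CXX route compilation through hipcc. A small sanity check before building, sketched under the assumption that the script has been sourced in the current shell:

import os

# These must be visible to setup.py; source the script above first.
for var in ['FORCE_ONLY_HIP', 'ROCM_PATH', 'CC', 'CXX']:
    print(var, '=', os.environ.get(var))

assert os.environ.get('FORCE_ONLY_HIP') == '1', 'hip-only build not requested'
assert os.environ.get('ROCM_PATH'), 'ROCM_PATH is required by setup.py'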
[metadata]
long_description = file: README.md
long_description_content_type = text/markdown
classifiers =
    Development Status :: 5 - Production/Stable
    License :: OSI Approved :: MIT License
    Programming Language :: Python
    Programming Language :: Python :: 3.7
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
    Programming Language :: Python :: 3 :: Only
[aliases]
test = pytest
[tool:pytest]
addopts = --capture=no
[egg_info]
tag_build =
tag_date = 0
import glob
import os
import os.path as osp
import platform
import sys
from itertools import product
import torch
from setuptools import find_packages, setup
from torch.__config__ import parallel_info
from torch.utils.cpp_extension import (CUDA_HOME, BuildExtension, CppExtension,
                                       CUDAExtension)
__version__ = '0.6.13'
URL = 'https://github.com/rusty1s/pytorch_sparse'
WITH_HIP = torch.cuda.is_available() and CUDA_HOME is not None
suffices = ['cpu', 'cuda'] if WITH_HIP else ['cpu']
if os.getenv('FORCE_CUDA', '0') == '1':
    suffices = ['cuda', 'cpu']
if os.getenv('FORCE_ONLY_HIP', '0') == '1':
    suffices = ['hip']
if os.getenv('FORCE_ONLY_CPU', '0') == '1':
    suffices = ['cpu']
ROCM_PATH = os.getenv('ROCM_PATH')
HIPLIB2 = osp.join(ROCM_PATH, 'hiprand', 'include')
HIPLIB1 = osp.join(ROCM_PATH, 'hipsparse', 'include')
BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'
WITH_METIS = True if os.getenv('WITH_METIS', '0') == '1' else False
WITH_MTMETIS = True if os.getenv('WITH_MTMETIS', '0') == '1' else False
WITH_SYMBOLS = True if os.getenv('WITH_SYMBOLS', '0') == '1' else False
def get_extensions():
    extensions = []

    extensions_dir = osp.join('csrc')
    main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))

    for main, suffix in product(main_files, suffices):
        define_macros = [('WITH_PYTHON', None)]

        if sys.platform == 'win32':
            define_macros += [('torchsparse_EXPORTS', None)]

        libraries = []
        if WITH_METIS:
            define_macros += [('WITH_METIS', None)]
            libraries += ['metis']
        if WITH_MTMETIS:
            define_macros += [('WITH_MTMETIS', None)]
            define_macros += [('MTMETIS_64BIT_VERTICES', None)]
            define_macros += [('MTMETIS_64BIT_EDGES', None)]
            define_macros += [('MTMETIS_64BIT_WEIGHTS', None)]
            define_macros += [('MTMETIS_64BIT_PARTITIONS', None)]
            libraries += ['mtmetis', 'wildriver']

        extra_compile_args = {'cxx': ['-O2']}
        if not os.name == 'nt':  # Not on Windows:
            extra_compile_args['cxx'] += ['-Wno-sign-compare']
        extra_link_args = [] if WITH_SYMBOLS else ['-s']

        info = parallel_info()
        if ('backend: OpenMP' in info and 'OpenMP not found' not in info
                and sys.platform != 'darwin'):
            extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']
            if sys.platform == 'win32':
                extra_compile_args['cxx'] += ['/openmp']
            else:
                extra_compile_args['cxx'] += ['-fopenmp']
        else:
            print('Compiling without OpenMP...')

        # Compile for mac arm64
        if (sys.platform == 'darwin' and platform.machine() == 'arm64'):
            extra_compile_args['cxx'] += ['-arch', 'arm64']
            extra_link_args += ['-arch', 'arm64']

        if suffix == 'hip':
            define_macros += [('WITH_HIP', None)]
            hipcc_flags = os.getenv('HIPCC_FLAGS', '')
            hipcc_flags = [] if hipcc_flags == '' else hipcc_flags.split(' ')
            hipcc_flags += ['--expt-relaxed-constexpr', '-O2']
            extra_compile_args['hipcc'] = hipcc_flags

            if sys.platform == 'win32':
                extra_link_args += ['hipsparse.lib']
            else:
                extra_link_args += ['-lhipsparse', '-l', 'hipsparse']

        extra_link_args += ['-fopenmp', '-lomp']

        name = main.split(os.sep)[-1][:-4]
        sources = [main]

        path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
        if osp.exists(path):
            sources += [path]

        path = osp.join(extensions_dir, 'hip', f'{name}_hip.hip')
        if suffix == 'hip' and osp.exists(path):
            sources += [path]

        Extension = CppExtension if suffix == 'cpu' else CUDAExtension

        define_macros += [('TORCH_HIP_VERSION', 10000), ('__HIP__', None),
                          ('__HCC__', None)]

        extension = Extension(
            f'torch_sparse._{name}_{suffix}',
            sources,
            include_dirs=[extensions_dir, HIPLIB1, HIPLIB2],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
            libraries=libraries,
        )
        extensions += [extension]

    return extensions
install_requires = [
    'scipy',
]

test_requires = [
    'pytest',
    'pytest-cov',
]

setup(
    name='torch_sparse',
    version=__version__,
    description=('PyTorch Extension Library of Optimized Autograd Sparse '
                 'Matrix Operations'),
    author='Matthias Fey',
    author_email='matthias.fey@tu-dortmund.de',
    url=URL,
    download_url=f'{URL}/archive/{__version__}.tar.gz',
    keywords=[
        'pytorch',
        'sparse',
        'sparse-matrices',
        'autograd',
    ],
    python_requires='>=3.7',
    install_requires=install_requires,
    extras_require={
        'test': test_requires,
    },
    ext_modules=get_extensions() if not BUILD_DOCS else [],
    cmdclass={
        'build_ext':
        BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False)
    },
    packages=find_packages(),
    include_package_data=False,
)
scipy
[test]
pytest
pytest-cov
import importlib
import os.path as osp

import torch

__version__ = '0.6.13'

for library in [
        '_version', '_convert', '_diag', '_spmm', '_spspmm', '_metis', '_rw',
        '_saint', '_sample', '_ego_sample', '_hgt_sample', '_neighbor_sample',
        '_relabel'
]:
    hip_spec = importlib.machinery.PathFinder().find_spec(
        f'{library}_hip', [osp.dirname(__file__)])
    cpu_spec = importlib.machinery.PathFinder().find_spec(
        f'{library}_cpu', [osp.dirname(__file__)])
    spec = hip_spec or cpu_spec
    if spec is not None:
        torch.ops.load_library(spec.origin)
    else:  # pragma: no cover
        raise ImportError(f"Could not find module '{library}_cpu' in "
                          f"{osp.dirname(__file__)}")

cuda_version = torch.ops.torch_sparse.cuda_version()
if torch.cuda.is_available() and cuda_version != -1:  # pragma: no cover
    if cuda_version < 10000:
        major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2])
    else:
        major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
from .storage import SparseStorage # noqa
from .tensor import SparseTensor # noqa
from .transpose import t # noqa
from .narrow import narrow, __narrow_diag__ # noqa
from .select import select # noqa
from .index_select import index_select, index_select_nnz # noqa
from .masked_select import masked_select, masked_select_nnz # noqa
from .permute import permute # noqa
from .diag import remove_diag, set_diag, fill_diag, get_diag # noqa
from .add import add, add_, add_nnz, add_nnz_ # noqa
from .mul import mul, mul_, mul_nnz, mul_nnz_ # noqa
from .reduce import sum, mean, min, max # noqa
from .matmul import matmul # noqa
from .cat import cat # noqa
from .rw import random_walk # noqa
from .metis import partition # noqa
from .bandwidth import reverse_cuthill_mckee # noqa
from .saint import saint_subgraph # noqa
from .padding import padded_index, padded_index_select # noqa
from .sample import sample, sample_adj # noqa
from .convert import to_torch_sparse, from_torch_sparse # noqa
from .convert import to_scipy, from_scipy # noqa
from .coalesce import coalesce # noqa
from .transpose import transpose # noqa
from .eye import eye # noqa
from .spmm import spmm # noqa
from .spspmm import spspmm # noqa
from .spadd import spadd # noqa
__all__ = [
    'SparseStorage',
    'SparseTensor',
    't',
    'narrow',
    '__narrow_diag__',
    'select',
    'index_select',
    'index_select_nnz',
    'masked_select',
    'masked_select_nnz',
    'permute',
    'remove_diag',
    'set_diag',
    'fill_diag',
    'get_diag',
    'add',
    'add_',
    'add_nnz',
    'add_nnz_',
    'mul',
    'mul_',
    'mul_nnz',
    'mul_nnz_',
    'sum',
    'mean',
    'min',
    'max',
    'matmul',
    'cat',
    'random_walk',
    'partition',
    'reverse_cuthill_mckee',
    'saint_subgraph',
    'padded_index',
    'padded_index_select',
    'to_torch_sparse',
    'from_torch_sparse',
    'to_scipy',
    'from_scipy',
    'coalesce',
    'transpose',
    'eye',
    'spmm',
    'spspmm',
    'spadd',
    '__version__',
]
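
With the package installed, the high-level spspmm wrapper (COO interface) exercises the same kernels registered above. A short usage sketch following the upstream pytorch_sparse documentation; the matrices here are illustrative:

import torch
from torch_sparse import spspmm

# A is 3x3, B is 3x2, each given as a COO index/value pair.
indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]])
valueA = torch.tensor([1., 2., 3., 4., 5.])
indexB = torch.tensor([[0, 2], [1, 0]])
valueB = torch.tensor([2., 4.])

# Sparse-sparse matrix product C = A @ B with shapes (3, 3) @ (3, 2).
indexC, valueC = spspmm(indexA, valueA, indexB, valueB, 3, 3, 2)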