Commit 97f2f4e9 authored by quyuanhao123's avatar quyuanhao123
Browse files

Initial commit

parents
Pipeline #189 failed with stages
in 0 seconds
// spspmm.cpp — sparse-sparse matrix multiplication dispatcher (HIP port).
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/spspmm_cpu.h"
#ifdef WITH_HIP
#include "hip/spspmm_hip.h"
#endif
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_HIP
// Dummy module-init hooks so the extension DLL is importable on Windows;
// the actual operators are registered via torch::RegisterOperators below.
PyMODINIT_FUNC PyInit__spspmm_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__spspmm_cpu(void) { return NULL; }
#endif
#endif
#endif
// Computes C = A @ B for two CSR sparse matrices, combining duplicate
// entries with "sum". Dispatches on the device of `rowptrA`: GPU inputs go
// to the HIP/CUDA kernel, everything else to the CPU implementation.
//
// rowptrA/colA (+ optional values) describe A in CSR form, rowptrB/colB
// describe B. K is presumably the number of columns of B — TODO confirm
// against the kernel headers. Returns the (rowptr, col, optional value)
// CSR triple of the product.
SPARSE_API std::tuple<torch::Tensor, torch::Tensor, torch::optional<torch::Tensor>>
spspmm_sum(torch::Tensor rowptrA, torch::Tensor colA,
torch::optional<torch::Tensor> optional_valueA,
torch::Tensor rowptrB, torch::Tensor colB,
torch::optional<torch::Tensor> optional_valueB, int64_t K) {
if (rowptrA.device().is_cuda()) {
#ifdef WITH_HIP
// NOTE(review): on the HIP build the hipified kernel keeps its original
// `spspmm_cuda` name (declared in hip/spspmm_hip.h).
return spspmm_cuda(rowptrA, colA, optional_valueA, rowptrB, colB,
optional_valueB, K, "sum");
#else
AT_ERROR("Not compiled with CUDA support");
#endif
} else {
return spspmm_cpu(rowptrA, colA, optional_valueA, rowptrB, colB,
optional_valueB, K, "sum");
}
}
// Make the operator callable as torch.ops.torch_sparse.spspmm_sum once the
// shared library is loaded.
static auto registry =
torch::RegisterOperators().op("torch_sparse::spspmm_sum", &spspmm_sum);
// version.cpp — reports which GPU toolkit the extension was compiled with.
#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "sparse.h"
#ifdef WITH_HIP
#include <hip/hip_runtime.h>
#endif
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_HIP
// Dummy module-init hooks for Windows DLL import; operator registration
// happens via torch::RegisterOperators below.
PyMODINIT_FUNC PyInit__version_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__version_cpu(void) { return NULL; }
#endif
#endif
#endif
// Returns the HIP toolkit version this extension was compiled against, or
// -1 for a CPU-only build. The Python package calls
// torch.ops.torch_sparse.cuda_version() at import time to perform its
// compatibility check.
SPARSE_API int64_t cuda_version() {
#ifdef WITH_HIP
return TORCH_HIP_VERSION;
#else
return -1;
#endif
}
static auto registry =
torch::RegisterOperators().op("torch_sparse::cuda_version", &cuda_version);
#!/bin/bash
# Environment setup for building torch-sparse 0.6.13 against the DTK 22.10
# (ROCm/HIP) toolchain with the PyTorch 1.10 / Python 3.9 conda env.

# Activate the conda environment that provides PyTorch.
source ~/miniconda3/etc/profile.d/conda.sh
conda activate torch1.10_py39_dtk22.10

# Load compiler/MPI modules; the DTK toolkit itself is sourced manually
# below instead of via `module load` (note the commented-out module).
module purge
module load compiler/devtoolset/7.3.1 mpi/hpcx/gcc-7.3.1 #compiler/dtk/22.10.1
module list
source ~/dtk-22.10.1/env.sh

# Expose gflags / glog / rocRAND headers and libraries to the build.
export C_INCLUDE_PATH=/public/software/apps/DeepLearning/PyTorch_Lib/gflags-2.1.2-build/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=/public/software/apps/DeepLearning/PyTorch_Lib/gflags-2.1.2-build/include:$CPLUS_INCLUDE_PATH
export C_INCLUDE_PATH=/public/software/apps/DeepLearning/PyTorch_Lib/glog-build/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=/public/software/apps/DeepLearning/PyTorch_Lib/glog-build/include:$CPLUS_INCLUDE_PATH
export C_INCLUDE_PATH=$ROCM_PATH/rocrand/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=$ROCM_PATH/rocrand/include:$CPLUS_INCLUDE_PATH
export LD_LIBRARY_PATH=$ROCM_PATH/rocrand/lib:$LD_LIBRARY_PATH

# Build only the HIP extension variants (consumed by setup.py) and compile
# everything with hipcc.
export FORCE_ONLY_HIP=1
export CC=hipcc
export CXX=hipcc
[metadata]
long_description = file: README.md
long_description_content_type = text/markdown
classifiers =
Development Status :: 5 - Production/Stable
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3 :: Only
[aliases]
test = pytest
[tool:pytest]
addopts = --capture=no
[egg_info]
tag_build =
tag_date = 0
import glob
import os
import os.path as osp
import platform
import sys
from itertools import product
import torch
from setuptools import find_packages, setup
from torch.__config__ import parallel_info
from torch.utils.cpp_extension import (CUDA_HOME, BuildExtension, CppExtension,
CUDAExtension)
__version__ = '0.6.13'
URL = 'https://github.com/rusty1s/pytorch_sparse'

# NOTE: despite the name, this mirrors the upstream CUDA autodetection —
# build GPU extensions when a device is visible and a toolkit is configured.
WITH_HIP = torch.cuda.is_available() and CUDA_HOME is not None
suffices = ['cpu', 'cuda'] if WITH_HIP else ['cpu']
if os.getenv('FORCE_CUDA', '0') == '1':
    suffices = ['cuda', 'cpu']
if os.getenv('FORCE_ONLY_HIP', '0') == '1':
    suffices = ['hip']
if os.getenv('FORCE_ONLY_CPU', '0') == '1':
    suffices = ['cpu']

# Default to '' so a plain CPU build (ROCM_PATH unset) does not crash with
# `TypeError` inside osp.join below; the HIP include directories are only
# meaningful when the 'hip' suffix is being built anyway.
ROCM_PATH = os.getenv('ROCM_PATH', '')
HIPLIB2 = osp.join(ROCM_PATH, 'hiprand', 'include')
HIPLIB1 = osp.join(ROCM_PATH, 'hipsparse', 'include')

# Optional feature toggles, all controlled through environment variables.
BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'
WITH_METIS = os.getenv('WITH_METIS', '0') == '1'
WITH_MTMETIS = os.getenv('WITH_MTMETIS', '0') == '1'
WITH_SYMBOLS = os.getenv('WITH_SYMBOLS', '0') == '1'
def get_extensions():
    """Build the list of C++/HIP extension modules to compile.

    One extension is produced per (csrc/*.cpp entry point, suffix) pair,
    where the active suffixes come from the module-level ``suffices`` list
    ('cpu', 'cuda' and/or 'hip'). Compiler flags, macros and libraries are
    assembled from the WITH_METIS / WITH_MTMETIS / WITH_SYMBOLS toggles and
    the detected OpenMP support.
    """
    extensions = []

    extensions_dir = osp.join('csrc')
    main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))

    for main, suffix in product(main_files, suffices):
        define_macros = [('WITH_PYTHON', None)]

        if sys.platform == 'win32':
            define_macros += [('torchsparse_EXPORTS', None)]

        libraries = []
        if WITH_METIS:
            define_macros += [('WITH_METIS', None)]
            libraries += ['metis']
        if WITH_MTMETIS:
            define_macros += [('WITH_MTMETIS', None)]
            define_macros += [('MTMETIS_64BIT_VERTICES', None)]
            define_macros += [('MTMETIS_64BIT_EDGES', None)]
            define_macros += [('MTMETIS_64BIT_WEIGHTS', None)]
            define_macros += [('MTMETIS_64BIT_PARTITIONS', None)]
            libraries += ['mtmetis', 'wildriver']

        extra_compile_args = {'cxx': ['-O2']}
        if not os.name == 'nt':  # Not on Windows:
            extra_compile_args['cxx'] += ['-Wno-sign-compare']
        # '-s' strips symbols from the shared object unless kept explicitly.
        extra_link_args = [] if WITH_SYMBOLS else ['-s']

        info = parallel_info()
        if ('backend: OpenMP' in info and 'OpenMP not found' not in info
                and sys.platform != 'darwin'):
            extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']
            if sys.platform == 'win32':
                extra_compile_args['cxx'] += ['/openmp']
            else:
                extra_compile_args['cxx'] += ['-fopenmp']
        else:
            print('Compiling without OpenMP...')

        # Compile for mac arm64
        if (sys.platform == 'darwin' and platform.machine() == 'arm64'):
            extra_compile_args['cxx'] += ['-arch', 'arm64']
            extra_link_args += ['-arch', 'arm64']

        if suffix == 'hip':
            define_macros += [('WITH_HIP', None)]
            hipcc_flags = os.getenv('HIPCC_FLAGS', '')
            hipcc_flags = [] if hipcc_flags == '' else hipcc_flags.split(' ')
            hipcc_flags += ['--expt-relaxed-constexpr', '-O2']
            extra_compile_args['hipcc'] = hipcc_flags

            if sys.platform == 'win32':
                extra_link_args += ['hipsparse.lib']
            else:
                # NOTE(review): '-l', 'hipsparse' repeats '-lhipsparse' as a
                # split pair — redundant but harmless to the linker; confirm
                # intent with the original author.
                extra_link_args += ['-lhipsparse', '-l', 'hipsparse']
        # NOTE(review): original indentation was lost in this dump; this line
        # is assumed to apply to every suffix (always link OpenMP) — confirm.
        extra_link_args += ['-fopenmp','-lomp']

        name = main.split(os.sep)[-1][:-4]
        sources = [main]

        path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
        if osp.exists(path):
            sources += [path]

        path = osp.join(extensions_dir, 'hip', f'{name}_hip.hip')
        if suffix == 'hip' and osp.exists(path):
            sources += [path]

        Extension = CppExtension if suffix == 'cpu' else CUDAExtension
        # DTK-port hack: HIP macros are injected unconditionally, even for
        # the cpu suffix — presumably deliberate for this build; confirm.
        define_macros += [('TORCH_HIP_VERSION', 10000), ('__HIP__', None), ('__HCC__', None)]
        extension = Extension(
            f'torch_sparse._{name}_{suffix}',
            sources,
            include_dirs=[extensions_dir, HIPLIB1, HIPLIB2],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
            libraries=libraries,
        )
        extensions += [extension]

    return extensions
# Runtime and test-only dependencies consumed by setup() below.
install_requires = ['scipy']
test_requires = ['pytest', 'pytest-cov']
# Package metadata and build entry point. Native extensions are skipped
# entirely when building documentation (BUILD_DOCS=1); ninja is disabled
# for the custom build_ext.
setup(
    name='torch_sparse',
    version=__version__,
    description=('PyTorch Extension Library of Optimized Autograd Sparse '
                 'Matrix Operations'),
    author='Matthias Fey',
    author_email='matthias.fey@tu-dortmund.de',
    url=URL,
    download_url=f'{URL}/archive/{__version__}.tar.gz',
    keywords=[
        'pytorch',
        'sparse',
        'sparse-matrices',
        'autograd',
    ],
    python_requires='>=3.7',
    install_requires=install_requires,
    extras_require={
        'test': test_requires,
    },
    ext_modules=get_extensions() if not BUILD_DOCS else [],
    cmdclass={
        'build_ext':
        BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False)
    },
    packages=find_packages(),
    include_package_data=False,
)
Metadata-Version: 2.1
Name: torch-sparse
Version: 0.6.13
Summary: PyTorch Extension Library of Optimized Autograd Sparse Matrix Operations
Home-page: https://github.com/rusty1s/pytorch_sparse
Download-URL: https://github.com/rusty1s/pytorch_sparse/archive/0.6.13.tar.gz
Author: Matthias Fey
Author-email: matthias.fey@tu-dortmund.de
Keywords: pytorch,sparse,sparse-matrices,autograd
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3 :: Only
Requires-Python: >=3.7
Description-Content-Type: text/markdown
Provides-Extra: test
License-File: LICENSE
[pypi-image]: https://badge.fury.io/py/torch-sparse.svg
[pypi-url]: https://pypi.python.org/pypi/torch-sparse
[testing-image]: https://github.com/rusty1s/pytorch_sparse/actions/workflows/testing.yml/badge.svg
[testing-url]: https://github.com/rusty1s/pytorch_sparse/actions/workflows/testing.yml
[linting-image]: https://github.com/rusty1s/pytorch_sparse/actions/workflows/linting.yml/badge.svg
[linting-url]: https://github.com/rusty1s/pytorch_sparse/actions/workflows/linting.yml
[coverage-image]: https://codecov.io/gh/rusty1s/pytorch_sparse/branch/master/graph/badge.svg
[coverage-url]: https://codecov.io/github/rusty1s/pytorch_sparse?branch=master
# PyTorch Sparse
[![PyPI Version][pypi-image]][pypi-url]
[![Testing Status][testing-image]][testing-url]
[![Linting Status][linting-image]][linting-url]
[![Code Coverage][coverage-image]][coverage-url]
--------------------------------------------------------------------------------
This package consists of a small extension library of optimized sparse matrix operations with autograd support.
This package currently consists of the following methods:
* **[Coalesce](#coalesce)**
* **[Transpose](#transpose)**
* **[Sparse Dense Matrix Multiplication](#sparse-dense-matrix-multiplication)**
* **[Sparse Sparse Matrix Multiplication](#sparse-sparse-matrix-multiplication)**
All included operations work on varying data types and are implemented both for CPU and GPU.
To avoid the hassle of creating [`torch.sparse_coo_tensor`](https://pytorch.org/docs/stable/torch.html?highlight=sparse_coo_tensor#torch.sparse_coo_tensor), this package defines operations on sparse tensors by simply passing `index` and `value` tensors as arguments ([with same shapes as defined in PyTorch](https://pytorch.org/docs/stable/sparse.html)).
Note that only `value` comes with autograd support, as `index` is discrete and therefore not differentiable.
## Installation
### Anaconda
**Update:** You can now install `pytorch-sparse` via [Anaconda](https://anaconda.org/pyg/pytorch-sparse) for all major OS/PyTorch/CUDA combinations 🤗
Given that you have [`pytorch >= 1.8.0` installed](https://pytorch.org/get-started/locally/), simply run
```
conda install pytorch-sparse -c pyg
```
### Binaries
We alternatively provide pip wheels for all major OS/PyTorch/CUDA combinations, see [here](https://data.pyg.org/whl).
#### PyTorch 1.11
To install the binaries for PyTorch 1.11.0, simply run
```
pip install torch-scatter torch-sparse -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html
```
where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu113`, or `cu115` depending on your PyTorch installation.
| | `cpu` | `cu102` | `cu113` | `cu115` |
|-------------|-------|---------|---------|---------|
| **Linux** | ✅ | ✅ | ✅ | ✅ |
| **Windows** | ✅ | | ✅ | ✅ |
| **macOS** | ✅ | | | |
#### PyTorch 1.10
To install the binaries for PyTorch 1.10.0, PyTorch 1.10.1 and PyTorch 1.10.2, simply run
```
pip install torch-scatter torch-sparse -f https://data.pyg.org/whl/torch-1.10.0+${CUDA}.html
```
where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu111`, or `cu113` depending on your PyTorch installation.
| | `cpu` | `cu102` | `cu111` | `cu113` |
|-------------|-------|---------|---------|---------|
| **Linux** | ✅ | ✅ | ✅ | ✅ |
| **Windows** | ✅ | ✅ | ✅ | ✅ |
| **macOS** | ✅ | | | |
**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1 and PyTorch 1.9.0 (following the same procedure).
For older versions, you might need to explicitly specify the latest supported version number in order to prevent a manual installation from source.
You can look up the latest supported version number [here](https://data.pyg.org/whl).
### From source
Ensure that at least PyTorch 1.7.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
```
$ python -c "import torch; print(torch.__version__)"
>>> 1.7.0
$ echo $PATH
>>> /usr/local/cuda/bin:...
$ echo $CPATH
>>> /usr/local/cuda/include:...
```
If you want to additionally build `torch-sparse` with METIS support, *e.g.* for partitioning, please download and install the [METIS library](http://glaros.dtc.umn.edu/gkhome/metis/metis/download) by following the instructions in the `Install.txt` file.
Note that METIS needs to be installed with 64 bit `IDXTYPEWIDTH` by changing `include/metis.h`.
Afterwards, set the environment variable `WITH_METIS=1`.
Then run:
```
pip install torch-scatter torch-sparse
```
When running in a docker container without NVIDIA driver, PyTorch needs to evaluate the compute capabilities and may fail.
In this case, ensure that the compute capabilities are set via `TORCH_CUDA_ARCH_LIST`, *e.g.*:
```
export TORCH_CUDA_ARCH_LIST="6.0 6.1 7.2+PTX 7.5+PTX"
```
## Functions
### Coalesce
```
torch_sparse.coalesce(index, value, m, n, op="add") -> (torch.LongTensor, torch.Tensor)
```
Row-wise sorts `index` and removes duplicate entries.
Duplicate entries are removed by scattering them together.
For scattering, any operation of [`torch_scatter`](https://github.com/rusty1s/pytorch_scatter) can be used.
#### Parameters
* **index** *(LongTensor)* - The index tensor of sparse matrix.
* **value** *(Tensor)* - The value tensor of sparse matrix.
* **m** *(int)* - The first dimension of sparse matrix.
* **n** *(int)* - The second dimension of sparse matrix.
* **op** *(string, optional)* - The scatter operation to use. (default: `"add"`)
#### Returns
* **index** *(LongTensor)* - The coalesced index tensor of sparse matrix.
* **value** *(Tensor)* - The coalesced value tensor of sparse matrix.
#### Example
```python
import torch
from torch_sparse import coalesce
index = torch.tensor([[1, 0, 1, 0, 2, 1],
[0, 1, 1, 1, 0, 0]])
value = torch.Tensor([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])
index, value = coalesce(index, value, m=3, n=2)
```
```
print(index)
tensor([[0, 1, 1, 2],
[1, 0, 1, 0]])
print(value)
tensor([[6.0, 8.0],
[7.0, 9.0],
[3.0, 4.0],
[5.0, 6.0]])
```
### Transpose
```
torch_sparse.transpose(index, value, m, n) -> (torch.LongTensor, torch.Tensor)
```
Transposes dimensions 0 and 1 of a sparse matrix.
#### Parameters
* **index** *(LongTensor)* - The index tensor of sparse matrix.
* **value** *(Tensor)* - The value tensor of sparse matrix.
* **m** *(int)* - The first dimension of sparse matrix.
* **n** *(int)* - The second dimension of sparse matrix.
* **coalesced** *(bool, optional)* - If set to `False`, will not coalesce the output. (default: `True`)
#### Returns
* **index** *(LongTensor)* - The transposed index tensor of sparse matrix.
* **value** *(Tensor)* - The transposed value tensor of sparse matrix.
#### Example
```python
import torch
from torch_sparse import transpose
index = torch.tensor([[1, 0, 1, 0, 2, 1],
[0, 1, 1, 1, 0, 0]])
value = torch.Tensor([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])
index, value = transpose(index, value, 3, 2)
```
```
print(index)
tensor([[0, 0, 1, 1],
[1, 2, 0, 1]])
print(value)
tensor([[7.0, 9.0],
[5.0, 6.0],
[6.0, 8.0],
[3.0, 4.0]])
```
### Sparse Dense Matrix Multiplication
```
torch_sparse.spmm(index, value, m, n, matrix) -> torch.Tensor
```
Matrix product of a sparse matrix with a dense matrix.
#### Parameters
* **index** *(LongTensor)* - The index tensor of sparse matrix.
* **value** *(Tensor)* - The value tensor of sparse matrix.
* **m** *(int)* - The first dimension of sparse matrix.
* **n** *(int)* - The second dimension of sparse matrix.
* **matrix** *(Tensor)* - The dense matrix.
#### Returns
* **out** *(Tensor)* - The dense output matrix.
#### Example
```python
import torch
from torch_sparse import spmm
index = torch.tensor([[0, 0, 1, 2, 2],
[0, 2, 1, 0, 1]])
value = torch.Tensor([1, 2, 4, 1, 3])
matrix = torch.Tensor([[1, 4], [2, 5], [3, 6]])
out = spmm(index, value, 3, 3, matrix)
```
```
print(out)
tensor([[7.0, 16.0],
[8.0, 20.0],
[7.0, 19.0]])
```
### Sparse Sparse Matrix Multiplication
```
torch_sparse.spspmm(indexA, valueA, indexB, valueB, m, k, n) -> (torch.LongTensor, torch.Tensor)
```
Matrix product of two sparse tensors.
Both input sparse matrices need to be **coalesced** (use the `coalesced` attribute to force).
#### Parameters
* **indexA** *(LongTensor)* - The index tensor of first sparse matrix.
* **valueA** *(Tensor)* - The value tensor of first sparse matrix.
* **indexB** *(LongTensor)* - The index tensor of second sparse matrix.
* **valueB** *(Tensor)* - The value tensor of second sparse matrix.
* **m** *(int)* - The first dimension of first sparse matrix.
* **k** *(int)* - The second dimension of first sparse matrix and first dimension of second sparse matrix.
* **n** *(int)* - The second dimension of second sparse matrix.
* **coalesced** *(bool, optional)*: If set to `True`, will coalesce both input sparse matrices. (default: `False`)
#### Returns
* **index** *(LongTensor)* - The output index tensor of sparse matrix.
* **value** *(Tensor)* - The output value tensor of sparse matrix.
#### Example
```python
import torch
from torch_sparse import spspmm
indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]])
valueA = torch.Tensor([1, 2, 3, 4, 5])
indexB = torch.tensor([[0, 2], [1, 0]])
valueB = torch.Tensor([2, 4])
indexC, valueC = spspmm(indexA, valueA, indexB, valueB, 3, 3, 2)
```
```
print(indexC)
tensor([[0, 1, 2],
[0, 1, 1]])
print(valueC)
tensor([8.0, 6.0, 8.0])
```
## C++ API
`torch-sparse` also offers a C++ API that contains C++ equivalent of python models.
```
mkdir build
cd build
# Add -DWITH_CUDA=on support for the CUDA if needed
cmake ..
make
make install
```
## Running tests
```
pytest
```
LICENSE
MANIFEST.in
README.md
setup.cfg
setup.py
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/convert.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/diag.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/ego_sample.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/hgt_sample.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/metis.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/neighbor_sample.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/relabel.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/rw.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/saint.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/sample.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/spmm.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/spspmm.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/version.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/convert_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/diag_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/ego_sample_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/hgt_sample_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/metis_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/neighbor_sample_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/relabel_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/rw_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/saint_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/sample_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/spmm_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/cpu/spspmm_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/hip/convert_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/hip/diag_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/hip/rw_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/hip/spmm_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_sparse-0.6.13/csrc/hip/spspmm_hip.hip
csrc/convert.cpp
csrc/diag.cpp
csrc/ego_sample.cpp
csrc/extensions.h
csrc/hgt_sample.cpp
csrc/metis.cpp
csrc/neighbor_sample.cpp
csrc/relabel.cpp
csrc/rw.cpp
csrc/saint.cpp
csrc/sample.cpp
csrc/sparse.h
csrc/spmm.cpp
csrc/spspmm.cpp
csrc/version.cpp
csrc/cpu/convert_cpu.cpp
csrc/cpu/convert_cpu.h
csrc/cpu/diag_cpu.cpp
csrc/cpu/diag_cpu.h
csrc/cpu/ego_sample_cpu.cpp
csrc/cpu/ego_sample_cpu.h
csrc/cpu/hgt_sample_cpu.cpp
csrc/cpu/hgt_sample_cpu.h
csrc/cpu/metis_cpu.cpp
csrc/cpu/metis_cpu.h
csrc/cpu/neighbor_sample_cpu.cpp
csrc/cpu/neighbor_sample_cpu.h
csrc/cpu/reducer.h
csrc/cpu/relabel_cpu.cpp
csrc/cpu/relabel_cpu.h
csrc/cpu/rw_cpu.cpp
csrc/cpu/rw_cpu.h
csrc/cpu/saint_cpu.cpp
csrc/cpu/saint_cpu.h
csrc/cpu/sample_cpu.cpp
csrc/cpu/sample_cpu.h
csrc/cpu/spmm_cpu.cpp
csrc/cpu/spmm_cpu.h
csrc/cpu/spspmm_cpu.cpp
csrc/cpu/spspmm_cpu.h
csrc/cpu/utils.h
csrc/hip/atomics.cuh
csrc/hip/convert_hip.h
csrc/hip/convert_hip.hip
csrc/hip/convert_hip_hip.hip
csrc/hip/diag_hip.h
csrc/hip/diag_hip.hip
csrc/hip/diag_hip_hip.hip
csrc/hip/reducer.cuh
csrc/hip/rw_hip.h
csrc/hip/rw_hip.hip
csrc/hip/rw_hip_hip.hip
csrc/hip/spmm_hip.h
csrc/hip/spmm_hip.hip
csrc/hip/spmm_hip_hip.hip
csrc/hip/spspmm_hip.h
csrc/hip/spspmm_hip.hip
csrc/hip/utils.cuh
torch_sparse/__init__.py
torch_sparse/add.py
torch_sparse/bandwidth.py
torch_sparse/cat.py
torch_sparse/coalesce.py
torch_sparse/convert.py
torch_sparse/diag.py
torch_sparse/eye.py
torch_sparse/index_select.py
torch_sparse/masked_select.py
torch_sparse/matmul.py
torch_sparse/metis.py
torch_sparse/mul.py
torch_sparse/narrow.py
torch_sparse/padding.py
torch_sparse/permute.py
torch_sparse/reduce.py
torch_sparse/rw.py
torch_sparse/saint.py
torch_sparse/sample.py
torch_sparse/select.py
torch_sparse/spadd.py
torch_sparse/spmm.py
torch_sparse/spspmm.py
torch_sparse/storage.py
torch_sparse/tensor.py
torch_sparse/transpose.py
torch_sparse/utils.py
torch_sparse.egg-info/PKG-INFO
torch_sparse.egg-info/SOURCES.txt
torch_sparse.egg-info/dependency_links.txt
torch_sparse.egg-info/requires.txt
torch_sparse.egg-info/top_level.txt
\ No newline at end of file
scipy
[test]
pytest
pytest-cov
import importlib
import os.path as osp

import torch

__version__ = '0.6.13'

# Load every compiled extension module shipped next to this file (the HIP
# build is preferred over the CPU build) so its operators are registered
# under torch.ops.torch_sparse.
for library in [
        '_version', '_convert', '_diag', '_spmm', '_spspmm', '_metis', '_rw',
        '_saint', '_sample', '_ego_sample', '_hgt_sample', '_neighbor_sample',
        '_relabel'
]:
    hip_spec = importlib.machinery.PathFinder().find_spec(
        f'{library}_hip', [osp.dirname(__file__)])
    cpu_spec = importlib.machinery.PathFinder().find_spec(
        f'{library}_cpu', [osp.dirname(__file__)])
    spec = hip_spec or cpu_spec
    if spec is not None:
        torch.ops.load_library(spec.origin)
    else:  # pragma: no cover
        raise ImportError(f"Could not find module '{library}_cpu' in "
                          f"{osp.dirname(__file__)}")

# Toolkit version the extension was compiled against (-1 = CPU-only build).
cuda_version = torch.ops.torch_sparse.cuda_version()
if torch.cuda.is_available() and cuda_version != -1:  # pragma: no cover
    # Split e.g. 9020 -> (9, 2) or 10020 -> (10, 2).
    # NOTE(review): major/minor are computed but never compared in this
    # chunk — presumably the upstream CUDA-version check was truncated in
    # this HIP port; confirm against the original __init__.py.
    if cuda_version < 10000:
        major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2])
    else:
        major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
from .storage import SparseStorage # noqa
from .tensor import SparseTensor # noqa
from .transpose import t # noqa
from .narrow import narrow, __narrow_diag__ # noqa
from .select import select # noqa
from .index_select import index_select, index_select_nnz # noqa
from .masked_select import masked_select, masked_select_nnz # noqa
from .permute import permute # noqa
from .diag import remove_diag, set_diag, fill_diag, get_diag # noqa
from .add import add, add_, add_nnz, add_nnz_ # noqa
from .mul import mul, mul_, mul_nnz, mul_nnz_ # noqa
from .reduce import sum, mean, min, max # noqa
from .matmul import matmul # noqa
from .cat import cat # noqa
from .rw import random_walk # noqa
from .metis import partition # noqa
from .bandwidth import reverse_cuthill_mckee # noqa
from .saint import saint_subgraph # noqa
from .padding import padded_index, padded_index_select # noqa
from .sample import sample, sample_adj # noqa
from .convert import to_torch_sparse, from_torch_sparse # noqa
from .convert import to_scipy, from_scipy # noqa
from .coalesce import coalesce # noqa
from .transpose import transpose # noqa
from .eye import eye # noqa
from .spmm import spmm # noqa
from .spspmm import spspmm # noqa
from .spadd import spadd # noqa
# Public names re-exported at the torch_sparse package root.
__all__ = [
    'SparseStorage',
    'SparseTensor',
    't',
    'narrow',
    '__narrow_diag__',
    'select',
    'index_select',
    'index_select_nnz',
    'masked_select',
    'masked_select_nnz',
    'permute',
    'remove_diag',
    'set_diag',
    'fill_diag',
    'get_diag',
    'add',
    'add_',
    'add_nnz',
    'add_nnz_',
    'mul',
    'mul_',
    'mul_nnz',
    'mul_nnz_',
    'sum',
    'mean',
    'min',
    'max',
    'matmul',
    'cat',
    'random_walk',
    'partition',
    'reverse_cuthill_mckee',
    'saint_subgraph',
    'padded_index',
    'padded_index_select',
    'to_torch_sparse',
    'from_torch_sparse',
    'to_scipy',
    'from_scipy',
    'coalesce',
    'transpose',
    'eye',
    'spmm',
    'spspmm',
    'spadd',
    '__version__',
]
from typing import Optional
import torch
from torch import Tensor
from torch_scatter import gather_csr
from torch_sparse.tensor import SparseTensor
@torch.jit._overload  # noqa: F811
def add(src, other):  # noqa: F811
    # type: (SparseTensor, Tensor) -> SparseTensor
    pass


@torch.jit._overload  # noqa: F811
def add(src, other):  # noqa: F811
    # type: (SparseTensor, SparseTensor) -> SparseTensor
    pass


def add(src, other):  # noqa: F811
    """Element-wise addition of ``src`` with a dense vector or another
    :class:`SparseTensor`.

    A dense ``other`` must be a column vector ``(src.size(0), 1)`` (added
    row-wise) or a row vector ``(1, src.size(1))`` (added column-wise); it
    is only added to the stored non-zero positions. A sparse ``other`` is
    merged entry-wise with duplicate positions summed via coalescing.

    Raises:
        ValueError: if a dense ``other`` matches neither vector shape.
        NotImplementedError: for any other ``other`` type.
    """
    if isinstance(other, Tensor):
        rowptr, col, value = src.csr()
        if other.size(0) == src.size(0) and other.size(1) == 1:  # Row-wise.
            # Broadcast one entry per row across that row's non-zeros.
            other = gather_csr(other.squeeze(1), rowptr)
        elif other.size(0) == 1 and other.size(1) == src.size(1):  # Col-wise.
            other = other.squeeze(0)[col]
        else:
            raise ValueError(
                f'Size mismatch: Expected size ({src.size(0)}, 1, ...) or '
                f'(1, {src.size(1)}, ...), but got size {other.size()}.')
        if value is not None:
            value = other.to(value.dtype).add_(value)
        else:
            # Implicit unit values: materialize them as other + 1. The
            # in-place add_ targets the freshly gathered/indexed tensor
            # above, so the caller's input is not modified.
            value = other.add_(1)
        return src.set_value(value, layout='coo')
    elif isinstance(other, SparseTensor):
        rowA, colA, valueA = src.coo()
        rowB, colB, valueB = other.coo()
        row = torch.cat([rowA, rowB], dim=0)
        col = torch.cat([colA, colB], dim=0)
        # Values are kept only when both operands carry them.
        value: Optional[Tensor] = None
        if valueA is not None and valueB is not None:
            value = torch.cat([valueA, valueB], dim=0)
        # Output shape is the element-wise maximum of both shapes.
        M = max(src.size(0), other.size(0))
        N = max(src.size(1), other.size(1))
        sparse_sizes = (M, N)
        out = SparseTensor(row=row, col=col, value=value,
                           sparse_sizes=sparse_sizes)
        # Coalescing with 'sum' performs the actual addition of duplicates.
        out = out.coalesce(reduce='sum')
        return out
    else:
        raise NotImplementedError
def add_(src: SparseTensor, other: torch.Tensor) -> SparseTensor:
    """In-place variant of :func:`add` for a dense vector ``other``.

    ``other`` must be a column vector ``(src.size(0), 1)`` or a row vector
    ``(1, src.size(1))``; its entries are added to ``src``'s stored value
    tensor in place via ``set_value_``.

    Raises:
        ValueError: if ``other`` matches neither vector shape.
    """
    rowptr, col, value = src.csr()
    if other.size(0) == src.size(0) and other.size(1) == 1:  # Row-wise.
        other = gather_csr(other.squeeze(1), rowptr)
    elif other.size(0) == 1 and other.size(1) == src.size(1):  # Col-wise.
        other = other.squeeze(0)[col]
    else:
        raise ValueError(
            f'Size mismatch: Expected size ({src.size(0)}, 1, ...) or '
            f'(1, {src.size(1)}, ...), but got size {other.size()}.')
    if value is not None:
        value = value.add_(other.to(value.dtype))
    else:
        # Implicit unit values are materialized as other + 1.
        value = other.add_(1)
    return src.set_value_(value, layout='coo')
def add_nnz(src: SparseTensor, other: torch.Tensor,
            layout: Optional[str] = None) -> SparseTensor:
    """Return ``src`` with ``other`` added to each stored non-zero value.

    When ``src`` has no explicit value tensor, the implicit unit values are
    materialized as ``other + 1``. Inputs are left untouched.
    """
    current = src.storage.value()
    if current is None:
        new_value = other.add(1)
    else:
        new_value = current.add(other.to(current.dtype))
    return src.set_value(new_value, layout=layout)
def add_nnz_(src: SparseTensor, other: torch.Tensor,
             layout: Optional[str] = None) -> SparseTensor:
    """In-place counterpart of :func:`add_nnz`: mutates ``src``'s value
    tensor (when present) and stores the result back via ``set_value_``.
    """
    current = src.storage.value()
    if current is None:
        # No explicit values yet, so there is nothing to mutate in place;
        # materialize the implicit unit values as other + 1.
        updated = other.add(1)
    else:
        updated = current.add_(other.to(current.dtype))
    return src.set_value_(updated, layout=layout)
# Attach the functional API as SparseTensor methods and wire up the Python
# arithmetic protocol: `+` and reflected `+` use the out-of-place add,
# `+=` uses the in-place variant.
SparseTensor.add = lambda self, other: add(self, other)
SparseTensor.add_ = lambda self, other: add_(self, other)
SparseTensor.add_nnz = lambda self, other, layout=None: add_nnz(
    self, other, layout)
SparseTensor.add_nnz_ = lambda self, other, layout=None: add_nnz_(
    self, other, layout)
SparseTensor.__add__ = SparseTensor.add
SparseTensor.__radd__ = SparseTensor.add
SparseTensor.__iadd__ = SparseTensor.add_
import scipy.sparse as sp
from typing import Tuple, Optional
import torch
from torch_sparse.tensor import SparseTensor
from torch_sparse.permute import permute
def reverse_cuthill_mckee(src: SparseTensor,
                          is_symmetric: Optional[bool] = None
                          ) -> Tuple[SparseTensor, torch.Tensor]:
    """Permute ``src`` into the reverse Cuthill-McKee ordering, which tends
    to reduce matrix bandwidth.

    Args:
        src: The sparse matrix to reorder.
        is_symmetric: Whether ``src`` is symmetric; detected automatically
            when ``None``. Non-symmetric inputs are symmetrized first.

    Returns:
        A tuple ``(out, perm)`` of the permuted matrix and the permutation
        indices as a ``torch.long`` tensor on ``src``'s device.
    """
    if is_symmetric is None:
        is_symmetric = src.is_symmetric()
    if not is_symmetric:
        src = src.to_symmetric()
    # The actual ordering is computed by SciPy on a CSR copy of the matrix.
    sp_src = src.to_scipy(layout='csr')
    perm = sp.csgraph.reverse_cuthill_mckee(sp_src, symmetric_mode=True).copy()
    perm = torch.from_numpy(perm).to(torch.long).to(src.device())
    out = permute(src, perm)
    return out, perm


SparseTensor.reverse_cuthill_mckee = reverse_cuthill_mckee
from typing import Optional, List, Tuple
import torch
from torch_sparse.storage import SparseStorage
from torch_sparse.tensor import SparseTensor
@torch.jit._overload  # noqa: F811
def cat(tensors, dim):  # noqa: F811
    # type: (List[SparseTensor], int) -> SparseTensor
    pass


@torch.jit._overload  # noqa: F811
def cat(tensors, dim):  # noqa: F811
    # type: (List[SparseTensor], Tuple[int, int]) -> SparseTensor
    pass


@torch.jit._overload  # noqa: F811
def cat(tensors, dim):  # noqa: F811
    # type: (List[SparseTensor], List[int]) -> SparseTensor
    pass


def cat(tensors, dim):  # noqa: F811
    """Concatenate a non-empty list of SparseTensors along ``dim``.

    An integer ``dim`` (negative values are normalized first) selects
    row-wise (0), column-wise (1), or value-dimension (> 1) concatenation.
    A pair ``dim`` sorted to ``[0, 1]`` concatenates diagonally.

    Raises:
        IndexError: if an integer ``dim`` is out of range.
    """
    assert len(tensors) > 0
    if isinstance(dim, int):
        dim = tensors[0].dim() + dim if dim < 0 else dim
        if dim == 0:
            return cat_first(tensors)
        elif dim == 1:
            # Fixed: removed an unreachable stray `pass` that followed this
            # return statement in the original.
            return cat_second(tensors)
        elif dim > 1 and dim < tensors[0].dim():
            # Concatenate along a value dimension; every input must carry
            # an explicit value tensor for this to be well-defined.
            values = []
            for tensor in tensors:
                value = tensor.storage.value()
                assert value is not None
                values.append(value)
            value = torch.cat(values, dim=dim - 1)
            return tensors[0].set_value(value, layout='coo')
        else:
            raise IndexError(
                (f'Dimension out of range: Expected to be in range of '
                 f'[{-tensors[0].dim()}, {tensors[0].dim() - 1}], but got '
                 f'{dim}.'))
    else:
        assert isinstance(dim, (tuple, list))
        assert len(dim) == 2
        assert sorted(dim) == [0, 1]
        return cat_diag(tensors)
def cat_first(tensors: List[SparseTensor]) -> SparseTensor:
    """Concatenate sparse matrices vertically (along dim 0).

    Cached CSR fields (row, rowptr, rowcount) are stitched together and
    kept only when *every* input provides them; otherwise they are dropped
    and left for lazy recomputation.
    """
    rows: List[torch.Tensor] = []
    rowptrs: List[torch.Tensor] = []
    cols: List[torch.Tensor] = []
    values: List[torch.Tensor] = []
    sparse_sizes: List[int] = [0, 0]
    rowcounts: List[torch.Tensor] = []
    nnz: int = 0
    for tensor in tensors:
        row = tensor.storage._row
        if row is not None:
            # Shift row indices by the number of rows accumulated so far.
            rows.append(row + sparse_sizes[0])
        rowptr = tensor.storage._rowptr
        if rowptr is not None:
            # The first tensor keeps its full rowptr (incl. the leading 0);
            # later ones drop the leading entry and shift by the nnz so far.
            rowptrs.append(rowptr[1:] + nnz if len(rowptrs) > 0 else rowptr)
        cols.append(tensor.storage._col)
        value = tensor.storage._value
        if value is not None:
            values.append(value)
        rowcount = tensor.storage._rowcount
        if rowcount is not None:
            rowcounts.append(rowcount)
        # Rows add up; the column count is the widest input.
        sparse_sizes[0] += tensor.sparse_size(0)
        sparse_sizes[1] = max(sparse_sizes[1], tensor.sparse_size(1))
        nnz += tensor.nnz()
    # Only keep a cached field when every input contributed it.
    row: Optional[torch.Tensor] = None
    if len(rows) == len(tensors):
        row = torch.cat(rows, dim=0)
    rowptr: Optional[torch.Tensor] = None
    if len(rowptrs) == len(tensors):
        rowptr = torch.cat(rowptrs, dim=0)
    col = torch.cat(cols, dim=0)
    value: Optional[torch.Tensor] = None
    if len(values) == len(tensors):
        value = torch.cat(values, dim=0)
    rowcount: Optional[torch.Tensor] = None
    if len(rowcounts) == len(tensors):
        rowcount = torch.cat(rowcounts, dim=0)
    storage = SparseStorage(row=row, rowptr=rowptr, col=col, value=value,
                            sparse_sizes=(sparse_sizes[0], sparse_sizes[1]),
                            rowcount=rowcount, colptr=None, colcount=None,
                            csr2csc=None, csc2csr=None, is_sorted=True)
    return tensors[0].from_storage(storage)
def cat_second(tensors: List[SparseTensor]) -> SparseTensor:
    """Concatenate sparse tensors along the second (column) dimension.

    Column indices are shifted by the running column count; row indices
    are kept as-is, so the result is not row-major sorted
    (``is_sorted=False``). CSC caches (``colptr``, ``colcount``) survive
    only when every input tensor provides them.
    """
    row_parts: List[torch.Tensor] = []
    col_parts: List[torch.Tensor] = []
    value_parts: List[torch.Tensor] = []
    colptr_parts: List[torch.Tensor] = []
    colcount_parts: List[torch.Tensor] = []
    out_sizes: List[int] = [0, 0]
    total_nnz: int = 0

    for t in tensors:
        r, _, v = t.coo()
        row_parts.append(r)
        col_parts.append(t.storage._col + out_sizes[1])
        if v is not None:
            value_parts.append(v)

        cp = t.storage._colptr
        if cp is not None:
            # Keep the leading zero of the first colptr only; offset the
            # rest by the entries concatenated so far.
            colptr_parts.append(cp if len(colptr_parts) == 0 else cp[1:] +
                                total_nnz)

        cc = t.storage._colcount
        if cc is not None:
            colcount_parts.append(cc)

        out_sizes[0] = max(out_sizes[0], t.sparse_size(0))
        out_sizes[1] += t.sparse_size(1)
        total_nnz += t.nnz()

    n = len(tensors)
    row = torch.cat(row_parts, dim=0)
    col = torch.cat(col_parts, dim=0)
    value: Optional[torch.Tensor] = None
    if len(value_parts) == n:
        value = torch.cat(value_parts, dim=0)
    colptr: Optional[torch.Tensor] = None
    if len(colptr_parts) == n:
        colptr = torch.cat(colptr_parts, dim=0)
    colcount: Optional[torch.Tensor] = None
    if len(colcount_parts) == n:
        colcount = torch.cat(colcount_parts, dim=0)

    storage = SparseStorage(row=row, rowptr=None, col=col, value=value,
                            sparse_sizes=(out_sizes[0], out_sizes[1]),
                            rowcount=None, colptr=colptr, colcount=colcount,
                            csr2csc=None, csc2csr=None, is_sorted=False)
    return tensors[0].from_storage(storage)
def cat_diag(tensors: List[SparseTensor]) -> SparseTensor:
    """Concatenate ``tensors`` into one block-diagonal sparse tensor.

    Both row and column indices of tensor ``i`` are shifted by the summed
    sparse sizes of all preceding tensors. Every cached storage field
    (``rowptr``, ``rowcount``, ``colptr``, ``colcount``, ``csr2csc``,
    ``csc2csr``) is propagated to the result, but only when *all* inputs
    provide it.
    """
    assert len(tensors) > 0
    rows: List[torch.Tensor] = []
    rowptrs: List[torch.Tensor] = []
    cols: List[torch.Tensor] = []
    values: List[torch.Tensor] = []
    sparse_sizes: List[int] = [0, 0]  # running (row, col) offsets
    rowcounts: List[torch.Tensor] = []
    colptrs: List[torch.Tensor] = []
    colcounts: List[torch.Tensor] = []
    csr2cscs: List[torch.Tensor] = []
    csc2csrs: List[torch.Tensor] = []
    nnz: int = 0  # running number of non-zero entries
    for tensor in tensors:
        row = tensor.storage._row
        if row is not None:
            # Shift row indices by the rows consumed so far.
            rows.append(row + sparse_sizes[0])
        rowptr = tensor.storage._rowptr
        if rowptr is not None:
            # Every rowptr starts with 0; drop it for all but the first
            # tensor and offset by the entries concatenated so far.
            rowptrs.append(rowptr[1:] + nnz if len(rowptrs) > 0 else rowptr)
        cols.append(tensor.storage._col + sparse_sizes[1])
        value = tensor.storage._value
        if value is not None:
            values.append(value)
        rowcount = tensor.storage._rowcount
        if rowcount is not None:
            rowcounts.append(rowcount)
        colptr = tensor.storage._colptr
        if colptr is not None:
            # Same leading-zero handling as for rowptr above.
            colptrs.append(colptr[1:] + nnz if len(colptrs) > 0 else colptr)
        colcount = tensor.storage._colcount
        if colcount is not None:
            colcounts.append(colcount)
        csr2csc = tensor.storage._csr2csc
        if csr2csc is not None:
            # Permutations index into the entry list, so offset by nnz.
            csr2cscs.append(csr2csc + nnz)
        csc2csr = tensor.storage._csc2csr
        if csc2csr is not None:
            csc2csrs.append(csc2csr + nnz)
        sparse_sizes[0] += tensor.sparse_size(0)
        sparse_sizes[1] += tensor.sparse_size(1)
        nnz += tensor.nnz()
    # Each cached field is kept only if all inputs supplied it.
    row: Optional[torch.Tensor] = None
    if len(rows) == len(tensors):
        row = torch.cat(rows, dim=0)
    rowptr: Optional[torch.Tensor] = None
    if len(rowptrs) == len(tensors):
        rowptr = torch.cat(rowptrs, dim=0)
    col = torch.cat(cols, dim=0)
    value: Optional[torch.Tensor] = None
    if len(values) == len(tensors):
        value = torch.cat(values, dim=0)
    rowcount: Optional[torch.Tensor] = None
    if len(rowcounts) == len(tensors):
        rowcount = torch.cat(rowcounts, dim=0)
    colptr: Optional[torch.Tensor] = None
    if len(colptrs) == len(tensors):
        colptr = torch.cat(colptrs, dim=0)
    colcount: Optional[torch.Tensor] = None
    if len(colcounts) == len(tensors):
        colcount = torch.cat(colcounts, dim=0)
    csr2csc: Optional[torch.Tensor] = None
    if len(csr2cscs) == len(tensors):
        csr2csc = torch.cat(csr2cscs, dim=0)
    csc2csr: Optional[torch.Tensor] = None
    if len(csc2csrs) == len(tensors):
        csc2csr = torch.cat(csc2csrs, dim=0)
    storage = SparseStorage(row=row, rowptr=rowptr, col=col, value=value,
                            sparse_sizes=(sparse_sizes[0], sparse_sizes[1]),
                            rowcount=rowcount, colptr=colptr,
                            colcount=colcount, csr2csc=csr2csc,
                            csc2csr=csc2csr, is_sorted=True)
    return tensors[0].from_storage(storage)
import torch
from torch_sparse.storage import SparseStorage
def coalesce(index, value, m, n, op="add"):
    """Row-wise sorts :obj:`value` and removes duplicate entries. Duplicate
    entries are removed by scattering them together. For scattering, any
    operation of `"torch_scatter"<https://github.com/rusty1s/pytorch_scatter>`_
    can be used.

    Args:
        index (:class:`LongTensor`): The index tensor of sparse matrix.
        value (:class:`Tensor`): The value tensor of sparse matrix.
        m (int): The first dimension of sparse matrix.
        n (int): The second dimension of sparse matrix.
        op (string, optional): The scatter operation to use. (default:
            :obj:`"add"`)

    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """
    store = SparseStorage(row=index[0], col=index[1], value=value,
                          sparse_sizes=(m, n), is_sorted=False)
    store = store.coalesce(reduce=op)
    new_index = torch.stack([store.row(), store.col()], dim=0)
    return new_index, store.value()
import numpy as np
import scipy.sparse
import torch
from torch import from_numpy
def to_torch_sparse(index, value, m, n):
    """Build a ``torch.sparse_coo_tensor`` of shape ``(m, n)`` from a
    ``(2, nnz)`` COO ``index`` tensor and its ``value`` tensor."""
    size = (m, n)
    return torch.sparse_coo_tensor(index.detach(), value, size)
def from_torch_sparse(A):
    """Return the ``(indices, values)`` COO representation of ``A``."""
    indices = A.indices().detach()
    return indices, A.values()
def to_scipy(index, value, m, n):
    """Convert a COO ``index``/``value`` pair into a
    :class:`scipy.sparse.coo_matrix` of shape ``(m, n)``."""
    # SciPy only understands host memory.
    assert not index.is_cuda and not value.is_cuda
    row, col = index.detach()
    return scipy.sparse.coo_matrix((value.detach(), (row, col)), (m, n))
def from_scipy(A):
    """Convert any SciPy sparse matrix into a ``(2, nnz)`` COO index
    tensor and a value tensor."""
    coo = A.tocoo()
    # SciPy may store 32-bit indices; torch expects int64 for indexing.
    row = from_numpy(coo.row.astype(np.int64))
    col = from_numpy(coo.col.astype(np.int64))
    value = from_numpy(coo.data)
    return torch.stack([row, col], dim=0), value
from typing import Optional
import torch
from torch import Tensor
from torch_sparse.storage import SparseStorage
from torch_sparse.tensor import SparseTensor
def remove_diag(src: SparseTensor, k: int = 0) -> SparseTensor:
    """Remove every entry on the ``k``-th diagonal of ``src``.

    Cached ``rowcount``/``colcount`` fields are kept consistent by
    decrementing the counts of the rows/columns that lost an entry.
    """
    row, col, value = src.coo()
    # Entry (i, j) lies on diagonal k iff i == j - k.
    if k == 0:
        keep = row != col
    else:
        keep = row != (col - k)
    new_row, new_col = row[keep], col[keep]
    if value is not None:
        value = value[keep]

    rowcount = src.storage._rowcount
    colcount = src.storage._colcount
    if rowcount is not None or colcount is not None:
        removed = ~keep
        if rowcount is not None:
            rowcount = rowcount.clone()
            rowcount[row[removed]] -= 1
        if colcount is not None:
            colcount = colcount.clone()
            colcount[col[removed]] -= 1

    storage = SparseStorage(row=new_row, rowptr=None, col=new_col, value=value,
                            sparse_sizes=src.sparse_sizes(), rowcount=rowcount,
                            colptr=None, colcount=colcount, csr2csc=None,
                            csc2csr=None, is_sorted=True)
    return src.from_storage(storage)
def set_diag(src: SparseTensor, values: Optional[Tensor] = None,
             k: int = 0) -> SparseTensor:
    """Set the ``k``-th diagonal of ``src`` to ``values``, inserting
    entries into the sparsity pattern where necessary.

    Args:
        src: The input sparse tensor.
        values: Optional dense vector of diagonal values. When omitted
            and ``src`` carries values, the diagonal is filled with ones.
        k: Diagonal offset (0 = main, > 0 above, < 0 below).
    """
    # Clear any existing entries on that diagonal first.
    src = remove_diag(src, k=k)
    row, col, value = src.coo()
    # Custom C++ op; presumably a boolean mask over the enlarged entry
    # list that is True at positions of the original (off-diagonal)
    # entries and False at the slots reserved for new diagonal entries.
    mask = torch.ops.torch_sparse.non_diag_mask(row, col, src.size(0),
                                                src.size(1), k)
    inv_mask = ~mask
    # First diagonal row index, and number of diagonal entries to insert
    # (the enlarged list minus the surviving original entries).
    start, num_diag = -k if k < 0 else 0, mask.numel() - row.numel()
    diag = torch.arange(start, start + num_diag, device=row.device)
    # Scatter old entries and the new diagonal into fresh index tensors.
    new_row = row.new_empty(mask.size(0))
    new_row[mask] = row
    new_row[inv_mask] = diag
    new_col = col.new_empty(mask.size(0))
    new_col[mask] = col
    new_col[inv_mask] = diag.add_(k)  # in-place: diagonal column = row + k
    new_value: Optional[Tensor] = None
    if value is not None:
        new_value = value.new_empty((mask.size(0), ) + value.size()[1:])
        new_value[mask] = value
        if values is not None:
            new_value[inv_mask] = values
        else:
            # No explicit values given: fill the diagonal with ones.
            new_value[inv_mask] = torch.ones((num_diag, ), dtype=value.dtype,
                                             device=value.device)
    # Each row/column that gained a diagonal entry has its count bumped.
    rowcount = src.storage._rowcount
    if rowcount is not None:
        rowcount = rowcount.clone()
        rowcount[start:start + num_diag] += 1
    colcount = src.storage._colcount
    if colcount is not None:
        colcount = colcount.clone()
        colcount[start + k:start + num_diag + k] += 1
    storage = SparseStorage(row=new_row, rowptr=None, col=new_col,
                            value=new_value, sparse_sizes=src.sparse_sizes(),
                            rowcount=rowcount, colptr=None, colcount=colcount,
                            csr2csc=None, csc2csr=None, is_sorted=True)
    return src.from_storage(storage)
def fill_diag(src: SparseTensor, fill_value: float,
              k: int = 0) -> SparseTensor:
    """Set every entry on the ``k``-th diagonal of ``src`` to
    ``fill_value`` (delegates to :func:`set_diag`)."""
    # Length of the k-th diagonal of an (m, n) matrix.
    if k < 0:
        num_diag = min(src.sparse_size(0) + k, src.sparse_size(1))
    else:
        num_diag = min(src.sparse_size(0), src.sparse_size(1) - k)

    value = src.storage.value()
    if value is None:
        # Value-less tensor: only the sparsity pattern gains the diagonal.
        return set_diag(src, None, k)
    sizes = [num_diag] + src.sizes()[2:]
    return set_diag(src, value.new_full(sizes, fill_value), k)
def get_diag(src: SparseTensor) -> Tensor:
    """Return the main diagonal of ``src`` as a dense tensor.

    For value-less tensors, a diagonal position holds 1 wherever the
    sparsity pattern contains an entry ``(i, i)``; all missing diagonal
    positions are 0.
    """
    row, col, value = src.coo()
    if value is None:
        # Fix: create the implicit all-one values on the same device as
        # the index tensors; a plain torch.ones() lands on the CPU and
        # the indexed assignment below would then mix CPU and CUDA
        # tensors for CUDA inputs.
        value = torch.ones(row.size(0), device=row.device)
    sizes = list(value.size())
    sizes[0] = min(src.size(0), src.size(1))
    out = value.new_zeros(sizes)
    mask = row == col
    out[row[mask]] = value[mask]
    return out
# Expose the diagonal helpers as SparseTensor methods, so callers can
# write e.g. ``mat.fill_diag(1.)`` instead of ``fill_diag(mat, 1.)``.
SparseTensor.remove_diag = lambda self, k=0: remove_diag(self, k)
SparseTensor.set_diag = lambda self, values=None, k=0: set_diag(
    self, values, k)
SparseTensor.fill_diag = lambda self, fill_value, k=0: fill_diag(
    self, fill_value, k)
SparseTensor.get_diag = lambda self: get_diag(self)
import torch
def eye(m, dtype=None, device=None):
    """Returns a sparse matrix with ones on the diagonal and zeros elsewhere.

    Args:
        m (int): The first dimension of sparse matrix.
        dtype (`torch.dtype`, optional): The desired data type of returned
            value vector. (default is set by `torch.set_default_tensor_type()`)
        device (`torch.device`, optional): The desired device of returned
            tensors. (default is set by `torch.set_default_tensor_type()`)

    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """
    diag = torch.arange(m, dtype=torch.long, device=device)
    index = torch.stack([diag, diag], dim=0)
    return index, torch.ones(m, dtype=dtype, device=device)
from typing import Optional
import torch
from torch_scatter import gather_csr
from torch_sparse.storage import SparseStorage, get_layout
from torch_sparse.tensor import SparseTensor
def index_select(src: SparseTensor, dim: int,
                 idx: torch.Tensor) -> SparseTensor:
    """Select the slices of ``src`` given by the 1-D index tensor ``idx``
    along dimension ``dim``.

    ``dim == 0`` gathers rows via the CSR representation, ``dim == 1``
    gathers columns via the CSC representation, and ``dim > 1`` indexes
    the dense value dimensions.
    """
    dim = src.dim() + dim if dim < 0 else dim  # normalize negative dims
    assert idx.dim() == 1
    if dim == 0:
        old_rowptr, col, value = src.csr()
        rowcount = src.storage.rowcount()
        rowcount = rowcount[idx]
        # Rebuild the output's row pointer from the selected row counts.
        rowptr = col.new_zeros(idx.size(0) + 1)
        torch.cumsum(rowcount, dim=0, out=rowptr[1:])
        row = torch.arange(idx.size(0),
                           device=col.device).repeat_interleave(rowcount)
        # For each output entry, find its source position: entries of
        # output row r start at old_rowptr[idx[r]] in the source arrays.
        perm = torch.arange(row.size(0), device=row.device)
        perm += gather_csr(old_rowptr[idx] - rowptr[:-1], rowptr)
        col = col[perm]
        if value is not None:
            value = value[perm]
        sparse_sizes = (idx.size(0), src.sparse_size(1))
        storage = SparseStorage(row=row, rowptr=rowptr, col=col, value=value,
                                sparse_sizes=sparse_sizes, rowcount=rowcount,
                                colptr=None, colcount=None, csr2csc=None,
                                csc2csr=None, is_sorted=True)
        return src.from_storage(storage)
    elif dim == 1:
        # Mirror of the dim == 0 case, on the CSC representation.
        old_colptr, row, value = src.csc()
        colcount = src.storage.colcount()
        colcount = colcount[idx]
        colptr = row.new_zeros(idx.size(0) + 1)
        torch.cumsum(colcount, dim=0, out=colptr[1:])
        col = torch.arange(idx.size(0),
                           device=row.device).repeat_interleave(colcount)
        perm = torch.arange(col.size(0), device=col.device)
        perm += gather_csr(old_colptr[idx] - colptr[:-1], colptr)
        row = row[perm]
        # Entries were gathered in CSC order; sort back to row-major order
        # and remember the permutation as the new csc2csr cache.
        csc2csr = (idx.size(0) * row + col).argsort()
        row, col = row[csc2csr], col[csc2csr]
        if value is not None:
            value = value[perm][csc2csr]
        sparse_sizes = (src.sparse_size(0), idx.size(0))
        storage = SparseStorage(row=row, rowptr=None, col=col, value=value,
                                sparse_sizes=sparse_sizes, rowcount=None,
                                colptr=colptr, colcount=colcount, csr2csc=None,
                                csc2csr=csc2csr, is_sorted=True)
        return src.from_storage(storage)
    else:
        # Dense dimension: value dim 0 is the nnz axis, hence dim - 1.
        value = src.storage.value()
        if value is not None:
            return src.set_value(value.index_select(dim - 1, idx),
                                 layout='coo')
        else:
            # Cannot index a dense dimension of a value-less tensor.
            raise ValueError
def index_select_nnz(src: SparseTensor, idx: torch.Tensor,
                     layout: Optional[str] = None) -> SparseTensor:
    """Select the non-zero entries of ``src`` given by the 1-D index
    ``idx``, interpreted in ``layout`` order ('coo'/'csr' or 'csc')."""
    assert idx.dim() == 1

    if get_layout(layout) == 'csc':
        # ``idx`` addresses entries in CSC order; translate it to the
        # row-major order the storage uses.
        idx = src.storage.csc2csr()[idx]

    r, c, v = src.coo()
    r, c = r[idx], c[idx]
    if v is not None:
        v = v[idx]

    return SparseTensor(row=r, rowptr=None, col=c, value=v,
                        sparse_sizes=src.sparse_sizes(), is_sorted=True)
# Register the selection helpers as SparseTensor methods.
SparseTensor.index_select = lambda self, dim, idx: index_select(self, dim, idx)
tmp = lambda self, idx, layout=None: index_select_nnz(  # noqa
    self, idx, layout)
SparseTensor.index_select_nnz = tmp
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment