Commit 4d4e064b authored by yangzhong

push 1.6.0 version

parent 6907f8b7
@@ -14,17 +14,14 @@ from torch.utils.cpp_extension import (CUDA_HOME, BuildExtension, CppExtension,
__version__ = '1.6.0'
URL = 'https://github.com/rusty1s/pytorch_cluster'
WITH_HIP = torch.cuda.is_available() and CUDA_HOME is not None
suffices = ['cpu', 'cuda'] if WITH_HIP else ['cpu']
WITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
suffices = ['cpu', 'cuda'] if WITH_CUDA else ['cpu']
if os.getenv('FORCE_CUDA', '0') == '1':
suffices = ['cuda', 'cpu']
if os.getenv('FORCE_ONLY_HIP', '0') == '1':
suffices = ['hip']
if os.getenv('FORCE_ONLY_CUDA', '0') == '1':
suffices = ['cuda']
if os.getenv('FORCE_ONLY_CPU', '0') == '1':
suffices = ['cpu']
ROCM_PATH = os.getenv('ROCM_PATH')
HIPLIB2 = osp.join(ROCM_PATH, 'hiprand', 'include')
HIPLIB1 = osp.join(ROCM_PATH, 'hipsparse', 'include')
BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1'
@@ -41,7 +38,6 @@ def get_extensions():
if not os.name == 'nt': # Not on Windows:
extra_compile_args['cxx'] += ['-Wno-sign-compare']
extra_link_args = ['-s']
extra_link_args += ['-fopenmp','-lomp']
info = parallel_info()
if ('backend: OpenMP' in info and 'OpenMP not found' not in info
@@ -59,12 +55,12 @@ def get_extensions():
extra_compile_args['cxx'] += ['-arch', 'arm64']
extra_link_args += ['-arch', 'arm64']
if suffix == 'hip':
define_macros += [('WITH_HIP', None)]
hipcc_flags = os.getenv('HIPCC_FLAGS', '')
hipcc_flags = [] if hipcc_flags == '' else hipcc_flags.split(' ')
hipcc_flags += ['--expt-relaxed-constexpr', '-O2']
extra_compile_args['hipcc'] = hipcc_flags
if suffix == 'cuda':
define_macros += [('WITH_CUDA', None)]
nvcc_flags = os.getenv('NVCC_FLAGS', '')
nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
nvcc_flags += ['--expt-relaxed-constexpr', '-O2']
extra_compile_args['nvcc'] = nvcc_flags
name = main.split(os.sep)[-1][:-4]
sources = [main]
@@ -73,16 +69,15 @@ def get_extensions():
if osp.exists(path):
sources += [path]
path = osp.join(extensions_dir, 'hip', f'{name}_hip.hip')
if suffix == 'hip' and osp.exists(path):
path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
if suffix == 'cuda' and osp.exists(path):
sources += [path]
Extension = CppExtension if suffix == 'cpu' else CUDAExtension
define_macros += [('TORCH_HIP_VERSION', 10000), ('__HIP__', None), ('__HCC__', None)]
extension = Extension(
f'torch_cluster._{name}_{suffix}',
sources,
include_dirs=[extensions_dir, HIPLIB1, HIPLIB2],
include_dirs=[extensions_dir],
define_macros=define_macros,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
@@ -126,5 +121,5 @@ setup(
BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False)
},
packages=find_packages(),
include_package_data=False,
include_package_data=True,
)
from itertools import product
import pytest
import torch
from torch import Tensor
from torch_cluster import fps
from .utils import grad_dtypes, devices, tensor
@torch.jit.script
def fps2(x: Tensor, ratio: Tensor) -> Tensor:
return fps(x, None, ratio, False)
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_fps(dtype, device):
x = tensor([
[-1, -1],
[-1, +1],
[+1, +1],
[+1, -1],
[-2, -2],
[-2, +2],
[+2, +2],
[+2, -2],
], dtype, device)
batch = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
out = fps(x, batch, random_start=False)
assert out.tolist() == [0, 2, 4, 6]
out = fps(x, batch, ratio=0.5, random_start=False)
assert out.tolist() == [0, 2, 4, 6]
out = fps(x, batch, ratio=torch.tensor(0.5, device=device),
random_start=False)
assert out.tolist() == [0, 2, 4, 6]
out = fps(x, batch, ratio=torch.tensor([0.5, 0.5], device=device),
random_start=False)
assert out.tolist() == [0, 2, 4, 6]
out = fps(x, random_start=False)
assert out.sort()[0].tolist() == [0, 5, 6, 7]
out = fps(x, ratio=0.5, random_start=False)
assert out.sort()[0].tolist() == [0, 5, 6, 7]
out = fps(x, ratio=torch.tensor(0.5, device=device), random_start=False)
assert out.sort()[0].tolist() == [0, 5, 6, 7]
out = fps(x, ratio=torch.tensor([0.5], device=device), random_start=False)
assert out.sort()[0].tolist() == [0, 5, 6, 7]
out = fps2(x, torch.tensor([0.5], device=device))
assert out.sort()[0].tolist() == [0, 5, 6, 7]
@pytest.mark.parametrize('device', devices)
def test_random_fps(device):
N = 1024
for _ in range(5):
pos = torch.randn((2 * N, 3), device=device)
batch_1 = torch.zeros(N, dtype=torch.long, device=device)
batch_2 = torch.ones(N, dtype=torch.long, device=device)
batch = torch.cat([batch_1, batch_2])
idx = fps(pos, batch, ratio=0.5)
assert idx.min() >= 0 and idx.max() < 2 * N
from itertools import product
import pytest
import torch
from torch_cluster import graclus_cluster
from .utils import dtypes, devices, tensor
tests = [{
'row': [0, 0, 1, 1, 1, 2, 2, 2, 3, 3],
'col': [1, 2, 0, 2, 3, 0, 1, 3, 1, 2],
}, {
'row': [0, 0, 1, 1, 1, 2, 2, 2, 3, 3],
'col': [1, 2, 0, 2, 3, 0, 1, 3, 1, 2],
'weight': [1, 2, 1, 3, 2, 2, 3, 1, 2, 1],
}]
def assert_correct(row, col, cluster):
row, col, cluster = row.to('cpu'), col.to('cpu'), cluster.to('cpu')
n = cluster.size(0)
# Every node was assigned a cluster.
assert cluster.min() >= 0
# There are no more than two nodes in each cluster.
_, index = torch.unique(cluster, return_inverse=True)
count = torch.zeros_like(cluster)
count.scatter_add_(0, index, torch.ones_like(cluster))
assert (count > 2).max() == 0
# Cluster value is minimal.
assert (cluster <= torch.arange(n, dtype=cluster.dtype)).sum() == n
# Corresponding clusters must be adjacent.
for i in range(n):
x = cluster[col[row == i]] == cluster[i] # Neighbors with same cluster
y = cluster == cluster[i] # Nodes with same cluster.
y[i] = 0 # Do not look at cluster of `i`.
assert x.sum() == y.sum()
@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
def test_graclus_cluster(test, dtype, device):
row = tensor(test['row'], torch.long, device)
col = tensor(test['col'], torch.long, device)
weight = tensor(test.get('weight'), dtype, device)
cluster = graclus_cluster(row, col, weight)
assert_correct(row, col, cluster)
from itertools import product
import pytest
from torch_cluster import grid_cluster
from .utils import dtypes, devices, tensor
tests = [{
'pos': [2, 6],
'size': [5],
'cluster': [0, 0],
}, {
'pos': [2, 6],
'size': [5],
'start': [0],
'cluster': [0, 1],
}, {
'pos': [[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]],
'size': [5, 5],
'cluster': [0, 5, 3, 0, 1],
}, {
'pos': [[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]],
'size': [5, 5],
'end': [19, 19],
'cluster': [0, 6, 4, 0, 1],
}]
@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
def test_grid_cluster(test, dtype, device):
pos = tensor(test['pos'], dtype, device)
size = tensor(test['size'], dtype, device)
start = tensor(test.get('start'), dtype, device)
end = tensor(test.get('end'), dtype, device)
cluster = grid_cluster(pos, size, start, end)
assert cluster.tolist() == test['cluster']
from itertools import product
import pytest
import torch
import scipy.spatial
from torch_cluster import knn, knn_graph
from .utils import grad_dtypes, devices, tensor
def to_set(edge_index):
return set([(i, j) for i, j in edge_index.t().tolist()])
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_knn(dtype, device):
x = tensor([
[-1, -1],
[-1, +1],
[+1, +1],
[+1, -1],
[-1, -1],
[-1, +1],
[+1, +1],
[+1, -1],
], dtype, device)
y = tensor([
[1, 0],
[-1, 0],
], dtype, device)
batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
batch_y = tensor([0, 1], torch.long, device)
edge_index = knn(x, y, 2)
assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 0), (1, 1)])
edge_index = knn(x, y, 2, batch_x, batch_y)
assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)])
if x.is_cuda:
edge_index = knn(x, y, 2, batch_x, batch_y, cosine=True)
assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)])
# Skipping a batch
batch_x = tensor([0, 0, 0, 0, 2, 2, 2, 2], torch.long, device)
batch_y = tensor([0, 2], torch.long, device)
edge_index = knn(x, y, 2, batch_x, batch_y)
assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)])
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_knn_graph(dtype, device):
x = tensor([
[-1, -1],
[-1, +1],
[+1, +1],
[+1, -1],
], dtype, device)
edge_index = knn_graph(x, k=2, flow='target_to_source')
assert to_set(edge_index) == set([(0, 1), (0, 3), (1, 0), (1, 2), (2, 1),
(2, 3), (3, 0), (3, 2)])
edge_index = knn_graph(x, k=2, flow='source_to_target')
assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2),
(3, 2), (0, 3), (2, 3)])
@pytest.mark.parametrize('dtype,device', product([torch.float], devices))
def test_knn_graph_large(dtype, device):
x = torch.randn(1000, 3, dtype=dtype, device=device)
edge_index = knn_graph(x, k=5, flow='target_to_source', loop=True)
tree = scipy.spatial.cKDTree(x.cpu().numpy())
_, col = tree.query(x.cpu(), k=5)
truth = set([(i, j) for i, ns in enumerate(col) for j in ns])
assert to_set(edge_index.cpu()) == truth
from itertools import product
import pytest
import torch
from torch_cluster import nearest
from .utils import grad_dtypes, devices, tensor
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_nearest(dtype, device):
x = tensor([
[-1, -1],
[-1, +1],
[+1, +1],
[+1, -1],
[-2, -2],
[-2, +2],
[+2, +2],
[+2, -2],
], dtype, device)
y = tensor([
[-1, 0],
[+1, 0],
[-2, 0],
[+2, 0],
], dtype, device)
batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
batch_y = tensor([0, 0, 1, 1], torch.long, device)
out = nearest(x, y, batch_x, batch_y)
assert out.tolist() == [0, 0, 1, 1, 2, 2, 3, 3]
out = nearest(x, y)
assert out.tolist() == [0, 0, 1, 1, 2, 2, 3, 3]
from itertools import product
import pytest
import torch
import scipy.spatial
from torch_cluster import radius, radius_graph
from .utils import grad_dtypes, devices, tensor
def to_set(edge_index):
return set([(i, j) for i, j in edge_index.t().tolist()])
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_radius(dtype, device):
x = tensor([
[-1, -1],
[-1, +1],
[+1, +1],
[+1, -1],
[-1, -1],
[-1, +1],
[+1, +1],
[+1, -1],
], dtype, device)
y = tensor([
[0, 0],
[0, 1],
], dtype, device)
batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
batch_y = tensor([0, 1], torch.long, device)
edge_index = radius(x, y, 2, max_num_neighbors=4)
assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 1),
(1, 2), (1, 5), (1, 6)])
edge_index = radius(x, y, 2, batch_x, batch_y, max_num_neighbors=4)
assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 5),
(1, 6)])
# Skipping a batch
batch_x = tensor([0, 0, 0, 0, 2, 2, 2, 2], torch.long, device)
batch_y = tensor([0, 2], torch.long, device)
edge_index = radius(x, y, 2, batch_x, batch_y, max_num_neighbors=4)
assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 5),
(1, 6)])
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_radius_graph(dtype, device):
x = tensor([
[-1, -1],
[-1, +1],
[+1, +1],
[+1, -1],
], dtype, device)
edge_index = radius_graph(x, r=2.5, flow='target_to_source')
assert to_set(edge_index) == set([(0, 1), (0, 3), (1, 0), (1, 2), (2, 1),
(2, 3), (3, 0), (3, 2)])
edge_index = radius_graph(x, r=2.5, flow='source_to_target')
assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2),
(3, 2), (0, 3), (2, 3)])
@pytest.mark.parametrize('dtype,device', product([torch.float], devices))
def test_radius_graph_large(dtype, device):
x = torch.randn(1000, 3, dtype=dtype, device=device)
edge_index = radius_graph(x, r=0.5, flow='target_to_source', loop=True,
max_num_neighbors=2000)
tree = scipy.spatial.cKDTree(x.cpu().numpy())
col = tree.query_ball_point(x.cpu(), r=0.5)
truth = set([(i, j) for i, ns in enumerate(col) for j in ns])
assert to_set(edge_index.cpu()) == truth
import pytest
import torch
from torch_cluster import random_walk
from .utils import devices, tensor
@pytest.mark.parametrize('device', devices)
def test_rw(device):
row = tensor([0, 1, 1, 1, 2, 2, 3, 3, 4, 4], torch.long, device)
col = tensor([1, 0, 2, 3, 1, 4, 1, 4, 2, 3], torch.long, device)
start = tensor([0, 1, 2, 3, 4], torch.long, device)
walk_length = 10
out = random_walk(row, col, start, walk_length)
assert out[:, 0].tolist() == start.tolist()
for n in range(start.size(0)):
cur = start[n].item()
for i in range(1, walk_length):
assert out[n, i].item() in col[row == cur].tolist()
cur = out[n, i].item()
row = tensor([0, 1], torch.long, device)
col = tensor([1, 0], torch.long, device)
start = tensor([0, 1, 2], torch.long, device)
walk_length = 4
out = random_walk(row, col, start, walk_length, num_nodes=3)
assert out.tolist() == [[0, 1, 0, 1, 0], [1, 0, 1, 0, 1], [2, 2, 2, 2, 2]]
import torch
from torch_cluster import neighbor_sampler
def test_neighbor_sampler():
torch.manual_seed(1234)
start = torch.tensor([0, 1])
cumdeg = torch.tensor([0, 3, 7])
e_id = neighbor_sampler(start, cumdeg, size=1.0)
assert e_id.tolist() == [0, 2, 1, 5, 6, 3, 4]
e_id = neighbor_sampler(start, cumdeg, size=3)
assert e_id.tolist() == [1, 0, 2, 4, 5, 6]
import torch
dtypes = [torch.half, torch.float, torch.double, torch.int, torch.long]
grad_dtypes = [torch.half, torch.float, torch.double]
devices = [torch.device('cpu')]
if torch.cuda.is_available():
devices += [torch.device(f'cuda:{torch.cuda.current_device()}')]
def tensor(x, dtype, device):
return None if x is None else torch.tensor(x, dtype=dtype, device=device)
Metadata-Version: 2.1
Name: torch-cluster
Version: 1.6.0
Summary: PyTorch Extension Library of Optimized Graph Cluster Algorithms
Home-page: https://github.com/rusty1s/pytorch_cluster
Download-URL: https://github.com/rusty1s/pytorch_cluster/archive/1.6.0.tar.gz
Author: Matthias Fey
Author-email: matthias.fey@tu-dortmund.de
Keywords: pytorch,geometric-deep-learning,graph-neural-networks,cluster-algorithms
Classifier: Development Status :: 5 - Production/Stable
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3 :: Only
Requires-Python: >=3.7
Description-Content-Type: text/markdown
Provides-Extra: test
License-File: LICENSE
[pypi-image]: https://badge.fury.io/py/torch-cluster.svg
[pypi-url]: https://pypi.python.org/pypi/torch-cluster
[testing-image]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/testing.yml/badge.svg
[testing-url]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/testing.yml
[linting-image]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/linting.yml/badge.svg
[linting-url]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/linting.yml
[coverage-image]: https://codecov.io/gh/rusty1s/pytorch_cluster/branch/master/graph/badge.svg
[coverage-url]: https://codecov.io/github/rusty1s/pytorch_cluster?branch=master
# PyTorch Cluster
[![PyPI Version][pypi-image]][pypi-url]
[![Testing Status][testing-image]][testing-url]
[![Linting Status][linting-image]][linting-url]
[![Code Coverage][coverage-image]][coverage-url]
--------------------------------------------------------------------------------
This package consists of a small extension library of highly optimized graph cluster algorithms for use in [PyTorch](http://pytorch.org/).
The package consists of the following clustering algorithms:
* **[Graclus](#graclus)** from Dhillon *et al.*: [Weighted Graph Cuts without Eigenvectors: A Multilevel Approach](http://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf) (PAMI 2007)
* **[Voxel Grid Pooling](#voxelgrid)** from, *e.g.*, Simonovsky and Komodakis: [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on Graphs](https://arxiv.org/abs/1704.02901) (CVPR 2017)
* **[Iterative Farthest Point Sampling](#farthestpointsampling)** from, *e.g.*, Qi *et al.*: [PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space](https://arxiv.org/abs/1706.02413) (NIPS 2017)
* **[k-NN](#knn-graph)** and **[Radius](#radius-graph)** graph generation
* Clustering based on **[Nearest](#nearest)** points
* **[Random Walk Sampling](#randomwalk-sampling)** from, *e.g.*, Grover and Leskovec: [node2vec: Scalable Feature Learning for Networks](https://arxiv.org/abs/1607.00653) (KDD 2016)
All included operations work on varying data types and are implemented both for CPU and GPU.
## Installation
### Anaconda
**Update:** You can now install `pytorch-cluster` via [Anaconda](https://anaconda.org/pyg/pytorch-cluster) for all major OS/PyTorch/CUDA combinations 🤗
Given that you have [`pytorch >= 1.8.0` installed](https://pytorch.org/get-started/locally/), simply run
```
conda install pytorch-cluster -c pyg
```
### Binaries
We alternatively provide pip wheels for all major OS/PyTorch/CUDA combinations, see [here](https://data.pyg.org/whl).
#### PyTorch 1.11
To install the binaries for PyTorch 1.11.0, simply run
```
pip install torch-cluster -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html
```
where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu113`, or `cu115` depending on your PyTorch installation.
| | `cpu` | `cu102` | `cu113` | `cu115` |
|-------------|-------|---------|---------|---------|
| **Linux** | ✅ | ✅ | ✅ | ✅ |
| **Windows** | ✅ | | ✅ | ✅ |
| **macOS** | ✅ | | | |
#### PyTorch 1.10
To install the binaries for PyTorch 1.10.0, PyTorch 1.10.1 and PyTorch 1.10.2, simply run
```
pip install torch-cluster -f https://data.pyg.org/whl/torch-1.10.0+${CUDA}.html
```
where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu111`, or `cu113` depending on your PyTorch installation.
| | `cpu` | `cu102` | `cu111` | `cu113` |
|-------------|-------|---------|---------|---------|
| **Linux** | ✅ | ✅ | ✅ | ✅ |
| **Windows** | ✅ | ✅ | ✅ | ✅ |
| **macOS** | ✅ | | | |
**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1 and PyTorch 1.9.0 (following the same procedure).
For older versions, you might need to explicitly specify the latest supported version number in order to prevent a manual installation from source.
You can look up the latest supported version number [here](https://data.pyg.org/whl).
### From source
Ensure that at least PyTorch 1.4.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
```
$ python -c "import torch; print(torch.__version__)"
>>> 1.4.0
$ echo $PATH
>>> /usr/local/cuda/bin:...
$ echo $CPATH
>>> /usr/local/cuda/include:...
```
Then run:
```
pip install torch-cluster
```
When running in a docker container without NVIDIA driver, PyTorch needs to evaluate the compute capabilities and may fail.
In this case, ensure that the compute capabilities are set via `TORCH_CUDA_ARCH_LIST`, *e.g.*:
```
export TORCH_CUDA_ARCH_LIST="6.0 6.1 7.2+PTX 7.5+PTX"
```
## Functions
### Graclus
A greedy clustering algorithm that picks an unmarked vertex and matches it with one of its unmarked neighbors (the one that maximizes its edge weight).
The GPU algorithm is adapted from Fagginger Auer and Bisseling: [A GPU Algorithm for Greedy Graph Matching](http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf) (LNCS 2012)
```python
import torch
from torch_cluster import graclus_cluster
row = torch.tensor([0, 1, 1, 2])
col = torch.tensor([1, 0, 2, 1])
weight = torch.tensor([1., 1., 1., 1.]) # Optional edge weights.
cluster = graclus_cluster(row, col, weight)
```
```
print(cluster)
tensor([0, 0, 1])
```
### VoxelGrid
A clustering algorithm which overlays a regular grid of user-defined size over a point cloud and clusters all points that fall into the same voxel.
```python
import torch
from torch_cluster import grid_cluster
pos = torch.tensor([[0., 0.], [11., 9.], [2., 8.], [2., 2.], [8., 3.]])
size = torch.Tensor([5, 5])
cluster = grid_cluster(pos, size)
```
```
print(cluster)
tensor([0, 5, 3, 0, 1])
```
### FarthestPointSampling
A sampling algorithm that iteratively samples the most distant point with respect to the previously sampled points.
```python
import torch
from torch_cluster import fps
x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
batch = torch.tensor([0, 0, 0, 0])
index = fps(x, batch, ratio=0.5, random_start=False)
```
```
print(index)
tensor([0, 3])
```
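The `batch` vector lets FPS run over several point clouds at once. A minimal sketch (point values chosen here purely for illustration; output omitted):
```python
import torch
from torch_cluster import fps

# Two examples of four points each, concatenated along the node dimension.
x = torch.tensor([[-1., -1.], [-1., 1.], [1., 1.], [1., -1.],
                  [-2., -2.], [-2., 2.], [2., 2.], [2., -2.]])
batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])  # must be sorted

# Roughly ratio * N points are sampled from each example independently.
index = fps(x, batch, ratio=0.5, random_start=False)
```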
### kNN-Graph
Computes graph edges to the nearest *k* points.
**Args:**
* **x** *(Tensor)*: Node feature matrix of shape `[N, F]`.
* **k** *(int)*: The number of neighbors.
* **batch** *(LongTensor, optional)*: Batch vector of shape `[N]`, which assigns each node to a specific example. `batch` needs to be sorted. (default: `None`)
* **loop** *(bool, optional)*: If `True`, the graph will contain self-loops. (default: `False`)
* **flow** *(string, optional)*: The flow direction when using in combination with message passing (`"source_to_target"` or `"target_to_source"`). (default: `"source_to_target"`)
* **cosine** *(bool, optional)*: If `True`, will use the cosine distance instead of the Euclidean distance to find nearest neighbors. (default: `False`)
* **num_workers** *(int)*: Number of workers to use for computation. Has no effect if `batch` is not `None` or the input lies on the GPU. (default: `1`)
```python
import torch
from torch_cluster import knn_graph
x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
batch = torch.tensor([0, 0, 0, 0])
edge_index = knn_graph(x, k=2, batch=batch, loop=False)
```
```
print(edge_index)
tensor([[1, 2, 0, 3, 0, 3, 1, 2],
[0, 0, 1, 1, 2, 2, 3, 3]])
```
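As described for the `batch` argument above, batched inputs are handled by assigning each node to an example, and neighbors are only searched within the same example. A minimal sketch (values chosen for illustration; output omitted):
```python
import torch
from torch_cluster import knn_graph

# Two examples of three points each, concatenated along the node dimension.
x = torch.tensor([[-1., -1.], [-1., 1.], [1., 1.],
                  [-2., -2.], [-2., 2.], [2., 2.]])
batch = torch.tensor([0, 0, 0, 1, 1, 1])  # must be sorted

# Edges are only created between nodes of the same example.
edge_index = knn_graph(x, k=2, batch=batch, loop=False)
```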
### Radius-Graph
Computes graph edges to all points within a given distance.
**Args:**
* **x** *(Tensor)*: Node feature matrix of shape `[N, F]`.
* **r** *(float)*: The radius.
* **batch** *(LongTensor, optional)*: Batch vector of shape `[N]`, which assigns each node to a specific example. `batch` needs to be sorted. (default: `None`)
* **loop** *(bool, optional)*: If `True`, the graph will contain self-loops. (default: `False`)
* **max_num_neighbors** *(int, optional)*: The maximum number of neighbors to return for each element. If the number of actual neighbors is greater than `max_num_neighbors`, returned neighbors are picked randomly. (default: `32`)
* **flow** *(string, optional)*: The flow direction when using in combination with message passing (`"source_to_target"` or `"target_to_source"`). (default: `"source_to_target"`)
* **num_workers** *(int)*: Number of workers to use for computation. Has no effect if `batch` is not `None` or the input lies on the GPU. (default: `1`)
```python
import torch
from torch_cluster import radius_graph
x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
batch = torch.tensor([0, 0, 0, 0])
edge_index = radius_graph(x, r=2.5, batch=batch, loop=False)
```
```
print(edge_index)
tensor([[1, 2, 0, 3, 0, 3, 1, 2],
[0, 0, 1, 1, 2, 2, 3, 3]])
```
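Dense point clouds can produce very large neighborhoods, so `max_num_neighbors` caps the number of returned neighbors per node. A minimal sketch (radius and cap chosen here for illustration only):
```python
import torch
from torch_cluster import radius_graph

x = torch.randn(100, 3)  # 100 random 3-dimensional points

# At most 16 neighbors are returned per node, even if more points
# lie within the given radius.
edge_index = radius_graph(x, r=0.75, max_num_neighbors=16)
```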
### Nearest
Clusters together the points in *x* that are nearest to a given query point in *y*.
`batch_{x,y}` vectors need to be sorted.
```python
import torch
from torch_cluster import nearest
x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch_x = torch.tensor([0, 0, 0, 0])
y = torch.Tensor([[-1, 0], [1, 0]])
batch_y = torch.tensor([0, 0])
cluster = nearest(x, y, batch_x, batch_y)
```
```
print(cluster)
tensor([0, 0, 1, 1])
```
### RandomWalk-Sampling
Samples random walks of length `walk_length` from all node indices in `start` in the graph given by `(row, col)`.
```python
import torch
from torch_cluster import random_walk
row = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4, 4])
col = torch.tensor([1, 0, 2, 3, 1, 4, 1, 4, 2, 3])
start = torch.tensor([0, 1, 2, 3, 4])
walk = random_walk(row, col, start, walk_length=3)
```
```
print(walk)
tensor([[0, 1, 2, 4],
[1, 3, 4, 2],
[2, 4, 2, 1],
[3, 4, 2, 4],
[4, 3, 1, 0]])
```
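If `start` contains nodes that never appear in `(row, col)`, the number of nodes has to be passed explicitly via `num_nodes`, and walks started at such isolated nodes simply stay in place (this mirrors the behavior checked in the random-walk test above). A minimal sketch:
```python
import torch
from torch_cluster import random_walk

row = torch.tensor([0, 1])
col = torch.tensor([1, 0])
start = torch.tensor([0, 1, 2])  # node 2 has no outgoing edges

# num_nodes is required here, since node 2 does not occur in (row, col).
walk = random_walk(row, col, start, walk_length=4, num_nodes=3)
# Walks started at node 2 remain at node 2 for every step.
```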
## Running tests
```
pytest
```
## C++ API
`torch-cluster` also offers a C++ API that contains C++ equivalents of the Python models.
```
mkdir build
cd build
# Add -DWITH_CUDA=on to enable CUDA support if needed
cmake ..
make
make install
```
LICENSE
MANIFEST.in
README.md
setup.cfg
setup.py
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/fps.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/graclus.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/grid.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/knn.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/nearest.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/radius.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/rw.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/sampler.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/version.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/cpu/fps_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/cpu/graclus_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/cpu/grid_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/cpu/knn_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/cpu/radius_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/cpu/rw_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/cpu/sampler_cpu.cpp
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/hip/fps_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/hip/graclus_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/hip/grid_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/hip/knn_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/hip/nearest_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/hip/radius_hip_hip.hip
/work/home/quyuanhao123/software/test_ocp/torch_cluster-1.6.0/csrc/hip/rw_hip_hip.hip
csrc/cluster.h
csrc/fps.cpp
csrc/graclus.cpp
csrc/grid.cpp
csrc/knn.cpp
csrc/nearest.cpp
csrc/radius.cpp
csrc/rw.cpp
csrc/sampler.cpp
csrc/version.cpp
csrc/cpu/fps_cpu.cpp
csrc/cpu/fps_cpu.h
csrc/cpu/graclus_cpu.cpp
csrc/cpu/graclus_cpu.h
csrc/cpu/grid_cpu.cpp
csrc/cpu/grid_cpu.h
csrc/cpu/knn_cpu.cpp
csrc/cpu/knn_cpu.h
csrc/cpu/radius_cpu.cpp
csrc/cpu/radius_cpu.h
csrc/cpu/rw_cpu.cpp
csrc/cpu/rw_cpu.h
csrc/cpu/sampler_cpu.cpp
csrc/cpu/sampler_cpu.h
csrc/cpu/utils.h
csrc/cpu/utils/KDTreeVectorOfVectorsAdaptor.h
csrc/cpu/utils/nanoflann.hpp
csrc/hip/fps_hip.h
csrc/hip/fps_hip.hip
csrc/hip/fps_hip_hip.hip
csrc/hip/graclus_hip.h
csrc/hip/graclus_hip.hip
csrc/hip/graclus_hip_hip.hip
csrc/hip/grid_hip.h
csrc/hip/grid_hip.hip
csrc/hip/grid_hip_hip.hip
csrc/hip/knn_hip.h
csrc/hip/knn_hip.hip
csrc/hip/knn_hip_hip.hip
csrc/hip/nearest_hip.h
csrc/hip/nearest_hip.hip
csrc/hip/nearest_hip_hip.hip
csrc/hip/radius_hip.h
csrc/hip/radius_hip.hip
csrc/hip/radius_hip_hip.hip
csrc/hip/rw_hip.h
csrc/hip/rw_hip.hip
csrc/hip/rw_hip_hip.hip
csrc/hip/utils.cuh
torch_cluster/__init__.py
torch_cluster/fps.py
torch_cluster/graclus.py
torch_cluster/grid.py
torch_cluster/knn.py
torch_cluster/nearest.py
torch_cluster/radius.py
torch_cluster/rw.py
torch_cluster/sampler.py
torch_cluster.egg-info/PKG-INFO
torch_cluster.egg-info/SOURCES.txt
torch_cluster.egg-info/dependency_links.txt
torch_cluster.egg-info/requires.txt
torch_cluster.egg-info/top_level.txt
\ No newline at end of file
[test]
pytest
pytest-cov
scipy
@@ -9,11 +9,11 @@ for library in [
'_version', '_grid', '_graclus', '_fps', '_rw', '_sampler', '_nearest',
'_knn', '_radius'
]:
hip_spec = importlib.machinery.PathFinder().find_spec(
f'{library}_hip', [osp.dirname(__file__)])
cuda_spec = importlib.machinery.PathFinder().find_spec(
f'{library}_cuda', [osp.dirname(__file__)])
cpu_spec = importlib.machinery.PathFinder().find_spec(
f'{library}_cpu', [osp.dirname(__file__)])
spec = hip_spec or cpu_spec
spec = cuda_spec or cpu_spec
if spec is not None:
torch.ops.load_library(spec.origin)
else: # pragma: no cover
@@ -26,6 +26,15 @@ if torch.cuda.is_available() and cuda_version != -1: # pragma: no cover
major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2])
else:
major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')]
if t_major != major:
raise RuntimeError(
f'Detected that PyTorch and torch_cluster were compiled with '
f'different CUDA versions. PyTorch has CUDA version '
f'{t_major}.{t_minor} and torch_cluster has CUDA version '
f'{major}.{minor}. Please reinstall the torch_cluster that '
f'matches your PyTorch install.')
from .fps import fps # noqa
from .graclus import graclus_cluster # noqa