Commit 79b935c7 authored by rusty1s

added sparse and dense voxel cluster

parent ce9a53d1
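For orientation, a minimal usage sketch of the two new entry points (not part of the commit; the values are taken from the two-dimensional fixtures and tests added below, and position is built directly as a FloatTensor for brevity):

import torch
from torch_cluster import sparse_grid_cluster, dense_grid_cluster

# Five 2D points, clustered into voxels of size 5 x 5.
position = torch.FloatTensor([[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]])
size = torch.LongTensor([5, 5])

# Sparse: occupied voxels are relabeled consecutively, empty voxels are skipped.
cluster = sparse_grid_cluster(position, size)
print(cluster.tolist())  # [0, 3, 1, 0, 2]

# Dense: indices address the full voxel grid; C is the total number of voxels.
cluster, C = dense_grid_cluster(position, size)
print(cluster.tolist(), C)  # [0, 5, 1, 0, 2] 6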
@@ -7,4 +7,4 @@ SRC_DIR=torch_cluster/kernel
 BUILD_DIR=torch_cluster/build
 mkdir -p $BUILD_DIR
-nvcc -c -o $BUILD_DIR/kernel.so $SRC_DIR/kernel.cu -arch=sm_35 -Xcompiler -fPIC -shared -I$TORCH/lib/include/TH -I$TORCH/lib/include/THC -I$SRC_DIR
+$(which nvcc) -c -o $BUILD_DIR/kernel.so $SRC_DIR/kernel.cu -arch=sm_35 -Xcompiler -fPIC -shared -I$TORCH/lib/include/TH -I$TORCH/lib/include/THC -I$SRC_DIR
@@ -2,7 +2,7 @@ from os import path as osp
 from setuptools import setup, find_packages

-__version__ = '0.2.1'
+__version__ = '0.2.2'
 url = 'https://github.com/rusty1s/pytorch_cluster'
 install_requires = ['cffi', 'torch-unique']
...
[
{
"name": "One-dimensional positions without start/end",
"position": [2, 16],
"size": [5],
"expected": [0, 2],
"expected_C": 3
},
{
"name": "Start parameter",
"position": [2, 16],
"size": [5],
"start": 0,
"expected": [0, 3],
"expected_C": 4
},
{
"name": "End parameter",
"position": [2, 16],
"size": [5],
"start": 0,
"end": 30,
"expected": [0, 3],
"expected_C": 6
},
{
"name": "Two-dimensional positions",
"position": [[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]],
"size": [5, 5],
"expected": [0, 5, 1, 0, 2],
"expected_C": 6
},
{
"name": "Batch",
"position": [[0, 0], [11, 9], [2, 8], [2, 2], [8, 3], [1, 1], [6, 6]],
"size": [5, 5],
"batch": [0, 0, 0, 0, 0, 1, 1],
"expected": [0, 5, 1, 0, 2, 6, 9],
"expected_C": 6
}
]
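A quick sanity check of how the dense fixtures above are derived (plain-Python arithmetic mirroring _minimal_cluster_size/_fixed_cluster_size in the grid.py diff further down; not part of the commit, and eps matches the constant used there):

import math

# "End parameter" fixture: position [2, 16], size 5, start 0, end 30.
position, size, start, end = [2, 16], 5, 0, 30
eps = 1e-6  # models the half-open interval [start, end)

cluster = [math.floor((p - start) / size) for p in position]
C = math.floor(end / size - eps) + 1
print(cluster, C)  # [0, 3] 6 -- matches "expected" / "expected_C" above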
[
{
"name": "One-dimensional positions",
"position": [2, 16],
"size": [5],
"expected": [0, 1]
},
{
"name": "Start parameter",
"position": [2, 6],
"size": [5],
"start": 0,
"expected": [0, 1]
},
{
"name": "Consecutive cluster",
"position": [0, 17, 2, 8, 3],
"size": [5],
"expected": [0, 2, 0, 1, 0]
},
{
"name": "Two-dimensional positions",
"position": [[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]],
"size": [5, 5],
"expected": [0, 3, 1, 0, 2]
},
{
"name": "Batch",
"position": [[0, 0], [11, 9], [2, 8], [2, 2], [8, 3], [1, 1], [6, 6]],
"size": [5, 5],
"batch": [0, 0, 0, 0, 0, 1, 1],
"expected": [0, 3, 1, 0, 2, 4, 5],
"expected_batch": [0, 0, 0, 0, 1, 1]
}
]
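The sparse fixtures above are the dense results with occupied cells relabeled consecutively; a plain-Python sketch (not part of the commit) for the two-dimensional case, mirroring what consecutive() does in the grid.py diff below:

# Dense result for the same positions (see dense_grid.json above).
dense_cluster = [0, 5, 1, 0, 2]
occupied = sorted(set(dense_cluster))             # [0, 1, 2, 5]
relabel = {c: i for i, c in enumerate(occupied)}
sparse_cluster = [relabel[c] for c in dense_cluster]
print(sparse_cluster)  # [0, 3, 1, 0, 2] -- matches "expected" above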
from os import path as osp
from itertools import product

import pytest
import json
import torch
from torch_cluster import dense_grid_cluster

from .utils import tensors, Tensor

f = open(osp.join(osp.dirname(__file__), 'dense_grid.json'), 'r')
data = json.load(f)
f.close()


@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
def test_dense_grid_cluster_cpu(tensor, i):
    position = Tensor(tensor, data[i]['position'])
    size = torch.LongTensor(data[i]['size'])
    batch = data[i].get('batch')
    batch = None if batch is None else torch.LongTensor(batch)
    start = data[i].get('start')
    end = data[i].get('end')
    expected = torch.LongTensor(data[i]['expected'])
    expected_C = data[i]['expected_C']

    output = dense_grid_cluster(position, size, batch, start, end)
    assert output[0].tolist() == expected.tolist()
    assert output[1] == expected_C


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
def test_dense_grid_cluster_gpu(tensor, i):  # pragma: no cover
    position = Tensor(tensor, data[i]['position']).cuda()
    size = torch.cuda.LongTensor(data[i]['size'])
    batch = data[i].get('batch')
    batch = None if batch is None else torch.cuda.LongTensor(batch)
    start = data[i].get('start')
    end = data[i].get('end')
    expected = torch.LongTensor(data[i]['expected'])
    expected_C = data[i]['expected_C']

    output = dense_grid_cluster(position, size, batch, start, end)
    assert output[0].cpu().tolist() == expected.tolist()
    assert output[1] == expected_C
import pytest
import torch
from torch_cluster import grid_cluster

from .utils import tensors, Tensor


@pytest.mark.parametrize('tensor', tensors)
def test_grid_cluster_cpu(tensor):
    position = Tensor(tensor, [2, 6])
    size = torch.LongTensor([5])
    expected = torch.LongTensor([0, 0])
    output, _ = grid_cluster(position, size)
    assert output.tolist() == expected.tolist()

    expected = torch.LongTensor([0, 1])
    output, _ = grid_cluster(position, size, origin=0)
    assert output.tolist() == expected.tolist()

    position = Tensor(tensor, [0, 17, 2, 8, 3])
    expected = torch.LongTensor([0, 2, 0, 1, 0])
    output, _ = grid_cluster(position, size)
    assert output.tolist() == expected.tolist()

    output, _ = grid_cluster(position, size, fake_nodes=True)
    expected = torch.LongTensor([0, 3, 0, 1, 0])
    assert output.tolist() == expected.tolist()

    position = Tensor(tensor, [[0, 0], [9, 9], [2, 8], [2, 2], [8, 3]])
    size = torch.LongTensor([5, 5])
    expected = torch.LongTensor([0, 3, 1, 0, 2])
    output, _ = grid_cluster(position, size)
    assert output.tolist() == expected.tolist()

    position = Tensor(tensor, [[0, 11, 2, 2, 8], [0, 9, 8, 2, 3]]).t()
    output, _ = grid_cluster(position, size)
    assert output.tolist() == expected.tolist()

    output, _ = grid_cluster(position.expand(2, 5, 2), size)
    assert output.tolist() == expected.expand(2, 5).tolist()

    position = position.repeat(2, 1)
    batch = torch.LongTensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    expected = torch.LongTensor([0, 3, 1, 0, 2, 4, 7, 5, 4, 6])
    expected_batch2 = torch.LongTensor([0, 0, 0, 0, 1, 1, 1, 1])
    output, batch2 = grid_cluster(position, size, batch)
    assert output.tolist() == expected.tolist()
    assert batch2.tolist() == expected_batch2.tolist()

    output, C = grid_cluster(position, size, batch, fake_nodes=True)
    expected = torch.LongTensor([0, 5, 1, 0, 2, 6, 11, 7, 6, 8])
    assert output.tolist() == expected.tolist()
    assert C == 12


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@pytest.mark.parametrize('tensor', tensors)
def test_grid_cluster_gpu(tensor):  # pragma: no cover
    position = Tensor(tensor, [2, 6]).cuda()
    size = torch.cuda.LongTensor([5])
    expected = torch.LongTensor([0, 0])
    output, _ = grid_cluster(position, size)
    assert output.cpu().tolist() == expected.tolist()

    expected = torch.LongTensor([0, 1])
    output, _ = grid_cluster(position, size, origin=0)
    assert output.cpu().tolist() == expected.tolist()

    position = Tensor(tensor, [0, 17, 2, 8, 3]).cuda()
    expected = torch.LongTensor([0, 2, 0, 1, 0])
    output, _ = grid_cluster(position, size)
    assert output.cpu().tolist() == expected.tolist()

    output, _ = grid_cluster(position, size, fake_nodes=True)
    expected = torch.LongTensor([0, 3, 0, 1, 0])
    assert output.cpu().tolist() == expected.tolist()

    position = Tensor(tensor, [[0, 0], [9, 9], [2, 8], [2, 2], [8, 3]])
    position = position.cuda()
    size = torch.cuda.LongTensor([5, 5])
    expected = torch.LongTensor([0, 3, 1, 0, 2])
    output, _ = grid_cluster(position, size)
    assert output.cpu().tolist() == expected.tolist()

    position = Tensor(tensor, [[0, 11, 2, 2, 8], [0, 9, 8, 2, 3]])
    position = position.cuda().t()
    output, _ = grid_cluster(position, size)
    assert output.cpu().tolist() == expected.tolist()

    output, _ = grid_cluster(position.expand(2, 5, 2), size)
    assert output.tolist() == expected.expand(2, 5).tolist()

    position = position.repeat(2, 1)
    batch = torch.cuda.LongTensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    expected = torch.LongTensor([0, 3, 1, 0, 2, 4, 7, 5, 4, 6])
    expected_batch2 = torch.LongTensor([0, 0, 0, 0, 1, 1, 1, 1])
    output, batch2 = grid_cluster(position, size, batch)
    assert output.cpu().tolist() == expected.tolist()
    assert batch2.cpu().tolist() == expected_batch2.tolist()

    output, C = grid_cluster(position, size, batch, fake_nodes=True)
    expected = torch.LongTensor([0, 5, 1, 0, 2, 6, 11, 7, 6, 8])
    assert output.cpu().tolist() == expected.tolist()
    assert C == 12
from os import path as osp
from itertools import product

import pytest
import json
import torch
from torch_cluster import sparse_grid_cluster

from .utils import tensors, Tensor

f = open(osp.join(osp.dirname(__file__), 'sparse_grid.json'), 'r')
data = json.load(f)
f.close()


@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
def test_sparse_grid_cluster_cpu(tensor, i):
    position = Tensor(tensor, data[i]['position'])
    size = torch.LongTensor(data[i]['size'])
    batch = data[i].get('batch')
    start = data[i].get('start')
    expected = torch.LongTensor(data[i]['expected'])

    if batch is None:
        output = sparse_grid_cluster(position, size, batch, start)
        assert output.tolist() == expected.tolist()
    else:
        batch = torch.LongTensor(batch)
        expected_batch = torch.LongTensor(data[i]['expected_batch'])
        output = sparse_grid_cluster(position, size, batch, start)
        assert output[0].tolist() == expected.tolist()
        assert output[1].tolist() == expected_batch.tolist()


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@pytest.mark.parametrize('tensor,i', product(tensors, range(len(data))))
def test_sparse_grid_cluster_gpu(tensor, i):  # pragma: no cover
    position = Tensor(tensor, data[i]['position']).cuda()
    size = torch.cuda.LongTensor(data[i]['size'])
    batch = data[i].get('batch')
    start = data[i].get('start')
    expected = torch.LongTensor(data[i]['expected'])

    if batch is None:
        output = sparse_grid_cluster(position, size, batch, start)
        assert output.cpu().tolist() == expected.tolist()
    else:
        batch = torch.cuda.LongTensor(batch)
        expected_batch = torch.LongTensor(data[i]['expected_batch'])
        output = sparse_grid_cluster(position, size, batch, start)
        assert output[0].cpu().tolist() == expected.tolist()
        assert output[1].cpu().tolist() == expected_batch.tolist()
-from .functions.grid import grid_cluster
+from .functions.grid import sparse_grid_cluster, dense_grid_cluster

-__version__ = '0.2.1'
+__version__ = '0.2.2'

-__all__ = ['grid_cluster', '__version__']
+__all__ = ['sparse_grid_cluster', 'dense_grid_cluster', '__version__']
@@ -1,7 +1,11 @@
+from __future__ import division
+
 import torch

 from .utils import get_func, consecutive

-def grid_cluster(position, size, batch=None, origin=None, fake_nodes=False):
+
+def _preprocess(position, size, batch=None, start=None):
+    size = size.type_as(position)
+
     # Allow one-dimensional positions.
     if position.dim() == 1:
         position = position.unsqueeze(-1)
@@ -12,6 +16,14 @@ def grid_cluster(position, size, batch=None, origin=None, fake_nodes=False):
     assert position.size(-1) == size.size(-1), (
         'Last dimension of position tensor must have same size as size tensor')

+    # Translate to minimal positive positions if no start was passed.
+    if start is None:
+        position = position - position.min(dim=-2, keepdim=True)[0]
+    else:
+        position = position - start
+    assert position.min() >= 0, (
+        'Passed origin resulting in unallowed negative positions')
+
     # If given, append batch to position tensor.
     if batch is not None:
         batch = batch.unsqueeze(-1).type_as(position)
@@ -21,36 +33,63 @@ def grid_cluster(position, size, batch=None, origin=None, fake_nodes=False):
         position = torch.cat([batch, position], dim=-1)
         size = torch.cat([size.new(1).fill_(1), size], dim=-1)

-    # Translate to minimal positive positions if no origin was passed.
-    if origin is None:
-        min = position.min(dim=-2, keepdim=True)[0]
-        position = position - min
-    else:
-        position = position + origin
-    assert position.min() >= 0, (
-        'Passed origin resulting in unallowed negative positions')
+    return position, size

-    # Compute cluster count for each dimension.
+
+def _minimal_cluster_size(position, size):
     max = position.max(dim=0)[0]
     while max.dim() > 1:
         max = max.max(dim=0)[0]
-    c_max = torch.floor(max.double() / size.double() + 1).long()
-    c_max = torch.clamp(c_max, min=1)
-    C = c_max.prod()
+    cluster_size = (max / size).long() + 1
+    return cluster_size

-    # Generate cluster tensor.
-    s = list(position.size())
-    s[-1] = 1
-    cluster = c_max.new(torch.Size(s))
+
+def _fixed_cluster_size(position, size, batch=None, end=None):
+    if end is None:
+        return _minimal_cluster_size(position, size)

-    # Fill cluster tensor and reshape.
-    size = size.type_as(position)
+    eps = 0.000001  # Model [start, end).
+    if batch is None:
+        cluster_size = ((end / size).float() - eps).long() + 1
+    else:
+        cluster_size = ((end / size[1:]).float() - eps).long() + 1
+        cluster_size = torch.cat([batch.max() + 1, cluster_size], dim=0)
+    return cluster_size
+
+
+def _grid_cluster(position, size, cluster_size):
+    C = cluster_size.prod()
+    cluster = cluster_size.new(torch.Size(list(position.size())[:-1]))
+    cluster = cluster.unsqueeze(dim=-1)

     func = get_func('grid', position)
-    func(C, cluster, position, size, c_max)
+    func(C, cluster, position, size, cluster_size)
     cluster = cluster.squeeze(dim=-1)
+    return cluster, C

-    if fake_nodes:
-        return cluster, C
+
+def sparse_grid_cluster(position, size, batch=None, start=None):
+    position, size = _preprocess(position, size, batch, start)
+    cluster_size = _minimal_cluster_size(position, size)
+    cluster, C = _grid_cluster(position, size, cluster_size)
     cluster, u = consecutive(cluster)
-    return cluster, None if batch is None else (u / (C // c_max[0])).long()
+
+    if batch is None:
+        return cluster
+    else:
+        batch = u / (C // cluster_size[0])
+        return cluster, batch
+
+
+def dense_grid_cluster(position, size, batch=None, start=None, end=None):
+    position, size = _preprocess(position, size, batch, start)
+    cluster_size = _fixed_cluster_size(position, size, batch, end)
+    cluster, C = _grid_cluster(position, size, cluster_size)
+
+    if batch is None:
+        return cluster, C
+    else:
+        C = C // cluster_size[0]
+        return cluster, C
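To make the batch handling concrete: _preprocess prepends the batch index as a leading grid coordinate with cell size 1, so example b owns the contiguous block of C // cluster_size[0] cells starting at b * (C // cluster_size[0]), and sparse_grid_cluster recovers the per-cluster batch from that block index. A plain-Python sketch over the "Batch" fixtures above (not part of the commit; C_b is a hypothetical name for the per-example cell count, i.e. expected_C):

dense_cluster = [0, 5, 1, 0, 2, 6, 9]  # dense_grid.json "Batch" fixture
C_b = 6                                # cells per example (expected_C)

occupied = sorted(set(dense_cluster))              # [0, 1, 2, 5, 6, 9]
sparse_cluster = [occupied.index(c) for c in dense_cluster]
batch_of_cluster = [c // C_b for c in occupied]    # which example owns each cell

print(sparse_cluster)    # [0, 3, 1, 0, 2, 4, 5] -- sparse "expected"
print(batch_of_cluster)  # [0, 0, 0, 0, 1, 1]    -- "expected_batch"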