Commit 72b1ce14 authored by rusty1s

bugfix, added tests

parent b1589f14
import pytest
import torch
from torch_cluster import grid_cluster

from .utils import tensors, Tensor


@pytest.mark.parametrize('tensor', tensors)
def test_grid_cluster_cpu(tensor):
    position = Tensor(tensor, [[0, 0], [9, 9], [2, 8], [2, 2], [8, 3]])
    size = torch.LongTensor([5, 5])

    expected = torch.LongTensor([0, 3, 1, 0, 2])
    output = grid_cluster(position, size)
    assert output.tolist() == expected.tolist()

    output = grid_cluster(position.expand(2, 5, 2), size)
    assert output.tolist() == expected.expand(2, 5).tolist()

    expected = torch.LongTensor([0, 1, 3, 2, 4])
    batch = torch.LongTensor([0, 0, 1, 1, 1])
    output = grid_cluster(position, size, batch)
    assert output.tolist() == expected.tolist()

    output = grid_cluster(position.expand(2, 5, 2), size, batch.expand(2, 5))
    assert output.tolist() == expected.expand(2, 5).tolist()


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@pytest.mark.parametrize('tensor', tensors)
def test_grid_cluster_gpu(tensor):  # pragma: no cover
    pass
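The expected ids in the CPU test can be read off by hand: each point gets the per-dimension cell coordinate floor(p_i / size_i), the coordinates are flattened in row-major order (with the batch vector acting as an extra leading dimension), and the resulting raw ids are relabelled consecutively. The sketch below reproduces both expected outputs under that reading; it is illustrative only, not the library's implementation, and grid_cluster_reference is a made-up name.

import torch

def grid_cluster_reference(position, size, batch=None):
    # Per-dimension cell coordinate of every point: floor(p_i / size_i).
    cell = (position / size.type_as(position)).long()
    # Cells per dimension, matching the fix in the diff below: floor(max / size) + 1.
    c_max = (position.max(dim=0)[0].double() / size.double() + 1).long()
    # Flatten the 2D cell coordinates in row-major order.
    raw = cell[:, 0] * c_max[1] + cell[:, 1]
    if batch is not None:
        # Treat the batch vector as an extra leading grid dimension.
        raw = batch * c_max.prod() + raw
    # Relabel the raw ids consecutively (inferred from the expected values).
    return torch.unique(raw, sorted=True, return_inverse=True)[1]

position = torch.Tensor([[0, 0], [9, 9], [2, 8], [2, 2], [8, 3]])
size = torch.LongTensor([5, 5])
batch = torch.LongTensor([0, 0, 1, 1, 1])
print(grid_cluster_reference(position, size).tolist())         # [0, 3, 1, 0, 2]
print(grid_cluster_reference(position, size, batch).tolist())  # [0, 1, 3, 2, 4]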
@@ -29,7 +29,7 @@ def grid_cluster(position, size, batch=None):
     max = position.max(dim=0)[0]
     while max.dim() > 1:
         max = max.max(dim=0)[0]
-    c_max = torch.ceil(max / size.type_as(max)).long()
+    c_max = torch.floor(max.double() / size.double() + 1).long()
     c_max = torch.clamp(c_max, min=1)
     C = c_max.prod()
...
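The changed line replaces ceil(max / size) with floor(max / size) + 1 when counting cells per dimension. The two agree except when max is an exact multiple of size, where a point sitting exactly at max falls into cell floor(max / size) and ceil under-counts by one. A small standalone illustration of that boundary case (assumed example values, not part of the commit):

import torch

max_pos = torch.DoubleTensor([10.0])  # maximum coordinate in one dimension
size = torch.DoubleTensor([5.0])      # cell size in that dimension

old = torch.ceil(max_pos / size).long()       # tensor([2]): cells 0..1, the point at 10.0 would overflow
new = torch.floor(max_pos / size + 1).long()  # tensor([3]): cells 0..2, the point at 10.0 fits
print(old.tolist(), new.tolist())             # [2] [3]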
@@ -2,6 +2,7 @@ import torch
 from torch_unique import unique
 from .._ext import ffi
+print(ffi.__dict__)
 def get_func(name, tensor):
...
@@ -3,7 +3,7 @@
 #else
 void cluster_(grid)(int C, THCudaLongTensor *output, THCTensor *position, THCTensor *size, THCudaLongTensor *count) {
-  return cluster_kernel_(grid)(state, C, output, position, size, count);
+  /* return cluster_kernel_(grid)(state, C, output, position, size, count); */
 }
 #endif