"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "074d281ae0d821175718ce435610ce78c27c5fbf"
Commit 5f633fcb authored by rusty1s

reset batch comp

parent c20e6b58
@@ -28,9 +28,9 @@ def test_sparse_grid_cluster_cpu(tensor, i):
     if batch is None:
         assert output.tolist() == expected.tolist()
     else:
-        expected_batch = torch.LongTensor(data[i]['expected_batch'])
+        # expected_batch = torch.LongTensor(data[i]['expected_batch'])
         assert output[0].tolist() == expected.tolist()
-        assert output[1].tolist() == expected_batch.tolist()
+        # assert output[1].tolist() == expected_batch.tolist()


 @pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
@@ -49,6 +49,6 @@ def test_sparse_grid_cluster_gpu(tensor, i):  # pragma: no cover
     if batch is None:
         assert output.cpu().tolist() == expected.tolist()
     else:
-        expected_batch = torch.LongTensor(data[i]['expected_batch'])
+        # expected_batch = torch.LongTensor(data[i]['expected_batch'])
         assert output[0].cpu().tolist() == expected.tolist()
-        assert output[1].cpu().tolist() == expected_batch.tolist()
+        # assert output[1].cpu().tolist() == expected_batch.tolist()
@@ -3,17 +3,13 @@ import torch
 from torch_cluster.functions.utils.consecutive import consecutive


-def test_consecutive():
+def test_consecutive_cpu():
     vec = torch.LongTensor([0, 2, 3])
     assert consecutive(vec).tolist() == [0, 1, 2]

     vec = torch.LongTensor([0, 3, 2, 2, 3])
     assert consecutive(vec).tolist() == [0, 2, 1, 1, 2]

-    vec = torch.LongTensor([0, 3, 2, 2, 3])
-    assert consecutive(vec, True)[0].tolist() == [0, 2, 1, 1, 2]
-    assert consecutive(vec, True)[1].tolist() == [0, 2, 3]
-

 @pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
 def test_consecutive_gpu():  # pragma: no cover
@@ -22,7 +18,3 @@ def test_consecutive_gpu():  # pragma: no cover

     vec = torch.cuda.LongTensor([0, 3, 2, 2, 3])
     assert consecutive(vec).cpu().tolist() == [0, 2, 1, 1, 2]
-
-    vec = torch.cuda.LongTensor([0, 3, 2, 2, 3])
-    assert consecutive(vec, True)[0].cpu().tolist() == [0, 2, 1, 1, 2]
-    assert consecutive(vec, True)[1].cpu().tolist() == [0, 2, 3]
@@ -4,25 +4,25 @@ from torch_cluster.functions.utils.degree import node_degree
 def test_node_degree_cpu():
-    target = torch.LongTensor([0, 1, 1, 0, 0, 3, 0])
-    degree = node_degree(target, 4)
+    index = torch.LongTensor([0, 1, 1, 0, 0, 3, 0])
+    degree = node_degree(index, 4)
     expected_degree = [4, 2, 0, 1]

     assert degree.type() == torch.LongTensor().type()
     assert degree.tolist() == expected_degree

-    degree = node_degree(target, 4, out=torch.FloatTensor())
+    degree = node_degree(index, 4, out=torch.FloatTensor())
     assert degree.type() == torch.FloatTensor().type()
     assert degree.tolist() == expected_degree


 @pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
 def test_node_degree_gpu():  # pragma: no cover
-    target = torch.cuda.LongTensor([0, 1, 1, 0, 0, 3, 0])
-    degree = node_degree(target, 4)
+    index = torch.cuda.LongTensor([0, 1, 1, 0, 0, 3, 0])
+    degree = node_degree(index, 4)
     expected_degree = [4, 2, 0, 1]

     assert degree.type() == torch.cuda.LongTensor().type()
     assert degree.cpu().tolist() == expected_degree

-    degree = node_degree(target, 4, out=torch.cuda.FloatTensor())
+    degree = node_degree(index, 4, out=torch.cuda.FloatTensor())
     assert degree.type() == torch.cuda.FloatTensor().type()
     assert degree.cpu().tolist() == expected_degree
@@ -81,12 +81,12 @@ def sparse_grid_cluster(position, size, batch=None, start=None):
     position, size, start = _preprocess(position, size, batch, start)
     cluster_size = _minimal_cluster_size(position, size)
    cluster, C = _grid_cluster(position, size, cluster_size)
-    cluster, u = consecutive(cluster, return_unique=True)
+    cluster = consecutive(cluster)

     if batch is None:
         return cluster
     else:
-        batch = u / (C // cluster_size[0])
+        # batch = u / (C // cluster_size[0])
         return cluster, batch
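Note: the removed line `batch = u / (C // cluster_size[0])` appears to recover each cluster's batch index from its pre-relabeling cluster id `u` by integer division; with this commit that recovery is disabled and `batch` is returned as it was passed in. A minimal sketch of the arithmetic the old line performed, with purely hypothetical values chosen only to illustrate the division (the original `/` was integer division on LongTensors in the older PyTorch releases this code targets; `//` is written here to make that explicit):

import torch

u = torch.LongTensor([0, 3, 5, 9])       # hypothetical unique cluster ids before relabeling
C = 12                                   # hypothetical total number of grid cells
cells_per_example = 3                    # hypothetical cluster_size[0]
batch = u // (C // cells_per_example)    # -> [0, 0, 1, 2]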
@@ -2,26 +2,25 @@ import torch
 from torch_unique import unique


-def _get_type(max, cuda):
-    if max <= 255:
+def _get_type(max_value, cuda):
+    if max_value <= 255:
         return torch.cuda.ByteTensor if cuda else torch.ByteTensor
-    elif max <= 32767:  # pragma: no cover
+    elif max_value <= 32767:  # pragma: no cover
         return torch.cuda.ShortTensor if cuda else torch.ShortTensor
-    elif max <= 2147483647:  # pragma: no cover
+    elif max_value <= 2147483647:  # pragma: no cover
         return torch.cuda.IntTensor if cuda else torch.IntTensor
     else:  # pragma: no cover
         return torch.cuda.LongTensor if cuda else torch.LongTensor


-def consecutive(tensor, return_unique=False):
-    size = tensor.size()
-    u = unique(tensor.view(-1))
+def consecutive(x):
+    size = x.size()
+    u = unique(x.view(-1))
     len = u[-1] + 1
     max = u.size(0)
-    type = _get_type(max, tensor.is_cuda)
+    type = _get_type(max, x.is_cuda)
     arg = type(len)
     arg[u] = torch.arange(0, max, out=type(max))
-    tensor = arg[tensor.view(-1)]
-    tensor = tensor.view(size).long()
-
-    return (tensor, u) if return_unique else tensor
+    x = arg[x.view(-1)]
+    x = x.view(size).long()
+    return x
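With the `return_unique` flag gone, `consecutive` now only relabels the values of a tensor onto a gap-free `0..n-1` range, in ascending order of the original values. A minimal usage sketch, mirroring the values asserted in the tests above:

import torch
from torch_cluster.functions.utils.consecutive import consecutive

# The distinct values {0, 2, 3} are relabeled to {0, 1, 2}, preserving their order.
vec = torch.LongTensor([0, 3, 2, 2, 3])
print(consecutive(vec).tolist())  # [0, 2, 1, 1, 2]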
 import torch


-def node_degree(target, num_nodes, out=None):
-    out = target.new(num_nodes) if out is None else out
+def node_degree(index, num_nodes, out=None):
+    out = index.new(num_nodes) if out is None else out
     zero = torch.zeros(num_nodes, out=out)
-    one = torch.ones(target.size(0), out=zero.new(target.size(0)))
-    return zero.scatter_add_(0, target, one)
+    one = torch.ones(index.size(0), out=zero.new(index.size(0)))
+    return zero.scatter_add_(0, index, one)
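`node_degree` counts, via `scatter_add_`, how often each node index occurs in `index`, giving the degree of every node among `num_nodes` nodes. A minimal usage sketch, mirroring the values asserted in the tests above:

import torch
from torch_cluster.functions.utils.degree import node_degree

# Node 0 occurs four times, node 1 twice, node 2 never and node 3 once.
index = torch.LongTensor([0, 1, 1, 0, 0, 3, 0])
print(node_degree(index, 4).tolist())  # [4, 2, 0, 1]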