Commit 19374a65 authored by rusty1s

new structure

parent 6a01b87b
import pytest
import torch
from torch_cluster.functions.utils.degree import node_degree


def test_node_degree_cpu():
    index = torch.LongTensor([0, 1, 1, 0, 0, 3, 0])
    degree = node_degree(index, 4)
    expected_degree = [4, 2, 0, 1]
    assert degree.type() == torch.LongTensor().type()
    assert degree.tolist() == expected_degree

    degree = node_degree(index, 4, out=torch.FloatTensor())
    assert degree.type() == torch.FloatTensor().type()
    assert degree.tolist() == expected_degree


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
def test_node_degree_gpu():  # pragma: no cover
    index = torch.cuda.LongTensor([0, 1, 1, 0, 0, 3, 0])
    degree = node_degree(index, 4)
    expected_degree = [4, 2, 0, 1]
    assert degree.type() == torch.cuda.LongTensor().type()
    assert degree.cpu().tolist() == expected_degree

    degree = node_degree(index, 4, out=torch.cuda.FloatTensor())
    assert degree.type() == torch.cuda.FloatTensor().type()
    assert degree.cpu().tolist() == expected_degree
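
For reference, `node_degree` as exercised above amounts to a bincount over the index vector; an illustrative stand-alone equivalent using `torch.bincount` (newer PyTorch, not part of this package):

import torch

index = torch.tensor([0, 1, 1, 0, 0, 3, 0])
print(torch.bincount(index, minlength=4).tolist())  # [4, 2, 0, 1]
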
import pytest
import torch
from torch_cluster.functions.utils.ffi import ffi_serial, ffi_grid, _get_func


def test_serial_cpu():
    row = torch.LongTensor([0, 0, 1, 1, 1, 2, 2, 2, 3, 3])
    col = torch.LongTensor([1, 2, 0, 2, 3, 0, 1, 3, 1, 2])
    degree = torch.LongTensor([2, 3, 3, 2])
    cluster = ffi_serial(row, col, degree)
    expected_cluster = [0, 0, 2, 2]
    assert cluster.tolist() == expected_cluster

    weight = torch.Tensor([1, 2, 1, 3, 2, 2, 3, 3, 2, 3])
    cluster = ffi_serial(row, col, degree, weight)
    expected_cluster = [0, 1, 0, 1]
    assert cluster.tolist() == expected_cluster


def test_grid_cpu():
    position = torch.Tensor([[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]])
    size = torch.Tensor([5, 5])
    count = torch.LongTensor([3, 2])
    cluster = ffi_grid(position, size, count)
    # With voxel size [5, 5] on a [3, 2] grid, e.g. position [11, 9] falls
    # into cell (2, 1), which flattens to index 2 * 2 + 1 = 5.
    expected_cluster = [0, 5, 1, 0, 2]
    assert cluster.tolist() == expected_cluster

@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
def test_assign_color_gpu():  # pragma: no cover
    # Smoke test for the raw extension call: resolve the serial clustering
    # kernel for a CUDA tensor and exercise it on a large input.
    output = torch.cuda.LongTensor(60000).fill_(-1)
    func = _get_func('serial', output)
    func(output, output, output, output)
    print((output + 2).sum() / output.size(0))
    print((output + 2)[:10])

import pytest
import torch
from torch_cluster.functions.utils.permute import sort, permute


def equal_neighbors(row, col, expected_col, degree):
    e, test = 0, True
    while e < len(row):
        i = row[e]
        neighbors = sorted(col[e:e + degree[i]])
        expected_neighbors = sorted(expected_col[e:e + degree[i]])
        if neighbors != expected_neighbors:
            test = False
        e += degree[i]
    return test


def test_sort_cpu():
    row = torch.LongTensor([0, 1, 0, 2, 1, 2, 1, 3, 2, 3])
    col = torch.LongTensor([1, 0, 2, 0, 2, 1, 3, 1, 3, 2])
    row, col = sort(row, col)
    expected_row = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
    expected_col = [1, 2, 0, 2, 3, 0, 1, 3, 1, 2]
    assert row.tolist() == expected_row
    assert col.tolist() == expected_col


def test_permute_cpu():
    row = torch.LongTensor([0, 1, 0, 2, 1, 2, 1, 3, 2, 3])
    col = torch.LongTensor([1, 0, 2, 0, 2, 1, 3, 1, 3, 2])
    node_rid = torch.LongTensor([2, 1, 3, 0])
    edge_rid = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    row, col = permute(row, col, 4, node_rid, edge_rid)
    expected_row = [3, 3, 1, 1, 1, 0, 0, 2, 2, 2]
    expected_col = [1, 2, 0, 2, 3, 1, 2, 0, 1, 3]
    assert row.tolist() == expected_row
    assert col.tolist() == expected_col

@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
def test_sort_gpu():  # pragma: no cover
    # `sort` is not stable on the GPU, so it does not preserve the relative
    # ordering of equal row elements. Hence, the resulting column vector may
    # differ from the CPU version and is only compared neighborhood-wise.
    row = torch.cuda.LongTensor([0, 1, 0, 2, 1, 2, 1, 3, 2, 3])
    col = torch.cuda.LongTensor([1, 0, 2, 0, 2, 1, 3, 1, 3, 2])
    row, col = sort(row, col)
    row, col = row.cpu().tolist(), col.cpu().tolist()
    expected_row = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3]
    expected_col = [1, 2, 0, 2, 3, 0, 1, 3, 1, 2]
    assert row == expected_row
    assert equal_neighbors(row, col, expected_col, [2, 3, 3, 2])


@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
def test_permute_gpu():  # pragma: no cover
    # Like `sort`, `permute` is not stable on the GPU (see above).
    row = torch.cuda.LongTensor([0, 1, 0, 2, 1, 2, 1, 3, 2, 3])
    col = torch.cuda.LongTensor([1, 0, 2, 0, 2, 1, 3, 1, 3, 2])
    node_rid = torch.cuda.LongTensor([2, 1, 3, 0])
    edge_rid = torch.cuda.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    row, col = permute(row, col, 4, node_rid, edge_rid)
    row, col = row.cpu().tolist(), col.cpu().tolist()
    expected_row = [3, 3, 1, 1, 1, 0, 0, 2, 2, 2]
    expected_col = [1, 2, 0, 2, 3, 1, 2, 0, 1, 3]
    assert row == expected_row
    assert equal_neighbors(row, col, expected_col, [2, 3, 3, 2])

from .graclus import graclus_cluster
from .grid import grid_cluster

__version__ = '1.0.0'

__all__ = ['graclus_cluster', 'grid_cluster', '__version__']

from .utils.perm import randperm, sort_row, randperm_sort_row
from .utils.ffi import graclus


def graclus_cluster(row, col, weight=None, num_nodes=None):
    num_nodes = row.max() + 1 if num_nodes is None else num_nodes

    # Randomly reorder the edges and keep optional edge weights aligned.
    row, col, perm = randperm(row, col)
    weight = weight if weight is None else weight[perm]

    # Group edges by row index; on the CPU, the row groups are additionally
    # visited in random order.
    if row.is_cuda:
        row, col, perm = sort_row(row, col)
    else:
        row, col, perm = randperm_sort_row(row, col, num_nodes)
    weight = weight if weight is None else weight[perm]

    cluster = row.new(num_nodes)
    graclus(cluster, row, col, weight)
    return cluster
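
A minimal usage sketch (edge data borrowed from the serial FFI test above; this assumes the package and its compiled extension are importable):

import torch
from torch_cluster import graclus_cluster

row = torch.LongTensor([0, 0, 1, 1, 1, 2, 2, 2, 3, 3])
col = torch.LongTensor([1, 2, 0, 2, 3, 0, 1, 3, 1, 2])
weight = torch.Tensor([1, 2, 1, 3, 2, 2, 3, 3, 2, 3])

cluster = graclus_cluster(row, col, weight)  # one cluster id per node
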

from .utils.ffi import grid


def grid_cluster(pos, size, batch=None, start=None, end=None):
    pos = pos.unsqueeze(-1) if pos.dim() == 1 else pos
    # Assumes voxels start at the origin; `batch`, `start` and `end` are not
    # handled yet.
    count = (pos.max(0)[0] / size).long() + 1
    cluster = pos.new(pos.size(0)).long()
    grid(cluster, pos, size, count)
    return cluster
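
Analogously, a usage sketch for grid clustering (positions and voxel size taken from the grid FFI test above; this relies on the sketched `grid_cluster` body):

import torch
from torch_cluster import grid_cluster

pos = torch.Tensor([[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]])
size = torch.Tensor([5, 5])

cluster = grid_cluster(pos, size)  # one voxel id per position
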

from .._ext import ffi


def get_func(name, is_cuda, tensor=None):
    prefix = 'THCC' if is_cuda else 'TH'
    prefix += 'Tensor' if tensor is None else type(tensor).__name__
    return getattr(ffi, '{}_{}'.format(prefix, name))


def graclus(self, row, col, weight=None):
    func = get_func('graclus', self.is_cuda, weight)
    if weight is None:
        func(self, row, col)
    else:
        func(self, row, col, weight)


def grid(self, pos, size, count):
    func = get_func('grid', self.is_cuda, pos)
    func(self, pos, size, count)
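
# For reference, the extension names `get_func` resolves to:
#   get_func('graclus', False, None)                 -> ffi.THTensor_graclus
#   get_func('graclus', False, torch.FloatTensor())  -> ffi.THFloatTensor_graclus
#   get_func('grid', True, torch.cuda.FloatTensor()) -> ffi.THCCFloatTensor_grid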

import torch


def randperm(row, col):
    # Randomly reorder the edges and return the applied permutation.
    perm = torch.randperm(row.size(0)).type_as(row)
    return row[perm], col[perm], perm


def sort_row(row, col):
    # Sort the edges by row index and return the applied permutation.
    row, perm = row.sort()
    return row, col[perm], perm


def randperm_sort_row(row, col, num_nodes):
    # Randomly relabel the row indices.
    node_rid = torch.randperm(num_nodes).type_as(row)
    row = node_rid[row]
    # Sort the edges by the relabeled row indices.
    row, col, perm = sort_row(row, col)
    # Revert the relabeling, so that rows are grouped but the groups appear
    # in random order.
    row = node_rid.sort()[1][row]
    return row, col, perm
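
A small sanity check (edge list borrowed from the permute tests above): `randperm_sort_row` keeps the edge set intact while grouping edges with the same row index contiguously, with the groups appearing in random order.

import torch

row = torch.LongTensor([0, 1, 0, 2, 1, 2, 1, 3, 2, 3])
col = torch.LongTensor([1, 0, 2, 0, 2, 1, 3, 1, 3, 2])

out_row, out_col, perm = randperm_sort_row(row, col, 4)
edges = sorted(zip(out_row.tolist(), out_col.tolist()))
assert edges == sorted(zip(row.tolist(), col.tolist()))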