Commit dfe188ab authored by rusty1s

year up, restricted coverage, nested extensions

parent a353f274
+[run]
+source=torch_cluster
 [report]
 exclude_lines =
     pragma: no cover
...
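The new [run] section above restricts coverage measurement to the torch_cluster package itself. A rough Python equivalent of what that setting asks coverage.py to do, shown as a sketch for illustration only (not part of this commit):

import coverage

# Mirrors `source=torch_cluster` from the coverage configuration: only files that
# belong to the torch_cluster package are measured, not every imported module.
cov = coverage.Coverage(source=['torch_cluster'])
cov.start()
import torch_cluster  # measured
cov.stop()
cov.report()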
-Copyright (c) 2018 Matthias Fey <matthias.fey@tu-dortmund.de>
+Copyright (c) 2019 Matthias Fey <matthias.fey@tu-dortmund.de>
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
...
-#include <torch/torch.h>
+#include <torch/extension.h>
 #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be CUDA tensor")
 #define IS_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " is not contiguous");
...
-#include <torch/torch.h>
+#include <torch/extension.h>
 #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be CUDA tensor")
...
-#include <torch/torch.h>
+#include <torch/extension.h>
 #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be CUDA tensor")
...
-#include <torch/torch.h>
+#include <torch/extension.h>
 #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be CUDA tensor")
 #define IS_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " is not contiguous");
...
-#include <torch/torch.h>
+#include <torch/extension.h>
 #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be CUDA tensor")
 #define IS_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " is not contiguous");
...
-#include <torch/torch.h>
+#include <torch/extension.h>
 #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be CUDA tensor")
 #define IS_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " is not contiguous");
...
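The include change from <torch/torch.h> to <torch/extension.h> in the files above matters because the latter is the header intended for Python-facing extensions: it pulls in ATen plus the pybind11 binding machinery that CppExtension/CUDAExtension builds expect. A minimal, self-contained sketch of that interface using PyTorch's JIT compilation helper (the module name 'doubler' and the function twice() are made up for illustration):

import torch
from torch.utils.cpp_extension import load_inline

cpp_source = """
#include <torch/extension.h>

torch::Tensor twice(torch::Tensor x) {
  return x + x;
}
"""

# load_inline compiles the snippet on the fly and generates pybind11 bindings for
# the listed functions, the same interface the checked-in .cpp files rely on.
ext = load_inline(name='doubler', cpp_sources=cpp_source, functions=['twice'])
print(ext.twice(torch.ones(3)))  # tensor([2., 2., 2.])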
@@ -3,26 +3,29 @@ import torch
 from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME
 ext_modules = [
-    CppExtension('graclus_cpu', ['cpu/graclus.cpp']),
-    CppExtension('grid_cpu', ['cpu/grid.cpp']),
-    CppExtension('fps_cpu', ['cpu/fps.cpp']),
+    CppExtension('torch_cluster.graclus_cpu', ['cpu/graclus.cpp']),
+    CppExtension('torch_cluster.grid_cpu', ['cpu/grid.cpp']),
+    CppExtension('torch_cluster.fps_cpu', ['cpu/fps.cpp']),
 ]
 cmdclass = {'build_ext': torch.utils.cpp_extension.BuildExtension}
 if CUDA_HOME is not None:
     ext_modules += [
-        CUDAExtension('graclus_cuda',
-                      ['cuda/graclus.cpp', 'cuda/graclus_kernel.cu']),
-        CUDAExtension('grid_cuda', ['cuda/grid.cpp', 'cuda/grid_kernel.cu']),
-        CUDAExtension('fps_cuda', ['cuda/fps.cpp', 'cuda/fps_kernel.cu']),
-        CUDAExtension('nearest_cuda',
-                      ['cuda/nearest.cpp', 'cuda/nearest_kernel.cu']),
-        CUDAExtension('knn_cuda', ['cuda/knn.cpp', 'cuda/knn_kernel.cu']),
-        CUDAExtension('radius_cuda',
-                      ['cuda/radius.cpp', 'cuda/radius_kernel.cu']),
+        CUDAExtension('torch_cluster.graclus_cuda',
+                      ['cuda/graclus.cpp', 'cuda/graclus_kernel.cu']),
+        CUDAExtension('torch_cluster.grid_cuda',
+                      ['cuda/grid.cpp', 'cuda/grid_kernel.cu']),
+        CUDAExtension('torch_cluster.fps_cuda',
+                      ['cuda/fps.cpp', 'cuda/fps_kernel.cu']),
+        CUDAExtension('torch_cluster.nearest_cuda',
+                      ['cuda/nearest.cpp', 'cuda/nearest_kernel.cu']),
+        CUDAExtension('torch_cluster.knn_cuda',
+                      ['cuda/knn.cpp', 'cuda/knn_kernel.cu']),
+        CUDAExtension('torch_cluster.radius_cuda',
+                      ['cuda/radius.cpp', 'cuda/radius_kernel.cu']),
     ]
-__version__ = '1.2.2'
+__version__ = '1.2.3'
 url = 'https://github.com/rusty1s/pytorch_cluster'
 install_requires = ['scipy']
@@ -32,8 +35,8 @@ tests_require = ['pytest', 'pytest-cov']
 setup(
     name='torch_cluster',
     version=__version__,
-    description='PyTorch Extension Library of Optimized Graph Cluster '
-    'Algorithms',
+    description=('PyTorch Extension Library of Optimized Graph Cluster '
+                 'Algorithms'),
     author='Matthias Fey',
     author_email='matthias.fey@tu-dortmund.de',
     url=url,
@@ -44,4 +47,5 @@ setup(
     tests_require=tests_require,
     ext_modules=ext_modules,
     cmdclass=cmdclass,
-    packages=find_packages(), )
+    packages=find_packages(),
+)
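Prefixing the extension names with `torch_cluster.` makes setuptools build the compiled modules as submodules of the package, so the shared objects end up inside the torch_cluster directory instead of at the top level of site-packages. A quick sanity check after rebuilding and installing (a sketch; the printed path depends on the local install):

import torch_cluster
import torch_cluster.graclus_cpu   # previously a top-level `import graclus_cpu`

print(torch_cluster.__version__)            # '1.2.3' after this commit
print(torch_cluster.graclus_cpu.__file__)   # the built .so now lives under torch_cluster/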
@@ -5,7 +5,7 @@ from .nearest import nearest
 from .knn import knn, knn_graph
 from .radius import radius, radius_graph
-__version__ = '1.2.2'
+__version__ = '1.2.3'
 __all__ = [
     'graclus_cluster',
...
 import torch
-import fps_cpu
+import torch_cluster.fps_cpu
 if torch.cuda.is_available():
-    import fps_cuda
+    import torch_cluster.fps_cuda
 def fps(x, batch=None, ratio=0.5, random_start=True):
@@ -45,6 +45,6 @@ def fps(x, batch=None, ratio=0.5, random_start=True):
     assert ratio > 0 and ratio < 1
     if x.is_cuda:
-        return fps_cuda.fps(x, batch, ratio, random_start)
+        return torch_cluster.fps_cuda.fps(x, batch, ratio, random_start)
     else:
-        return fps_cpu.fps(x, batch, ratio, random_start)
+        return torch_cluster.fps_cpu.fps(x, batch, ratio, random_start)
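The renaming above only touches the internal dispatch; the public `fps` call is unchanged. A minimal usage sketch (the point cloud values are arbitrary):

import torch
from torch_cluster import fps

x = torch.rand(100, 3)                      # 100 points in 3-D
batch = torch.zeros(100, dtype=torch.long)  # all points belong to one example
idx = fps(x, batch, ratio=0.5)              # indices of the farthest-point samples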
 import torch
-import graclus_cpu
+import torch_cluster.graclus_cpu
 if torch.cuda.is_available():
-    import graclus_cuda
+    import torch_cluster.graclus_cuda
 def graclus_cluster(row, col, weight=None, num_nodes=None):
@@ -28,7 +28,10 @@ def graclus_cluster(row, col, weight=None, num_nodes=None):
     if num_nodes is None:
         num_nodes = max(row.max().item(), col.max().item()) + 1
-    op = graclus_cuda if row.is_cuda else graclus_cpu
+    if row.is_cuda:
+        op = torch_cluster.graclus_cuda
+    else:
+        op = torch_cluster.graclus_cpu
     if weight is None:
         cluster = op.graclus(row, col, num_nodes)
...
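As above, only the dispatch changes; `graclus_cluster` is called exactly as before. A minimal usage sketch on a tiny symmetric graph (values are illustrative):

import torch
from torch_cluster import graclus_cluster

row = torch.tensor([0, 1, 1, 2])     # edges 0-1 and 1-2, given in both directions
col = torch.tensor([1, 0, 2, 1])
cluster = graclus_cluster(row, col)  # one cluster id per node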
 import torch
-import grid_cpu
+import torch_cluster.grid_cpu
 if torch.cuda.is_available():
-    import grid_cuda
+    import torch_cluster.grid_cuda
 def grid_cluster(pos, size, start=None, end=None):
@@ -30,7 +30,11 @@ def grid_cluster(pos, size, start=None, end=None):
     start = pos.t().min(dim=1)[0] if start is None else start
     end = pos.t().max(dim=1)[0] if end is None else end
-    op = grid_cuda if pos.is_cuda else grid_cpu
+    if pos.is_cuda:
+        op = torch_cluster.grid_cuda
+    else:
+        op = torch_cluster.grid_cpu
     cluster = op.grid(pos, size, start, end)
     return cluster
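The public `grid_cluster` interface is likewise untouched. A minimal usage sketch (coordinates and voxel size are arbitrary):

import torch
from torch_cluster import grid_cluster

pos = torch.tensor([[0.0, 0.0], [11.0, 9.0], [2.0, 8.0], [2.0, 2.0], [8.0, 3.0]])
size = torch.tensor([5.0, 5.0])    # voxel size per dimension
cluster = grid_cluster(pos, size)  # voxel id per point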
@@ -2,7 +2,7 @@ import torch
 import scipy.spatial
 if torch.cuda.is_available():
-    import knn_cuda
+    import torch_cluster.knn_cuda
 def knn(x, y, k, batch_x=None, batch_y=None):
@@ -54,7 +54,7 @@ def knn(x, y, k, batch_x=None, batch_y=None):
     assert y.size(0) == batch_y.size(0)
     if x.is_cuda:
-        return knn_cuda.knn(x, y, k, batch_x, batch_y)
+        return torch_cluster.knn_cuda.knn(x, y, k, batch_x, batch_y)
     # Rescale x and y.
     min_xy = min(x.min().item(), y.min().item())
...
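Only the CUDA branch changes here; the scipy-based CPU path stays as it was. A minimal `knn` usage sketch (points and batch vectors are illustrative):

import torch
from torch_cluster import knn

x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]])
batch_x = torch.tensor([0, 0, 0, 0])
y = torch.tensor([[-1.0, 0.0], [1.0, 0.0]])
batch_y = torch.tensor([0, 0])
assign_index = knn(x, y, 2, batch_x, batch_y)  # index pairs linking each y to its 2 nearest x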
@@ -2,7 +2,7 @@ import torch
 import scipy.cluster
 if torch.cuda.is_available():
-    import nearest_cuda
+    import torch_cluster.nearest_cuda
 def nearest(x, y, batch_x=None, batch_y=None):
@@ -51,7 +51,7 @@ def nearest(x, y, batch_x=None, batch_y=None):
     assert y.size(0) == batch_y.size(0)
     if x.is_cuda:
-        return nearest_cuda.nearest(x, y, batch_x, batch_y)
+        return torch_cluster.nearest_cuda.nearest(x, y, batch_x, batch_y)
     # Rescale x and y.
     min_xy = min(x.min().item(), y.min().item())
...
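Same pattern for `nearest`: only the CUDA call is namespaced. A minimal usage sketch (values are illustrative):

import torch
from torch_cluster import nearest

x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]])
batch_x = torch.tensor([0, 0, 0, 0])
y = torch.tensor([[-1.0, 0.0], [1.0, 0.0]])
batch_y = torch.tensor([0, 0])
cluster = nearest(x, y, batch_x, batch_y)  # for each point in x, the index of its nearest point in y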
@@ -2,7 +2,7 @@ import torch
 import scipy.spatial
 if torch.cuda.is_available():
-    import radius_cuda
+    import torch_cluster.radius_cuda
 def radius(x, y, r, batch_x=None, batch_y=None, max_num_neighbors=32):
@@ -57,7 +57,8 @@ def radius(x, y, r, batch_x=None, batch_y=None, max_num_neighbors=32):
     assert y.size(0) == batch_y.size(0)
     if x.is_cuda:
-        return radius_cuda.radius(x, y, r, batch_x, batch_y, max_num_neighbors)
+        return torch_cluster.radius_cuda.radius(x, y, r, batch_x, batch_y,
+                                                max_num_neighbors)
     x = torch.cat([x, 2 * r * batch_x.view(-1, 1).to(x.dtype)], dim=-1)
     y = torch.cat([y, 2 * r * batch_y.view(-1, 1).to(y.dtype)], dim=-1)
...
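And the same for `radius`, where the now-longer CUDA call is simply wrapped onto two lines. A minimal usage sketch (radius and points are illustrative):

import torch
from torch_cluster import radius

x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]])
batch_x = torch.tensor([0, 0, 0, 0])
y = torch.tensor([[-1.0, 0.0], [1.0, 0.0]])
batch_y = torch.tensor([0, 0])
assign_index = radius(x, y, 1.5, batch_x, batch_y)  # pairs within distance 1.5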