torch-cluster (OpenDAS), commit 9725bf76
Authored Dec 15, 2018 by rusty1s
Parent: 4e6cb0cf

radius cpu version

Showing 3 changed files with 15 additions and 10 deletions (+15 -10)
test/test_radius.py        +1  -4
torch_cluster/knn.py       +1  -2
torch_cluster/radius.py    +13 -4
test/test_radius.py

...
@@ -4,12 +4,9 @@ import pytest
 import torch
 
 from torch_cluster import radius
-from .utils import tensor, grad_dtypes
-
-devices = [torch.device('cuda')]
+from .utils import grad_dtypes, devices, tensor
 
 
-@pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
 @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
 def test_radius(dtype, device):
     x = tensor([
...
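The skipif guard on test_radius disappears because the device list is no longer hard-coded to CUDA; it now comes from test/.utils together with grad_dtypes and tensor. That helper module is not part of this commit, but a plausible definition that would make the parametrization above exercise the new CPU path, and CUDA only when it is present, looks like this (hypothetical sketch, not the actual file):

import torch

# Hypothetical content of test/utils.py (not shown in this diff): always test
# on the CPU and add CUDA only when a device is available.
devices = [torch.device('cpu')]
if torch.cuda.is_available():
    devices += [torch.device('cuda')]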
torch_cluster/knn.py

...
@@ -46,8 +46,7 @@ def knn(x, y, k, batch_x=None, batch_y=None):
     assert y.size(0) == batch_y.size(0)
 
     if x.is_cuda:
-        assign_index = knn_cuda.knn(x, y, k, batch_x, batch_y)
-        return assign_index
+        return knn_cuda.knn(x, y, k, batch_x, batch_y)
 
     # Rescale x and y.
     min_xy = min(x.min().item(), y.min().item())
...
torch_cluster/radius.py

 import torch
+import scipy.spatial
 
 if torch.cuda.is_available():
     import radius_cuda
...
@@ -40,17 +41,25 @@ def radius(x, y, r, batch_x=None, batch_y=None, max_num_neighbors=32):
     x = x.view(-1, 1) if x.dim() == 1 else x
     y = y.view(-1, 1) if y.dim() == 1 else y
 
-    assert x.is_cuda
     assert x.dim() == 2 and batch_x.dim() == 1
     assert y.dim() == 2 and batch_y.dim() == 1
     assert x.size(1) == y.size(1)
     assert x.size(0) == batch_x.size(0)
     assert y.size(0) == batch_y.size(0)
 
-    op = radius_cuda.radius if x.is_cuda else None
-    assign_index = op(x, y, r, batch_x, batch_y, max_num_neighbors)
+    if x.is_cuda:
+        return radius_cuda.radius(x, y, r, batch_x, batch_y, max_num_neighbors)
+
+    x = torch.cat([x, 2 * r * batch_x.view(-1, 1).to(x.dtype)], dim=-1)
+    y = torch.cat([y, 2 * r * batch_y.view(-1, 1).to(y.dtype)], dim=-1)
 
-    return assign_index
+    tree = scipy.spatial.cKDTree(x)
+    col = tree.query_ball_point(y, r)
+    col = [torch.tensor(c) for c in col]
+    row = [torch.full_like(c, i) for i, c in enumerate(col)]
+    row, col = torch.cat(row, dim=0), torch.cat(col, dim=0)
+
+    return torch.stack([row, col], dim=0)
 
 
 def radius_graph(x, r, batch=None, loop=False, max_num_neighbors=32):
...
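The new CPU branch appends the batch index, scaled by 2 * r, as an extra coordinate before building the k-d tree: two points from different examples then differ by at least 2 * r along that axis, so a ball query of radius r can never pair them across examples. Note that in this version only the CUDA kernel honors max_num_neighbors; query_ball_point returns every neighbor within r. A small self-contained sketch of the same idea on made-up toy data (the x_aug/y_aug names are mine, not from the diff):

import torch
import scipy.spatial

# Toy data: four 2-D points in x split into two examples, two query points in y.
x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]])
batch_x = torch.tensor([0, 0, 1, 1])
y = torch.tensor([[-1.0, 0.0], [1.0, 0.0]])
batch_y = torch.tensor([0, 1])
r = 1.5

# Shift each example into its own slab along an extra axis (offset 2 * r per
# batch index) so cross-example pairs are always farther apart than r.
x_aug = torch.cat([x, 2 * r * batch_x.view(-1, 1).to(x.dtype)], dim=-1)
y_aug = torch.cat([y, 2 * r * batch_y.view(-1, 1).to(y.dtype)], dim=-1)

# Same steps as the CPU path in radius(): k-d tree over x, ball query per y point.
tree = scipy.spatial.cKDTree(x_aug)
col = tree.query_ball_point(y_aug, r)          # one neighbor list per y point
col = [torch.tensor(c) for c in col]
row = [torch.full_like(c, i) for i, c in enumerate(col)]
row, col = torch.cat(row, dim=0), torch.cat(col, dim=0)

assign_index = torch.stack([row, col], dim=0)  # row indexes y, col indexes x
print(assign_index)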