Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
torch-cluster
Commits
72b1ce14
Commit
72b1ce14
authored
Jan 31, 2018
by
rusty1s
Browse files
bugfix, added tests
parent
b1589f14
Changes
4
Show whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
35 additions
and
2 deletions
+35
-2
test/test_grid.py
test/test_grid.py
+32
-0
torch_cluster/functions/grid.py
torch_cluster/functions/grid.py
+1
-1
torch_cluster/functions/utils.py
torch_cluster/functions/utils.py
+1
-0
torch_cluster/src/generic/cuda.c
torch_cluster/src/generic/cuda.c
+1
-1
No files found.
test/test_grid.py
View file @
72b1ce14
import
pytest
import
torch
from
torch_cluster
import
grid_cluster
from
.utils
import
tensors
,
Tensor
@pytest.mark.parametrize('tensor', tensors)
def test_grid_cluster_cpu(tensor):
    """Exercise grid_cluster on CPU: 2D positions, with and without a batch
    vector, and with the inputs broadcast to an extra leading dimension."""
    points = [[0, 0], [9, 9], [2, 8], [2, 2], [8, 3]]
    position = Tensor(tensor, points)
    size = torch.LongTensor([5, 5])

    # Single example, no batch vector.
    expected = torch.LongTensor([0, 3, 1, 0, 2])
    result = grid_cluster(position, size)
    assert result.tolist() == expected.tolist()

    # Same data expanded to a leading dimension of two; cluster ids repeat.
    result = grid_cluster(position.expand(2, 5, 2), size)
    assert result.tolist() == expected.expand(2, 5).tolist()

    # With an explicit batch assignment splitting the five points in two.
    expected = torch.LongTensor([0, 1, 3, 2, 4])
    batch = torch.LongTensor([0, 0, 1, 1, 1])
    result = grid_cluster(position, size, batch)
    assert result.tolist() == expected.tolist()

    # Batched variant with the batch vector expanded alongside the positions.
    result = grid_cluster(
        position.expand(2, 5, 2), size, batch.expand(2, 5))
    assert result.tolist() == expected.expand(2, 5).tolist()
@pytest.mark.parametrize('tensor', tensors)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='no CUDA')
def test_grid_cluster_gpu(tensor):  # pragma: no cover
    """Placeholder for the CUDA variant of the grid-cluster test; skipped
    entirely on machines without a CUDA device."""
    pass
torch_cluster/functions/grid.py
View file @
72b1ce14
...
@@ -29,7 +29,7 @@ def grid_cluster(position, size, batch=None):
...
@@ -29,7 +29,7 @@ def grid_cluster(position, size, batch=None):
max
=
position
.
max
(
dim
=
0
)[
0
]
max
=
position
.
max
(
dim
=
0
)[
0
]
while
max
.
dim
()
>
1
:
while
max
.
dim
()
>
1
:
max
=
max
.
max
(
dim
=
0
)[
0
]
max
=
max
.
max
(
dim
=
0
)[
0
]
c_max
=
torch
.
ceil
(
max
/
size
.
type_as
(
max
)
).
long
()
c_max
=
torch
.
floor
(
max
.
double
()
/
size
.
double
()
+
1
).
long
()
c_max
=
torch
.
clamp
(
c_max
,
min
=
1
)
c_max
=
torch
.
clamp
(
c_max
,
min
=
1
)
C
=
c_max
.
prod
()
C
=
c_max
.
prod
()
...
...
torch_cluster/functions/utils.py
View file @
72b1ce14
...
@@ -2,6 +2,7 @@ import torch
...
@@ -2,6 +2,7 @@ import torch
from
torch_unique
import
unique
from
torch_unique
import
unique
from
.._ext
import
ffi
from
.._ext
import
ffi
print
(
ffi
.
__dict__
)
def
get_func
(
name
,
tensor
):
def
get_func
(
name
,
tensor
):
...
...
torch_cluster/src/generic/cuda.c
View file @
72b1ce14
...
@@ -3,7 +3,7 @@
...
@@ -3,7 +3,7 @@
#else
#else
void
cluster_
(
grid
)(
int
C
,
THCudaLongTensor
*
output
,
THCTensor
*
position
,
THCTensor
*
size
,
THCudaLongTensor
*
count
)
{
void
cluster_
(
grid
)(
int
C
,
THCudaLongTensor
*
output
,
THCTensor
*
position
,
THCTensor
*
size
,
THCudaLongTensor
*
count
)
{
return
cluster_kernel_
(
grid
)(
state
,
C
,
output
,
position
,
size
,
count
);
/*
return cluster_kernel_(grid)(state, C, output, position, size, count);
*/
}
}
#endif
#endif
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment