OpenDAS / torch-cluster / Commits / 023450c0
"tests/vscode:/vscode.git/clone" did not exist on "799c1cd21beff84e50ac4ab7a480e715780da2de"
Commit 023450c0 authored May 01, 2019 by rusty1s

pytorch 1.1.0 update

Parent: 9e048aad
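What changed: PyTorch 1.1.0 deprecates passing `tensor.type()` to the `AT_DISPATCH_*` macros in favor of `tensor.scalar_type()`, so every dispatch call site in the C++/CUDA sources is updated; the documented minimum PyTorch version and the package version are bumped, and the tests drop the newly reported `torch.bool` dtype. A minimal sketch of the dispatch idiom after this change (`scale_by_two` and its body are illustrative stand-ins, not code from this repository):

```cpp
#include <torch/extension.h>

// Hypothetical example of the PyTorch >= 1.1 dispatch idiom: the macro now
// takes tensor.scalar_type() instead of the deprecated tensor.type().
// Assumes a contiguous CPU tensor; scalar_t is bound inside the lambda.
at::Tensor scale_by_two(at::Tensor input) {
  auto out = at::empty_like(input);
  AT_DISPATCH_ALL_TYPES(input.scalar_type(), "scale_by_two", [&] {
    auto in_data = input.data<scalar_t>();  // data<T>() is the 1.1-era accessor
    auto out_data = out.data<scalar_t>();
    for (int64_t i = 0; i < input.numel(); i++) {
      out_data[i] = in_data[i] * 2;
    }
  });
  return out;
}
```

The same one-line substitution, `x.type()` to `x.scalar_type()`, accounts for eight of the thirteen files below.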
Showing 13 changed files with 15 additions and 14 deletions:
.travis.yml                +2 -2
README.md                  +2 -2
cpu/graclus.cpp            +1 -1
cuda/fps_kernel.cu         +1 -1
cuda/grid_kernel.cu        +1 -1
cuda/knn_kernel.cu         +1 -1
cuda/nearest_kernel.cu     +1 -1
cuda/proposal.cuh          +1 -1
cuda/radius_kernel.cu      +1 -1
cuda/response.cuh          +1 -1
setup.py                   +1 -1
test/utils.py              +1 -0
torch_cluster/__init__.py  +1 -1
.travis.yml

```diff
@@ -16,8 +16,8 @@ before_install:
   - export CC="gcc-4.9"
   - export CXX="g++-4.9"
 install:
-  - if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp35-cp35m-linux_x86_64.whl; fi
+  - if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp35-cp35m-linux_x86_64.whl; fi
-  - if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp36-cp36m-linux_x86_64.whl; fi
+  - if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl; fi
   - pip install pycodestyle
   - pip install flake8
   - pip install codecov
```
README.md

````diff
@@ -26,11 +26,11 @@ All included operations work on varying data types and are implemented both for
 ## Installation
 
-Ensure that at least PyTorch 1.0.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
+Ensure that at least PyTorch 1.1.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
 
 ```
 $ python -c "import torch; print(torch.__version__)"
->>> 1.0.0
+>>> 1.1.0
 
 $ echo $PATH
 >>> /usr/local/cuda/bin:...
````
cpu/graclus.cpp

```diff
@@ -49,7 +49,7 @@ at::Tensor weighted_graclus(at::Tensor row, at::Tensor col, at::Tensor weight,
   auto cluster = at::full(num_nodes, -1, row.options());
   auto cluster_data = cluster.data<int64_t>();
-  AT_DISPATCH_ALL_TYPES(weight.type(), "weighted_graclus", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "weighted_graclus", [&] {
     auto weight_data = weight.data<scalar_t>();
     for (int64_t i = 0; i < num_nodes; i++) {
```
cuda/fps_kernel.cu

```diff
@@ -189,7 +189,7 @@ at::Tensor fps_cuda(at::Tensor x, at::Tensor batch, float ratio, bool random) {
              cudaMemcpyDeviceToHost);
   auto out = at::empty(k_sum[0], k.options());
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "fps_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "fps_kernel", [&] {
     FPS_KERNEL(x.size(1), x.data<scalar_t>(), cum_deg.data<int64_t>(),
                cum_k.data<int64_t>(), start.data<int64_t>(),
                dist.data<scalar_t>(), tmp_dist.data<scalar_t>(),
```
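The CUDA files in this commit all share the same shape: the dispatch macro binds `scalar_t`, and the kernel launch happens inside the lambda. A self-contained sketch of that pattern under the 1.1 API (`fill_kernel` and these `THREADS`/`BLOCKS` definitions are hypothetical stand-ins, not this repository's):

```cuda
#include <torch/extension.h>

// Stand-in launch configuration; the repository defines its own equivalents.
#define THREADS 1024
#define BLOCKS(n) (((n) + THREADS - 1) / THREADS)

template <typename scalar_t>
__global__ void fill_kernel(scalar_t *out, scalar_t value, int64_t numel) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < numel) out[i] = value;
}

// Hypothetical host wrapper showing dispatch + launch with scalar_type().
at::Tensor fill_cuda(at::Tensor x, double value) {
  cudaSetDevice(x.get_device());
  auto out = at::empty_like(x);
  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "fill_kernel", [&] {
    fill_kernel<scalar_t><<<BLOCKS(out.numel()), THREADS>>>(
        out.data<scalar_t>(), static_cast<scalar_t>(value), out.numel());
  });
  return out;
}
```

The `grid_kernel`, `propose_kernel`, and `respond_kernel` launches below follow this exact shape.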
cuda/grid_kernel.cu

```diff
@@ -29,7 +29,7 @@ at::Tensor grid_cuda(at::Tensor pos, at::Tensor size, at::Tensor start,
   cudaSetDevice(pos.get_device());
   auto cluster = at::empty(pos.size(0), pos.options().dtype(at::kLong));
-  AT_DISPATCH_ALL_TYPES(pos.type(), "grid_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(pos.scalar_type(), "grid_kernel", [&] {
     grid_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(),
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(pos),
```
cuda/knn_kernel.cu

```diff
@@ -67,7 +67,7 @@ at::Tensor knn_cuda(at::Tensor x, at::Tensor y, size_t k, at::Tensor batch_x,
   auto row = at::empty(y.size(0) * k, batch_y.options());
   auto col = at::full(y.size(0) * k, -1, batch_y.options());
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "knn_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "knn_kernel", [&] {
     knn_kernel<scalar_t><<<batch_size, THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), dist.data<scalar_t>(), row.data<int64_t>(),
```
cuda/nearest_kernel.cu

```diff
@@ -71,7 +71,7 @@ at::Tensor nearest_cuda(at::Tensor x, at::Tensor y, at::Tensor batch_x,
   auto out = at::empty_like(batch_x);
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "nearest_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "nearest_kernel", [&] {
     nearest_kernel<scalar_t><<<x.size(0), THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), out.data<int64_t>(), x.size(1));
```
cuda/proposal.cuh

```diff
@@ -77,7 +77,7 @@ __global__ void propose_kernel(int64_t *__restrict__ cluster, int64_t *proposal,
 void propose(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
              at::Tensor col, at::Tensor weight) {
-  AT_DISPATCH_ALL_TYPES(weight.type(), "propose_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "propose_kernel", [&] {
     propose_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
         col.data<int64_t>(), weight.data<scalar_t>(), cluster.numel());
```
cuda/radius_kernel.cu

```diff
@@ -62,7 +62,7 @@ at::Tensor radius_cuda(at::Tensor x, at::Tensor y, float radius,
   auto row = at::full(y.size(0) * max_num_neighbors, -1, batch_y.options());
   auto col = at::full(y.size(0) * max_num_neighbors, -1, batch_y.options());
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "radius_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "radius_kernel", [&] {
     radius_kernel<scalar_t><<<batch_size, THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), row.data<int64_t>(), col.data<int64_t>(),
```
cuda/response.cuh

```diff
@@ -82,7 +82,7 @@ __global__ void respond_kernel(int64_t *__restrict__ cluster, int64_t *proposal,
 void respond(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
              at::Tensor col, at::Tensor weight) {
-  AT_DISPATCH_ALL_TYPES(weight.type(), "respond_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "respond_kernel", [&] {
     respond_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
         col.data<int64_t>(), weight.data<scalar_t>(), cluster.numel());
```
setup.py

```diff
@@ -27,7 +27,7 @@ if CUDA_HOME is not None:
                   ['cuda/rw.cpp', 'cuda/rw_kernel.cu']),
     ]
 
-__version__ = '1.2.4'
+__version__ = '1.3.0'
 url = 'https://github.com/rusty1s/pytorch_cluster'
 
 install_requires = ['scipy']
```
test/utils.py

```diff
@@ -3,6 +3,7 @@ from torch.testing import get_all_dtypes
 dtypes = get_all_dtypes()
 dtypes.remove(torch.half)
+dtypes.remove(torch.bool)
 grad_dtypes = [torch.float, torch.double]
```
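Under PyTorch 1.1.0, `torch.testing.get_all_dtypes()` also reports the new `torch.bool` dtype, which the clustering kernels do not dispatch over, so it is filtered out of the test dtypes just as `torch.half` already was. A hedged C++ probe of that behavior (`dispatch_supports` is a hypothetical helper, not part of the repository):

```cpp
#include <torch/extension.h>

// Hypothetical probe: AT_DISPATCH_ALL_TYPES covers the standard integral and
// floating-point scalar types; anything else (e.g. at::kBool, at::kHalf)
// hits the macro's error branch, which throws a c10::Error.
bool dispatch_supports(at::ScalarType dtype) {
  try {
    AT_DISPATCH_ALL_TYPES(dtype, "probe", [&] {});
    return true;
  } catch (const std::exception &) {
    return false;  // e.g. "probe" not implemented for 'Bool'
  }
}
```

With the 1.1-era macros, `dispatch_supports(at::kBool)` and `dispatch_supports(at::kHalf)` would both return false, matching the two `remove` calls above.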
torch_cluster/__init__.py

```diff
@@ -6,7 +6,7 @@ from .knn import knn, knn_graph
 from .radius import radius, radius_graph
 from .rw import random_walk
 
-__version__ = '1.2.4'
+__version__ = '1.3.0'
 
 __all__ = [
     'graclus_cluster',
```