Commit 023450c0 authored by rusty1s

pytorch 1.1.0 update

parent 9e048aad
@@ -16,8 +16,8 @@ before_install:
 - export CC="gcc-4.9"
 - export CXX="g++-4.9"
 install:
-- if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp35-cp35m-linux_x86_64.whl; fi
-- if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp36-cp36m-linux_x86_64.whl; fi
+- if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp35-cp35m-linux_x86_64.whl; fi
+- if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl; fi
 - pip install pycodestyle
 - pip install flake8
 - pip install codecov
......
@@ -26,11 +26,11 @@ All included operations work on varying data types and are implemented both for
 ## Installation
-Ensure that at least PyTorch 1.0.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
+Ensure that at least PyTorch 1.1.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
 ```
 $ python -c "import torch; print(torch.__version__)"
->>> 1.0.0
+>>> 1.1.0
 $ echo $PATH
 >>> /usr/local/cuda/bin:...
......
@@ -49,7 +49,7 @@ at::Tensor weighted_graclus(at::Tensor row, at::Tensor col, at::Tensor weight,
   auto cluster = at::full(num_nodes, -1, row.options());
   auto cluster_data = cluster.data<int64_t>();
-  AT_DISPATCH_ALL_TYPES(weight.type(), "weighted_graclus", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "weighted_graclus", [&] {
     auto weight_data = weight.data<scalar_t>();
     for (int64_t i = 0; i < num_nodes; i++) {
......
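The hunk above shows the core change of this commit: from PyTorch 1.1.0 on, the `AT_DISPATCH_*` macros are passed a `ScalarType` via `tensor.scalar_type()` rather than the deprecated `tensor.type()`. A minimal CPU-side sketch of the new idiom, using a hypothetical `sum_positive` operator that is not part of this package:

```
// Minimal sketch (not from this repository) of the PyTorch >= 1.1 dispatch
// idiom used throughout this commit: pass tensor.scalar_type() to the
// AT_DISPATCH_* macros instead of the deprecated tensor.type().
// The operator name `sum_positive` is hypothetical.
#include <torch/extension.h>

at::Tensor sum_positive(at::Tensor src) {
  src = src.contiguous();
  auto out = at::zeros({}, src.options());
  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "sum_positive", [&] {
    // `scalar_t` is defined by the macro for each dispatched dtype.
    auto src_data = src.data<scalar_t>();
    scalar_t sum = 0;
    for (int64_t i = 0; i < src.numel(); i++) {
      if (src_data[i] > 0) sum += src_data[i];
    }
    *out.data<scalar_t>() = sum;
  });
  return out;
}
```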
@@ -189,7 +189,7 @@ at::Tensor fps_cuda(at::Tensor x, at::Tensor batch, float ratio, bool random) {
              cudaMemcpyDeviceToHost);
   auto out = at::empty(k_sum[0], k.options());
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "fps_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "fps_kernel", [&] {
     FPS_KERNEL(x.size(1), x.data<scalar_t>(), cum_deg.data<int64_t>(),
                cum_k.data<int64_t>(), start.data<int64_t>(),
                dist.data<scalar_t>(), tmp_dist.data<scalar_t>(),
......
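The CUDA kernels migrate in exactly the same way; only the first argument of the dispatch macro changes, while the templated kernel launch inside the lambda stays untouched. A minimal device-side sketch under the same assumptions (the `scale_cuda`/`scale_kernel` names and the `BLOCKS`/`THREADS` helpers are hypothetical, mirroring the pattern used in the kernels of this repository):

```
// Minimal CUDA sketch (not from this repository): the dispatch macro receives
// x.scalar_type() and launches a templated kernel, as in the hunks above.
#include <ATen/ATen.h>

#define THREADS 1024
#define BLOCKS(N) (((N) + THREADS - 1) / THREADS)

template <typename scalar_t>
__global__ void scale_kernel(scalar_t *x, scalar_t alpha, int64_t numel) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < numel) x[i] *= alpha;
}

at::Tensor scale_cuda(at::Tensor x, double alpha) {
  x = x.contiguous();
  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "scale_kernel", [&] {
    scale_kernel<scalar_t><<<BLOCKS(x.numel()), THREADS>>>(
        x.data<scalar_t>(), static_cast<scalar_t>(alpha), x.numel());
  });
  return x;
}
```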
@@ -29,7 +29,7 @@ at::Tensor grid_cuda(at::Tensor pos, at::Tensor size, at::Tensor start,
   cudaSetDevice(pos.get_device());
   auto cluster = at::empty(pos.size(0), pos.options().dtype(at::kLong));
-  AT_DISPATCH_ALL_TYPES(pos.type(), "grid_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(pos.scalar_type(), "grid_kernel", [&] {
     grid_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(),
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(pos),
......
@@ -67,7 +67,7 @@ at::Tensor knn_cuda(at::Tensor x, at::Tensor y, size_t k, at::Tensor batch_x,
   auto row = at::empty(y.size(0) * k, batch_y.options());
   auto col = at::full(y.size(0) * k, -1, batch_y.options());
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "knn_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "knn_kernel", [&] {
     knn_kernel<scalar_t><<<batch_size, THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), dist.data<scalar_t>(), row.data<int64_t>(),
......
@@ -71,7 +71,7 @@ at::Tensor nearest_cuda(at::Tensor x, at::Tensor y, at::Tensor batch_x,
   auto out = at::empty_like(batch_x);
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "nearest_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "nearest_kernel", [&] {
     nearest_kernel<scalar_t><<<x.size(0), THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), out.data<int64_t>(), x.size(1));
......
@@ -77,7 +77,7 @@ __global__ void propose_kernel(int64_t *__restrict__ cluster, int64_t *proposal,
 void propose(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
              at::Tensor col, at::Tensor weight) {
-  AT_DISPATCH_ALL_TYPES(weight.type(), "propose_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "propose_kernel", [&] {
     propose_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
         col.data<int64_t>(), weight.data<scalar_t>(), cluster.numel());
......
@@ -62,7 +62,7 @@ at::Tensor radius_cuda(at::Tensor x, at::Tensor y, float radius,
   auto row = at::full(y.size(0) * max_num_neighbors, -1, batch_y.options());
   auto col = at::full(y.size(0) * max_num_neighbors, -1, batch_y.options());
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "radius_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "radius_kernel", [&] {
     radius_kernel<scalar_t><<<batch_size, THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), row.data<int64_t>(), col.data<int64_t>(),
......
@@ -82,7 +82,7 @@ __global__ void respond_kernel(int64_t *__restrict__ cluster, int64_t *proposal,
 void respond(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
              at::Tensor col, at::Tensor weight) {
-  AT_DISPATCH_ALL_TYPES(weight.type(), "respond_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "respond_kernel", [&] {
     respond_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
         col.data<int64_t>(), weight.data<scalar_t>(), cluster.numel());
......
@@ -27,7 +27,7 @@ if CUDA_HOME is not None:
                   ['cuda/rw.cpp', 'cuda/rw_kernel.cu']),
 ]
-__version__ = '1.2.4'
+__version__ = '1.3.0'
 url = 'https://github.com/rusty1s/pytorch_cluster'
 install_requires = ['scipy']
......
@@ -3,6 +3,7 @@ from torch.testing import get_all_dtypes
dtypes = get_all_dtypes()
dtypes.remove(torch.half)
dtypes.remove(torch.bool)
grad_dtypes = [torch.float, torch.double]
......
@@ -6,7 +6,7 @@ from .knn import knn, knn_graph
 from .radius import radius, radius_graph
 from .rw import random_walk
-__version__ = '1.2.4'
+__version__ = '1.3.0'
 __all__ = [
     'graclus_cluster',
......