OpenDAS / torch-cluster, commit eb66a19d

compile

Authored Mar 13, 2020 by rusty1s
Parent: 6bf96692
Showing 4 changed files with 5 additions and 5 deletions:

  csrc/cuda/grid_cuda.cu      (+1, -1)
  csrc/cuda/radius_cuda.cu    (+2, -2)
  csrc/cuda/rw_cuda.cu        (+1, -1)
  torch_cluster/__init__.py   (+1, -1)
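True to its one-word message, every hunk below is a small compilation fix: the thread-index type in grid_cuda.cu, a stale identifier in the radius_cuda.cu kernel launch, an undefined variable in the random-walk kernel, and the op namespace queried by the package's __init__.py.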
csrc/cuda/grid_cuda.cu  (+1, -1)

@@ -11,7 +11,7 @@ template <typename scalar_t>
 __global__ void grid_kernel(const scalar_t *pos, const scalar_t *size,
                             const scalar_t *start, const scalar_t *end,
                             int64_t *out, int64_t D, int64_t numel) {

-  const size_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
+  const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
   if (thread_idx < numel) {
     int64_t c = 0, k = 1;
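The only change is the type of thread_idx: the unsigned size_t becomes int64_t, the same type as the numel it is compared against in the guard, presumably to silence a signed/unsigned comparison warning that breaks the build under -Werror-style settings. For reference, a minimal self-contained sketch of the same one-thread-per-element pattern (all names and values here are hypothetical, not taken from the repository):

// scale.cu -- hypothetical demo of the bounds-checked 1-D indexing pattern.
#include <cstdint>
#include <cstdio>

__global__ void scale_kernel(const float *in, float *out, int64_t numel) {
  // Global thread index over a 1-D grid; int64_t on both sides of the
  // comparison below avoids a sign-mismatch warning and stays correct for
  // tensors with more than 2^31 elements.
  const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_idx < numel)
    out[thread_idx] = 2.0f * in[thread_idx];
}

int main() {
  const int64_t numel = 1 << 20;
  float *in, *out;
  cudaMallocManaged(&in, numel * sizeof(float));
  cudaMallocManaged(&out, numel * sizeof(float));
  for (int64_t i = 0; i < numel; i++)
    in[i] = 1.0f;

  const int threads = 256;  // a typical THREADS-per-block constant
  const int blocks = static_cast<int>((numel + threads - 1) / threads);
  scale_kernel<<<blocks, threads>>>(in, out, numel);
  cudaDeviceSynchronize();

  printf("out[0] = %.1f\n", out[0]);  // expect 2.0
  cudaFree(in);
  cudaFree(out);
  return 0;
}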
csrc/cuda/radius_cuda.cu  (+2, -2)

@@ -64,8 +64,8 @@ torch::Tensor radius_cuda(torch::Tensor x, torch::Tensor y, torch::Tensor ptr_x,
     radius_kernel<scalar_t><<<ptr_x.size(0) - 1, THREADS, 0, stream>>>(
         x.data_ptr<scalar_t>(), y.data_ptr<scalar_t>(),
         ptr_x.data_ptr<int64_t>(), ptr_y.data_ptr<int64_t>(),
-        row.data_ptr<int64_t>(), col.data_ptr<int64_t>(), radius,
-        max_num_neighbors, x.size(1));
+        row.data_ptr<int64_t>(), col.data_ptr<int64_t>(), r,
+        max_num_neighbors, x.size(1));
   });

   auto mask = row != -1;
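Here the kernel argument changes from radius to r; judging by the fix, the enclosing radius_cuda function's distance argument is named r, so the old identifier did not exist at this point and the launch failed to compile. The launch configuration itself is unchanged: ptr_x.size(0) - 1 blocks (one per segment of the ptr_x pointer array), THREADS threads per block, no dynamic shared memory, on the current stream. Entries of row that no neighbor filled stay at -1 and are filtered out by the mask = row != -1 line that follows.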
csrc/cuda/rw_cuda.cu  (+1, -1)

@@ -23,7 +23,7 @@ __global__ void uniform_random_walk_kernel(const int64_t *rowptr,
       cur = out[i];
       row_start = rowptr[cur], row_end = rowptr[cur + 1];

-      out[l * numel + n] =
+      out[l * numel + thread_idx] =
           col[row_start + int64_t(rand[i] * (row_end - row_start))];
     }
   }
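The variable n is not defined anywhere in the kernel (the fix suggests a leftover from a rename), so the write now uses thread_idx, the walk owned by the current thread: step l of walk thread_idx lands at out[l * numel + thread_idx], a step-major flattened layout. The right-hand side draws a uniformly random neighbor of cur from the CSR arrays rowptr/col. A host-side sketch of that sampling step (a hypothetical helper, not the library's API):

// rw_step.cpp -- hypothetical demo of one uniform random-walk step on a
// CSR graph: a value u in [0, 1) indexes into cur's neighbor list.
#include <cstdint>
#include <cstdio>
#include <random>
#include <vector>

int64_t uniform_step(const std::vector<int64_t> &rowptr,
                     const std::vector<int64_t> &col, int64_t cur, double u) {
  const int64_t row_start = rowptr[cur], row_end = rowptr[cur + 1];
  if (row_start == row_end)
    return cur;  // isolated node: stay put (the kernel's handling may differ)
  return col[row_start + int64_t(u * (row_end - row_start))];
}

int main() {
  // Tiny graph: 0 -> {1, 2}, 1 -> {2}, 2 -> {0}.
  std::vector<int64_t> rowptr = {0, 2, 3, 4};
  std::vector<int64_t> col = {1, 2, 2, 0};

  std::mt19937 gen(0);
  std::uniform_real_distribution<double> dist(0.0, 1.0);

  int64_t cur = 0;
  for (int step = 1; step <= 5; ++step) {
    cur = uniform_step(rowptr, col, cur, dist(gen));
    printf("step %d -> node %lld\n", step, (long long)cur);
  }
  return 0;
}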
torch_cluster/__init__.py  (+1, -1)

@@ -23,7 +23,7 @@ except OSError as e:
     raise OSError(e)

 if torch.version.cuda is not None:  # pragma: no cover
-    cuda_version = torch.ops.torch_sparse.cuda_version()
+    cuda_version = torch.ops.torch_cluster.cuda_version()

     if cuda_version == -1:
         major = minor = 0
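The version check was asking the sibling torch_sparse extension for its CUDA version, which looks like a copy-paste leftover; torch-cluster has to query its own op namespace. The op reports the CUDA toolkit version the native extension was compiled against, with -1 signalling a CPU-only build, which is exactly what the cuda_version == -1 branch handles. For context, one plausible way such an op gets registered on the C++ side so that torch.ops.torch_cluster.cuda_version() resolves in Python (a sketch assuming the custom-op API of that era, not necessarily this repository's code):

// version.cpp -- hypothetical sketch of exposing the build-time CUDA version.
#include <torch/script.h>

#ifdef WITH_CUDA
#include <cuda.h>
#endif

int64_t cuda_version() {
#ifdef WITH_CUDA
  return CUDA_VERSION;  // e.g. 10010 for CUDA 10.1
#else
  return -1;  // CPU-only build; matches the `cuda_version == -1` branch above
#endif
}

// Registers the op under the torch_cluster namespace.
static auto registry =
    torch::RegisterOperators().op("torch_cluster::cuda_version", &cuda_version);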