OpenDAS / torch-sparse · Commits

Commit b6a1f005, authored Jan 26, 2020 by rusty1s (parent d49dcbbd)

    fixes
Showing 4 changed files with 43 additions and 5 deletions:

cuda/convert_kernel.cu    +2   -2
cuda/spmm.cpp             +3   -2
cuda/spspmm_kernel.cu     +1   -1
test/test_convert.py      +37  -0
cuda/convert_kernel.cu
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cusparse.h>

#include "compat.cuh"

-#define THREADS 1024
+#define THREADS 256

__global__ void ind2ptr_kernel(const int64_t *ind_data, int64_t *out_data,
                               int64_t M, int64_t numel) {
...

@@ -37,7 +38,6 @@ __global__ void ptr2ind_kernel(const int64_t *ptr_data, int64_t *out_data,
  int64_t thread_idx = blockDim.x * blockIdx.x + threadIdx.x;

-  // TODO: Make more efficient.
  if (thread_idx < numel) {
    int64_t idx = ptr_data[thread_idx], next_idx = ptr_data[thread_idx + 1];
    for (int64_t i = idx; i < next_idx; i++) {
...
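For reference, the two kernels in this file implement the COO/CSR index conversions: ind2ptr compresses a sorted row-index vector into a CSR rowptr array, and ptr2ind expands it back. A minimal pure-PyTorch sketch of the same conversions (the helper names and signatures here are illustrative, not part of the extension):

import torch

def ind2ptr(ind, M):
    # Count entries per row of a sorted COO row vector, then prefix-sum.
    counts = torch.bincount(ind, minlength=M)
    return torch.cat([counts.new_zeros(1), counts.cumsum(0)])

def ptr2ind(ptr):
    # Expand each CSR interval [ptr[i], ptr[i+1]) back to row index i.
    counts = ptr[1:] - ptr[:-1]
    rows = torch.arange(ptr.numel() - 1, device=ptr.device)
    return torch.repeat_interleave(rows, counts)

On a CSR matrix with M rows, ind2ptr(row, M) should reproduce mat.indptr, and ptr2ind(rowptr) should reproduce mat.row; the CUDA kernels above compute the same mapping one thread per element.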
cuda/spmm.cpp
...

@@ -6,8 +6,9 @@ std::tuple<at::Tensor, at::optional<at::Tensor>>
spmm_cuda(at::Tensor rowptr, at::Tensor col, at::optional<at::Tensor> value_opt,
          at::Tensor mat, std::string reduce);

-at::Tensor spmm_val_bw_cuda(at::Tensor index, at::Tensor rowptr, at::Tensor mat,
-                            at::Tensor grad, std::string reduce);
+at::Tensor spmm_val_bw_cuda(at::Tensor row, at::Tensor rowptr, at::Tensor col,
+                            at::Tensor mat, at::Tensor grad,
+                            std::string reduce);

std::tuple<at::Tensor, at::optional<at::Tensor>>
spmm(at::Tensor rowptr, at::Tensor col, at::optional<at::Tensor> value_opt,
...
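The value-gradient of SpMM suggests why the backward declaration now takes both row and col: for out = A @ mat with A given by (row, col, value) and reduce='sum', the gradient of each nonzero is a dot product between a row of grad and a row of mat. A dense-indexing reference sketch (assumed semantics for the 'sum' case, not the library's API):

import torch

def spmm_val_bw_reference(row, col, mat, grad):
    # grad_value[e] = <grad[row[e], :], mat[col[e], :]> for reduce='sum';
    # 'mean'/'max' reductions need extra bookkeeping not shown here.
    return (grad[row] * mat[col]).sum(dim=-1)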
cuda/spspmm_kernel.cu
...

@@ -122,7 +122,7 @@ spspmm_cuda(at::Tensor rowptrA, at::Tensor colA,
  });

  rowptrC = rowptrC.toType(at::kLong);
-  colC = col.toType(at::kLong);
+  colC = colC.toType(at::kLong);
  return std::make_tuple(rowptrC, colC, valueC);
}
...
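For orientation, spspmm_cuda returns the CSR triple (rowptrC, colC, valueC) of the product of two sparse matrices, with indices cast to int64 as above. A scipy reference of the same contract (a sketch; the exact parameter list of the CUDA entry point is not fully shown in this diff):

import numpy as np
import scipy.sparse as sp

def spspmm_reference(rowptrA, colA, valueA, rowptrB, colB, valueB, M, K, N):
    # Multiply two CSR matrices and return the CSR triple of the product,
    # mirroring the at::kLong casts with an int64 conversion.
    A = sp.csr_matrix((valueA, colA, rowptrA), shape=(M, K))
    B = sp.csr_matrix((valueB, colB, rowptrB), shape=(K, N))
    C = (A @ B).tocsr()
    return C.indptr.astype(np.int64), C.indices.astype(np.int64), C.data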
test/test_convert.py
+import time
import torch
from torch_sparse import to_scipy, from_scipy
from torch_sparse import to_torch_sparse, from_torch_sparse
+from torch_sparse.storage import SparseStorage
+from scipy.io import loadmat


def test_convert_scipy():
...
@@ -21,3 +24,37 @@ def test_convert_torch_sparse():
    out = from_torch_sparse(to_torch_sparse(index, value, N, N).coalesce())
    assert out[0].tolist() == index.tolist()
    assert out[1].tolist() == value.tolist()
+
+
+def test_ind2ptr():
+    name = ('DIMACS10', 'citationCiteseer')[1]
+    mat = loadmat(f'benchmark/{name}.mat')['Problem'][0][0][2]
+    mat = mat.tocsr().tocoo()
+
+    mat = mat.tocsr()
+    rowptr = torch.from_numpy(mat.indptr).to(torch.long).cuda()
+    mat = mat.tocoo()
+    row = torch.from_numpy(mat.row).to(torch.long).cuda()
+    col = torch.from_numpy(mat.col).to(torch.long).cuda()
+
+    storage = SparseStorage(row=row, col=col)
+    torch.cuda.synchronize()
+    t = time.perf_counter()
+    for _ in range(100):
+        storage.rowptr
+        storage._rowptr = None
+    torch.cuda.synchronize()
+    print(time.perf_counter() - t)
+    assert storage.rowptr.tolist() == rowptr.tolist()
+
+    storage = SparseStorage(rowptr=rowptr, col=col)
+    torch.cuda.synchronize()
+    t = time.perf_counter()
+    for _ in range(100):
+        storage.row
+        storage._row = None
+    torch.cuda.synchronize()
+    print(time.perf_counter() - t)
+    assert storage.row.tolist() == row.tolist()
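The test clears the cached attribute (storage._rowptr / storage._row) inside each loop so every access re-runs the conversion kernel rather than returning the cached tensor. Its synchronize/loop/synchronize timing pattern generalizes to a small helper; a sketch under the same assumptions as the test (CUDA available, wall-clock timing via perf_counter):

import time
import torch

def time_cuda(fn, iters=100):
    # Synchronize before and after so queued CUDA kernels are fully counted.
    torch.cuda.synchronize()
    t = time.perf_counter()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()
    return time.perf_counter() - t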