OpenDAS / torch-sparse / Commits

Commit 41458598
authored Jul 30, 2018 by rusty1s
parent 37a8124e

    to csr

Showing 9 changed files with 115 additions and 19 deletions (+115 / -19)
cuda/matmul.cpp             +6   -2
cuda/matmul_cuda.cu         +60  -0
setup.py                    +4   -1
test/__init__.py            +0   -0
test/test_matmul.py         +22  -8
test/utils.py               +11  -0
torch_sparse/__init__.py    +2   -0
torch_sparse/matmul.py      +3   -8
torch_sparse/sparse.py      +7   -0
cuda/matmul.cpp  (+6 / -2)

@@ -2,8 +2,12 @@
 #define CHECK_CUDA(x) AT_ASSERT(x.type().is_cuda(), #x " must be a CUDA tensor")
 
-at::SparseTensor spspmm(at::SparseTensor matrix1, at::SparseTensor matrix2) {
-  return matrix1;
+at::Tensor spspmm_cuda(at::Tensor matrix1, at::Tensor matrix2);
+
+at::Tensor spspmm(at::Tensor matrix1, at::Tensor matrix2) {
+  CHECK_CUDA(matrix1);
+  CHECK_CUDA(matrix2);
+  return spspmm_cuda(matrix1, matrix2);
 }
 
 PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
 ...
cuda/matmul_cuda.cu  (new file, +60 / -0)

#include <ATen/ATen.h>

#include <cusparse.h>

static cusparseHandle_t cusparse_handle = 0;

static void init_cusparse() {
  if (cusparse_handle == 0) {
    cusparseStatus_t status = cusparseCreate(&cusparse_handle);
  }
}

at::Tensor spspmm_cuda(at::Tensor matrix1, at::Tensor matrix2) {
  init_cusparse();

  auto nnz = matrix1._nnz();
  auto inDim = matrix1.size(1);
  auto row = matrix1._indices()[0].toType(at::kInt);
  auto row_ptrs = at::empty(row.type(), {inDim + 1});
  cusparseXcoo2csr(cusparse_handle, row.data<int>(), nnz, inDim,
                   row_ptrs.data<int>(), CUSPARSE_INDEX_BASE_ZERO);
  printf("%lli\n", nnz);
  printf("%lli\n", inDim);

  /* colbuf at::empty(nnz); */
  /* auto colPtrs = at::empty(inDim + 1, at::kInt); */
  /* auto row = matrix1._indices(); */
  /* for (int i = 0; i < 5; i++) { */
  /*   row_buf.data<int>()[i] = (int)row.data<int64_t>()[i]; */
  /* } */
  /* printf("%lli\n", row.numel()); */

  return matrix1;
}

/* #include <ATen/SparseTensorImpl.h> */
/* namespace at { */
/* namespace native { */
/* using SparseTensor = Tensor; */
/* namespace { */
/* at::SparseTensor spspmm_cuda(at::SparseTensor matrix1, */
/*                              at::SparseTensor matrix2) { */
/*   return matrix1; */
/* } */
/* } // namespace */
/* } // namespace native */
/* } // namespace at */

// defined in aten/src/THCUNN/SparseLinear.cu as
/* cusparseXcoo2csr(cusparse_handle, THCudaIntTensor_data(state, colbuf), nnz, */
/*                  inDim, THCudaIntTensor_data(state, colPtrs), */
/*                  CUSPARSE_INDEX_BASE_ONE); */
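Editor's note on the commit message ("to csr"): cusparseXcoo2csr compresses sorted COO row indices into a CSR row-pointer array of length rows + 1. The following plain-PyTorch sketch of that computation is illustrative only and is not part of the commit; the helper name coo_to_csr_rowptr is made up, and the example row indices are taken from e1 in test/test_matmul.py.

# Illustrative sketch of what cusparseXcoo2csr computes (not part of this commit).
# For each row i, row_ptrs[i] is the offset of that row's first nonzero in the
# sorted COO data; the final entry equals nnz.
import torch


def coo_to_csr_rowptr(row, num_rows):
    counts = torch.bincount(row, minlength=num_rows)  # nonzeros per row
    row_ptrs = torch.zeros(num_rows + 1, dtype=torch.long)
    row_ptrs[1:] = torch.cumsum(counts, dim=0)
    return row_ptrs


row = torch.tensor([0, 0, 1, 2, 2])        # row indices of e1 in test_matmul.py
print(coo_to_csr_rowptr(row, 3).tolist())  # [0, 2, 3, 5]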
setup.py  (+4 / -1)

@@ -12,7 +12,10 @@ ext_modules = []
 cmdclass = {}
 
 if torch.cuda.is_available():
-    ext_modules += [CUDAExtension('matmul_cuda', ['cuda/matmul.cpp'])]
+    ext_modules += [
+        CUDAExtension('matmul_cuda',
+                      ['cuda/matmul.cpp', 'cuda/matmul_cuda.cu'])
+    ]
     cmdclass['build_ext'] = BuildExtension
 
 setup(
 ...
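Editor's note: for context, here is a minimal standalone sketch of the extension build this file configures. It is illustrative only; the package name passed to setup() is an assumption, and the real setup.py additionally guards the extension behind torch.cuda.is_available() as shown in the hunk above.

# Sketch only: building the matmul_cuda extension from the two sources in this
# commit, using PyTorch's standard C++/CUDA extension machinery.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='torch_sparse',  # assumed package name, not confirmed by this diff
    ext_modules=[
        CUDAExtension('matmul_cuda',
                      ['cuda/matmul.cpp', 'cuda/matmul_cuda.cu'])
    ],
    cmdclass={'build_ext': BuildExtension},
)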
test/__init__.py  (new file, +0 / -0, empty)
test/test_matmul.py  (+22 / -8)

+from itertools import product
+
+import pytest
 import torch
 from torch_sparse import spspmm
 
+from .utils import dtypes, devices, tensor
+
+devices = [torch.device('cuda')]
+dtypes = [torch.float]
+
 
-def test_spspmm():
-    e1 = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]])
-    v1 = torch.tensor([1, 2, 3, 4, 5], dtype=torch.float, requires_grad=True)
+@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
+def test_spspmm(dtype, device):
+    e1 = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device)
+    v1 = tensor([1, 2, 3, 4, 5], dtype, device)
     matrix1 = (e1, v1, torch.Size([3, 3]))
 
-    e2 = torch.tensor([[0, 2], [1, 0]])
-    v2 = torch.tensor([2, 4], dtype=torch.float, requires_grad=True)
+    e2 = torch.tensor([[0, 2], [1, 0]], device=device)
+    v2 = tensor([2, 4], dtype, device)
     matrix2 = (e2, v2, torch.Size([3, 2]))
 
     index, value = spspmm(*matrix1, *matrix2)
-    out = torch.sparse.FloatTensor(index, value, torch.Size([3, 2])).to_dense()
-    assert out.tolist() == [[8, 0], [0, 6], [0, 8]]
-    value.sum().backward()
+    print(index)
+    print(value)
+    # out = torch.sparse_coo_tensor(index, value, torch.Size([3, 2]), dtype)
+    # out = out.to_dense()
+    # print(out)
+    # assert out.tolist() == [[8, 0], [0, 6], [0, 8]]
+    # value.sum().backward()
+    # TODO TEST backward
test/utils.py  (new file, +11 / -0)

import torch

dtypes = [torch.float, torch.double]

devices = [torch.device('cpu')]
if torch.cuda.is_available():  # pragma: no cover
    devices += [torch.device('cuda:{}'.format(torch.cuda.current_device()))]


def tensor(x, dtype, device):
    return None if x is None else torch.tensor(x, dtype=dtype, device=device)
torch_sparse/__init__.py  (+2 / -0)

+from .sparse import SparseTensor
 from .matmul import spspmm
 
 __all__ = [
+    'SparseTensor',
     'spspmm',
 ]
torch_sparse/matmul.py  (+3 / -8)

@@ -2,6 +2,7 @@ import torch
 from torch import from_numpy
 from scipy.sparse import coo_matrix
+from torch_sparse import SparseTensor
 
 import matmul_cuda
 ...
@@ -43,18 +44,12 @@ def mm(e1, v1, s1, e2, v2, s2):
 def mm_cuda(e1, v1, s1, e2, v2, s2):
-    matrix1 = to_sparse(e1, v1, s1)
-    matrix2 = to_sparse(e2, v2, s2)
+    matrix1 = SparseTensor(e1, v1, s1)
+    matrix2 = SparseTensor(e2, v2, s2)
     out = matmul_cuda.spspmm(matrix1, matrix2)
     return out._indices(), out._values()
 
 
-def to_sparse(index, value, size):
-    assert value.is_cuda
-    SparseTensor = getattr(torch.cuda.sparse, value.type().split('.')[-1])
-    return SparseTensor(index, value, size)
-
-
 def mm_cpu(e1, v1, s1, e2, v2, s2):
     matrix1, matrix2, = to_csr(e1, v1, s1), to_csr(e2, v2, s2)
     out = matrix1.dot(matrix2).tocoo()
 ...
torch_sparse/sparse.py  (new file, +7 / -0)

import torch


def SparseTensor(index, value, size):
    t = torch.cuda if value.is_cuda else torch
    SparseTensor = getattr(t.sparse, value.type().split('.')[-1])
    return SparseTensor(index, value, size)
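Editor's note: a short usage sketch of the factory above, replacing the CUDA-only to_sparse helper removed from torch_sparse/matmul.py. It is illustrative only and assumes the package from this commit is importable; because the constructor is looked up on torch.sparse or torch.cuda.sparse depending on where value lives, the same call covers CPU and GPU tensors.

# Usage sketch (not part of the commit): building the 3x3 COO matrix from the
# test with the SparseTensor factory; a CUDA value tensor would resolve to
# torch.cuda.sparse.FloatTensor instead.
import torch
from torch_sparse import SparseTensor

index = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]])
value = torch.tensor([1., 2., 3., 4., 5.])
mat = SparseTensor(index, value, torch.Size([3, 3]))
print(mat.type())      # torch.sparse.FloatTensor
print(mat.to_dense())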