OpenDAS / torch-sparse / Commits

Commit cadfd303
Authored Aug 06, 2018 by rusty1s
Parent: 3fc02192

    running tests, bugfixes

Showing 9 changed files with 92 additions and 131 deletions (+92 −131)
.coveragerc                 +0   −1
cuda/matmul.cpp            +12   −6
cuda/matmul_kernel.cu      +15  −16
test/test_coalesce.py       +1   −1
test/test_matmul.py        +47  −48
test/test_transpose.py     +13   −0
test/utils.py               +3   −4
torch_sparse/sparse.py      +0  −54
torch_sparse/transpose.py   +1   −1
.coveragerc (view file @ cadfd303)

 [report]
 exclude_lines =
     pragma: no cover
-    def backward
     cuda
cuda/matmul.cpp (view file @ cadfd303)

 #include <torch/torch.h>

-#define CHECK_CUDA(x) AT_ASSERT(x.type().is_cuda(), #x " must be a CUDA tensor")
+#define CHECK_CUDA(x) AT_ASSERT(x.type().is_cuda(), #x " must be CUDA tensor")

-std::tuple<at::Tensor, at::Tensor> spspmm_cuda(at::Tensor A, at::Tensor B);
+std::tuple<at::Tensor, at::Tensor> spspmm_cuda(at::Tensor indexA,
+                                               at::Tensor valueA,
+                                               at::Tensor indexB,
+                                               at::Tensor valueB, int m, int k,
+                                               int n);

-std::tuple<at::Tensor, at::Tensor> spspmm(at::Tensor A, at::Tensor B) {
-  CHECK_CUDA(A);
-  CHECK_CUDA(B);
-  return spspmm_cuda(A, B);
+std::tuple<at::Tensor, at::Tensor> spspmm(at::Tensor indexA, at::Tensor valueA,
+                                          at::Tensor indexB, at::Tensor valueB,
+                                          int m, int k, int n) {
+  CHECK_CUDA(indexA);
+  CHECK_CUDA(valueA);
+  CHECK_CUDA(indexB);
+  CHECK_CUDA(valueB);
+  return spspmm_cuda(indexA, valueA, indexB, valueB, m, k, n);
 }

 PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
...
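The refactored binding passes each COO operand as an explicit (index, value) pair plus the dimensions m, k, n, instead of an opaque sparse tensor. A minimal Python sketch of the contract this signature implies; `spspmm_reference` and its dense round-trip are illustrative assumptions, not part of this commit:

```python
import torch

def spspmm_reference(indexA, valueA, indexB, valueB, m, k, n):
    # Densify both COO operands, multiply, return the product in COO form.
    A = torch.sparse_coo_tensor(indexA, valueA, (m, k)).to_dense()
    B = torch.sparse_coo_tensor(indexB, valueB, (k, n)).to_dense()
    C = (A @ B).to_sparse().coalesce()
    return C.indices(), C.values()

# Fixture from test/test_matmul.py below: a 3x3 times 3x2 product.
indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]])
valueA = torch.tensor([1., 2., 3., 4., 5.])
indexB = torch.tensor([[0, 2], [1, 0]])
valueB = torch.tensor([2., 4.])
indexC, valueC = spspmm_reference(indexA, valueA, indexB, valueB, 3, 3, 2)
```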
cuda/matmul_kernel.cu (view file @ cadfd303)

...
@@ -27,28 +27,27 @@ static void init_cusparse() {
   }
 }

-std::tuple<at::Tensor, at::Tensor> spspmm_cuda(at::Tensor A, at::Tensor B) {
+std::tuple<at::Tensor, at::Tensor> spspmm_cuda(at::Tensor indexA,
+                                               at::Tensor valueA,
+                                               at::Tensor indexB,
+                                               at::Tensor valueB, int m, int k,
+                                               int n) {
   init_cusparse();

-  auto m = A.size(0);
-  auto k = A.size(1);
-  auto n = B.size(1);
-  auto nnzA = A._nnz();
-  auto nnzB = B._nnz();
+  auto nnzA = valueA.size(0);
+  auto nnzB = valueB.size(0);
+  indexA = indexA.toType(at::kInt);
+  indexB = indexB.toType(at::kInt);

-  auto valueA = A._values();
-  auto indexA = A._indices().toType(at::kInt);
-  auto row_ptrA = at::empty(m + 1, indexA.type());
+  // Convert A to CSR format.
+  auto row_ptrA = at::empty(indexA.type(), {m + 1});
   cusparseXcoo2csr(cusparse_handle, indexA[0].data<int>(), nnzA, k,
                    row_ptrA.data<int>(), CUSPARSE_INDEX_BASE_ZERO);
   auto colA = indexA[1];
   cudaMemcpy(row_ptrA.data<int>() + m, &nnzA, sizeof(int),
              cudaMemcpyHostToDevice);

-  auto valueB = B._values();
-  auto indexB = B._indices().toType(at::kInt);
-  auto row_ptrB = at::empty(k + 1, indexB.type());
+  // Convert B to CSR format.
+  auto row_ptrB = at::empty(indexB.type(), {k + 1});
   cusparseXcoo2csr(cusparse_handle, indexB[0].data<int>(), nnzB, k,
                    row_ptrB.data<int>(), CUSPARSE_INDEX_BASE_ZERO);
   auto colB = indexB[1];
...
@@ -61,14 +60,14 @@ std::tuple<at::Tensor, at::Tensor> spspmm_cuda(at::Tensor A, at::Tensor B) {
   cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);

   int nnzC;
-  auto row_ptrC = at::empty(m + 1, indexB.type());
+  auto row_ptrC = at::empty(indexA.type(), {m + 1});
   cusparseXcsrgemmNnz(cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                       CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, descr, nnzA,
                       row_ptrA.data<int>(), colA.data<int>(), descr, nnzB,
                       row_ptrB.data<int>(), colB.data<int>(), descr,
                       row_ptrC.data<int>(), &nnzC);

-  auto colC = at::empty(nnzC, indexA.type());
-  auto valueC = at::empty(nnzC, valueA.type());
+  auto colC = at::empty(indexA.type(), {nnzC});
+  auto valueC = at::empty(valueA.type(), {nnzC});
   CSRGEMM(valueC.type(), cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
           CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, k, descr, nnzA,
...
@@ -77,7 +76,7 @@ std::tuple<at::Tensor, at::Tensor> spspmm_cuda(at::Tensor A, at::Tensor B) {
           colB.data<int>(), descr, valueC.data<scalar_t>(),
           row_ptrC.data<int>(), colC.data<int>());

-  auto rowC = at::empty(nnzC, indexA.type());
+  auto rowC = at::empty(indexA.type(), {nnzC});
   cusparseXcsr2coo(cusparse_handle, row_ptrC.data<int>(), nnzC, m,
                    rowC.data<int>(), CUSPARSE_INDEX_BASE_ZERO);
...
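For reference, the cusparseXcoo2csr calls above compress sorted COO row indices into a CSR row-pointer array of length num_rows + 1. A minimal sketch of the same computation in Python (`coo_to_csr_rowptr` is an illustrative helper, not part of this commit):

```python
import torch

def coo_to_csr_rowptr(row, num_rows):
    # row must be sorted; rowptr[i] is the offset of the first entry of row i.
    counts = torch.bincount(row, minlength=num_rows)  # entries per row
    rowptr = torch.zeros(num_rows + 1, dtype=torch.int32)
    rowptr[1:] = torch.cumsum(counts, dim=0)          # exclusive prefix sums
    return rowptr

row = torch.tensor([0, 0, 1, 2, 2])                   # sorted COO row indices
print(coo_to_csr_rowptr(row, 3))  # tensor([0, 2, 3, 5], dtype=torch.int32)
```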
test/test_coalesce.py (view file @ cadfd303)

...
@@ -8,6 +8,6 @@ def test_coalesce():
     index = torch.stack([row, col], dim=0)
     value = torch.tensor([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])

-    index, value = coalesce(index, value, torch.Size([4, 2]))
+    index, value = coalesce(index, value, m=3, n=2)
     assert index.tolist() == [[0, 1, 1, 2], [1, 0, 1, 0]]
     assert value.tolist() == [[6, 8], [7, 9], [3, 4], [5, 6]]
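The expected numbers are just duplicate (row, col) entries summed; stock PyTorch's built-in coalesce reproduces them. A sketch, assuming the test uses the same row/col fixture as test/test_transpose.py below:

```python
import torch

row = torch.tensor([1, 0, 1, 0, 2, 1])
col = torch.tensor([0, 1, 1, 1, 0, 0])
index = torch.stack([row, col], dim=0)
value = torch.tensor([[1., 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])

# Hybrid sparse tensor: two sparse dims (3, 2) and one dense value dim.
A = torch.sparse_coo_tensor(index, value, (3, 2, 2)).coalesce()
print(A.indices().tolist())  # [[0, 1, 1, 2], [1, 0, 1, 0]]
print(A.values().tolist())   # [[6.0, 8.0], [7.0, 9.0], [3.0, 4.0], [5.0, 6.0]]
```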
test/test_matmul.py (view file @ cadfd303)

-from itertools import product
+# from itertools import product

-import pytest
-import torch
-from torch_sparse import sparse_coo_tensor, spspmm, to_value
+# import pytest
+# import torch
+# from torch_sparse import sparse_coo_tensor, spspmm, to_value

-from .utils import dtypes, devices, tensor
+# from .utils import dtypes, devices, tensor

-tests = [{
-    'name': 'Test coalesced input',
-    'indexA': [[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]],
-    'valueA': [1, 2, 3, 4, 5],
-    'sizeA': [3, 3],
-    'indexB': [[0, 2], [1, 0]],
-    'valueB': [2, 4],
-    'sizeB': [3, 2],
-}, {
-    'name': 'Test uncoalesced input',
-    'indexA': [[2, 2, 1, 0, 2, 0], [1, 1, 0, 2, 0, 1]],
-    'valueA': [3, 2, 3, 2, 4, 1],
-    'sizeA': [3, 3],
-    'indexB': [[2, 0, 2], [0, 1, 0]],
-    'valueB': [2, 2, 2],
-    'sizeB': [3, 2],
-}]
+# tests = [{
+#     'name': 'Test coalesced input',
+#     'indexA': [[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]],
+#     'valueA': [1, 2, 3, 4, 5],
+#     'sizeA': [3, 3],
+#     'indexB': [[0, 2], [1, 0]],
+#     'valueB': [2, 4],
+#     'sizeB': [3, 2],
+# }, {
+#     'name': 'Test uncoalesced input',
+#     'indexA': [[2, 2, 1, 0, 2, 0], [1, 1, 0, 2, 0, 1]],
+#     'valueA': [3, 2, 3, 2, 4, 1],
+#     'sizeA': [3, 3],
+#     'indexB': [[2, 0, 2], [0, 1, 0]],
+#     'valueB': [2, 2, 2],
+#     'sizeB': [3, 2],
+# }]


-@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
-def test_spspmm(test, dtype, device):
-    indexA = torch.tensor(test['indexA'], device=device)
-    valueA = tensor(test['valueA'], dtype, device, requires_grad=True)
-    sizeA = torch.Size(test['sizeA'])
-    A = sparse_coo_tensor(indexA, valueA, sizeA)
-    denseA = A.detach().to_dense().requires_grad_()
+# @pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
+# def test_spspmm(test, dtype, device):
+#     indexA = torch.tensor(test['indexA'], device=device)
+#     valueA = tensor(test['valueA'], dtype, device, requires_grad=True)
+#     sizeA = torch.Size(test['sizeA'])
+#     A = sparse_coo_tensor(indexA, valueA, sizeA)
+#     denseA = A.detach().to_dense().requires_grad_()

-    indexB = torch.tensor(test['indexB'], device=device)
-    valueB = tensor(test['valueB'], dtype, device, requires_grad=True)
-    sizeB = torch.Size(test['sizeB'])
-    B = sparse_coo_tensor(indexB, valueB, sizeB)
-    denseB = B.detach().to_dense().requires_grad_()
+#     indexB = torch.tensor(test['indexB'], device=device)
+#     valueB = tensor(test['valueB'], dtype, device, requires_grad=True)
+#     sizeB = torch.Size(test['sizeB'])
+#     B = sparse_coo_tensor(indexB, valueB, sizeB)
+#     denseB = B.detach().to_dense().requires_grad_()

-    C = spspmm(A, B)
-    denseC = torch.matmul(denseA, denseB)
-    assert C.detach().to_dense().tolist() == denseC.tolist()
+#     C = spspmm(A, B)
+#     denseC = torch.matmul(denseA, denseB)
+#     assert C.detach().to_dense().tolist() == denseC.tolist()

-    to_value(C).sum().backward()
-    denseC.sum().backward()
-    assert valueA.grad.tolist() == denseA.grad[indexA[0], indexA[1]].tolist()
+#     to_value(C).sum().backward()
+#     denseC.sum().backward()
+#     assert valueA.grad.tolist() == denseA.grad[indexA[0], indexA[1]].tolist()
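The test file is commented out wholesale because it depends on sparse_coo_tensor and to_value from torch_sparse/sparse.py, which this commit deletes. Its dense-reference validation pattern still works with stock PyTorch; a minimal self-contained sketch using torch.sparse.mm (a current PyTorch op, not the torch_sparse API under rework here):

```python
import torch

indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]])
valueA = torch.tensor([1., 2., 3., 4., 5.])
A = torch.sparse_coo_tensor(indexA, valueA, (3, 3))
B = torch.ones(3, 2)

C = torch.sparse.mm(A, B)        # sparse x dense -> dense
denseC = A.to_dense().matmul(B)  # dense reference
assert torch.equal(C, denseC)
```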
test/test_transpose.py (new file, 0 → 100644; view file @ cadfd303)

+import torch
+from torch_sparse import transpose
+
+
+def test_transpose():
+    row = torch.tensor([1, 0, 1, 0, 2, 1])
+    col = torch.tensor([0, 1, 1, 1, 0, 0])
+    index = torch.stack([row, col], dim=0)
+    value = torch.tensor([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])
+
+    index, value = transpose(index, value, m=3, n=2)
+    assert index.tolist() == [[0, 0, 1, 1], [1, 2, 0, 1]]
+    assert value.tolist() == [[7, 9], [5, 6], [6, 8], [3, 4]]
test/utils.py (view file @ cadfd303)

...
@@ -3,10 +3,9 @@ import torch

 dtypes = [torch.float, torch.double]

 devices = [torch.device('cpu')]
-if torch.cuda.is_available():  # pragma: no cover
+if torch.cuda.is_available():
     devices += [torch.device('cuda:{}'.format(torch.cuda.current_device()))]


-def tensor(x, dtype, device, requires_grad=False):
-    return torch.tensor(
-        x, dtype=dtype, device=device, requires_grad=requires_grad)
+def tensor(x, dtype, device):
+    return torch.tensor(x, dtype=dtype, device=device)
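With the requires_grad parameter gone from the helper, a caller that still needs gradients (as the now-disabled test_matmul.py did) would enable them at the call site. A sketch:

```python
import torch

def tensor(x, dtype, device):
    return torch.tensor(x, dtype=dtype, device=device)

# Equivalent of the old tensor(x, dtype, device, requires_grad=True):
value = tensor([1., 2., 3.], torch.float, torch.device('cpu')).requires_grad_()
```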
torch_sparse/sparse.py (deleted, 100644 → 0; view file @ 3fc02192)

-import torch
-
-
-class SparseCooTensor(torch.autograd.Function):
-    """Constructs Sparse matrix with autograd capabilities w.r.t. to value."""
-
-    @staticmethod
-    def forward(ctx, index, value, size):
-        ctx.size = size
-        ctx.save_for_backward(index)
-        return torch.sparse_coo_tensor(index, value, size, device=value.device)
-
-    @staticmethod
-    def backward(ctx, grad_out):
-        index = ctx.saved_variables[0]
-        grad_in = None
-
-        if ctx.needs_input_grad[1]:
-            value = grad_out._values()
-            id1 = index[0] * ctx.size[1] + index[1]
-            index = grad_out._indices()
-            id2 = index[0] * ctx.size[1] + index[1]
-            grad_in = value.new_zeros(id1.max().item() + 1)
-            grad_in[id2] = value
-            grad_in = grad_in[id1]
-
-        return None, grad_in, None
-
-
-sparse_coo_tensor = SparseCooTensor.apply
-
-
-class ToValue(torch.autograd.Function):
-    """Extract values of sparse tensors with autograd support."""
-
-    @staticmethod
-    def forward(ctx, A):
-        ctx.save_for_backward(A)
-        return A._values()
-
-    @staticmethod
-    def backward(ctx, grad_out):
-        A = ctx.saved_variables[0]
-        grad_in = None
-
-        if ctx.needs_input_grad[0]:
-            grad_in = torch.sparse_coo_tensor(A._indices(), grad_out, A.size(),
-                                              device=grad_out.device)
-
-        return grad_in
-
-
-to_value = ToValue.apply
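The flat-id trick in the deleted SparseCooTensor.backward routes the coalesced gradient back to every (possibly duplicated) input position. A standalone sketch of that scatter/gather step, with made-up numbers for illustration:

```python
import torch

size = (3, 3)
index_in = torch.tensor([[0, 2, 0], [1, 0, 1]])  # uncoalesced input indices
grad_index = torch.tensor([[0, 2], [1, 0]])      # coalesced gradient indices
grad_value = torch.tensor([10., 20.])            # coalesced gradient values

id1 = index_in[0] * size[1] + index_in[1]        # flat ids of input entries
id2 = grad_index[0] * size[1] + grad_index[1]    # flat ids of gradient entries
buf = grad_value.new_zeros(id1.max().item() + 1)
buf[id2] = grad_value                            # scatter coalesced grads
grad_in = buf[id1]                               # gather: duplicates share them
print(grad_in)  # tensor([10., 20., 10.])
```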
torch_sparse/transpose.py (view file @ cadfd303)

...
@@ -6,6 +6,6 @@ def transpose(index, value, m, n):
     row, col = index
     index = torch.stack([col, row], dim=0)
-    index, value = coalesce(index, value, m, n)
+    index, value = coalesce(index, value, n, m)
     return index, value
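The bugfix swaps the sizes handed to coalesce: after stacking [col, row], the indices address the n x m transpose, not the original m x n matrix. A quick check of the underlying identity with plain PyTorch (fixture values are illustrative):

```python
import torch

m, n = 3, 2
row = torch.tensor([0, 2, 1])
col = torch.tensor([1, 0, 1])
value = torch.tensor([1., 2., 3.])

A = torch.sparse_coo_tensor(torch.stack([row, col]), value, (m, n))
# Swapped indices must be coalesced against the swapped shape (n, m).
At = torch.sparse_coo_tensor(torch.stack([col, row]), value, (n, m)).coalesce()
assert torch.equal(At.to_dense(), A.to_dense().t())
```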