OpenDAS / torch-sparse · Commits · 61f01b59

Commit 61f01b59, authored Mar 18, 2020 by rusty1s
Commit message: added saint sampling
Parent: a1ae9033

Showing 4 changed files with 200 additions and 50 deletions (+200 −50)
test/test_saint.py          +32   −0
torch_sparse/__init__.py     +4   −0
torch_sparse/saint.py       +60   −0
torch_sparse/tensor.py     +104  −50
test/test_saint.py (new file, 0 → 100644)
import pytest
import torch
from torch_sparse.tensor import SparseTensor

from .utils import devices


@pytest.mark.parametrize('device', devices)
def test_sample_node(device):
    row = torch.tensor([0, 0, 1, 1, 2, 2, 2, 3, 3, 4])
    col = torch.tensor([1, 2, 0, 2, 0, 1, 3, 2, 4, 3])
    adj = SparseTensor(row=row, col=col).to(device)

    adj, perm = adj.sample_node(num_nodes=3)


@pytest.mark.parametrize('device', devices)
def test_sample_edge(device):
    row = torch.tensor([0, 0, 1, 1, 2, 2, 2, 3, 3, 4])
    col = torch.tensor([1, 2, 0, 2, 0, 1, 3, 2, 4, 3])
    adj = SparseTensor(row=row, col=col).to(device)

    adj, perm = adj.sample_edge(num_edges=3)


@pytest.mark.parametrize('device', devices)
def test_sample_rw(device):
    row = torch.tensor([0, 0, 1, 1, 2, 2, 2, 3, 3, 4])
    col = torch.tensor([1, 2, 0, 2, 0, 1, 3, 2, 4, 3])
    adj = SparseTensor(row=row, col=col).to(device)

    adj, perm = adj.sample_rw(num_root_nodes=3, walk_length=2)
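Note: the tests above only check that the calls run. A minimal sketch of what one might additionally assert, under the assumption (taken from saint.py below) that each sampler returns the subgraph restricted to the drawn nodes together with those node indices; sample_rw is still a stub at this commit and returns (None, None):

import torch
from torch_sparse import SparseTensor

row = torch.tensor([0, 0, 1, 1, 2, 2, 2, 3, 3, 4])
col = torch.tensor([1, 2, 0, 2, 0, 1, 3, 2, 4, 3])
adj = SparseTensor(row=row, col=col)

sub_adj, node_idx = adj.sample_node(num_nodes=3)
assert node_idx.numel() <= 3                 # drawn with replacement, then deduplicated
assert sub_adj.size(0) == node_idx.numel()   # subgraph over the drawn nodes (assumed semantics)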
torch_sparse/__init__.py
@@ -55,6 +55,7 @@ from .reduce import sum, mean, min, max  # noqa
from .matmul import matmul  # noqa
from .cat import cat, cat_diag  # noqa
from .metis import partition  # noqa
from .saint import sample_node, sample_edge, sample_rw  # noqa
from .convert import to_torch_sparse, from_torch_sparse  # noqa
from .convert import to_scipy, from_scipy  # noqa

@@ -95,6 +96,9 @@ __all__ = [
    'cat',
    'cat_diag',
    'partition',
    'sample_node',
    'sample_edge',
    'sample_rw',
    'to_torch_sparse',
    'from_torch_sparse',
    'to_scipy',
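With the import and __all__ entries above, the samplers are also reachable as plain functions from the package root. A small sketch of that form (the two-node graph is purely illustrative):

import torch
from torch_sparse import SparseTensor, sample_node

adj = SparseTensor(row=torch.tensor([0, 1]), col=torch.tensor([1, 0]))
sub_adj, node_idx = sample_node(adj, num_nodes=1)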
torch_sparse/saint.py (new file, 0 → 100644)
from typing import Tuple

import torch
import numpy as np
from torch_scatter import scatter_add
from torch_sparse.tensor import SparseTensor


def sample_node(src: SparseTensor,
                num_nodes: int) -> Tuple[torch.Tensor, torch.Tensor]:
    row, col, _ = src.coo()

    inv_in_deg = src.storage.colcount().to(torch.float).pow_(-1)
    inv_in_deg[inv_in_deg == float('inf')] = 0
    prob = inv_in_deg[col]
    prob.mul_(prob)
    prob = scatter_add(prob, row, dim=0, dim_size=src.size(0))
    prob.div_(prob.sum())

    node_idx = prob.multinomial(num_nodes, replacement=True).unique()

    return src.permute(node_idx), node_idx


def sample_edge(src: SparseTensor,
                num_edges: int) -> Tuple[torch.Tensor, torch.Tensor]:
    row, col, _ = src.coo()

    inv_out_deg = src.storage.rowcount().to(torch.float).pow_(-1)
    inv_out_deg[inv_out_deg == float('inf')] = 0
    inv_in_deg = src.storage.colcount().to(torch.float).pow_(-1)
    inv_in_deg[inv_in_deg == float('inf')] = 0

    prob = inv_out_deg[row] + inv_in_deg[col]
    prob.div_(prob.sum())

    edge_idx = prob.multinomial(num_edges, replacement=True)
    node_idx = col[edge_idx].unique()

    return src.permute(node_idx), node_idx


def sample_rw(src: SparseTensor, num_root_nodes: int,
              walk_length: int) -> Tuple[torch.Tensor, torch.Tensor]:
    start = np.random.choice(src.size(0), size=num_root_nodes, replace=False)
    start = torch.from_numpy(start).to(src.device())

    # get random walks of length `walk_length`:
    # => `rw.size(1) == walk_length + 1`
    return None, None


SparseTensor.sample_node = sample_node
SparseTensor.sample_edge = sample_edge
SparseTensor.sample_rw = sample_rw
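sample_rw draws its root nodes but leaves the walk itself unimplemented and returns (None, None). One way the walk step could be filled in, sketched here under two assumptions that are not part of this commit: that torch_cluster's random_walk(row, col, start, walk_length) is available, and that SparseTensor.permute(node_idx) extracts the subgraph over node_idx as the other samplers assume:

import torch
from torch_cluster import random_walk  # assumption: torch_cluster is installed

from torch_sparse.tensor import SparseTensor


def sample_rw_sketch(src: SparseTensor, num_root_nodes: int, walk_length: int):
    # Draw distinct root nodes uniformly at random.
    start = torch.randperm(src.size(0))[:num_root_nodes].to(src.device())
    row, col, _ = src.coo()

    # Random walks of length `walk_length`; `rw.size(1) == walk_length + 1`.
    rw = random_walk(row, col, start, walk_length)

    node_idx = rw.flatten().unique()
    return src.permute(node_idx), node_idx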
torch_sparse/tensor.py
@@ -12,17 +12,25 @@ from torch_sparse.utils import is_scalar
class SparseTensor(object):
    storage: SparseStorage

    def __init__(self, row: Optional[torch.Tensor] = None,
                 rowptr: Optional[torch.Tensor] = None,
                 col: Optional[torch.Tensor] = None,
                 value: Optional[torch.Tensor] = None,
                 sparse_sizes: Optional[Tuple[int, int]] = None,
                 is_sorted: bool = False):
        self.storage = SparseStorage(row=row, rowptr=rowptr, col=col,
                                     value=value, sparse_sizes=sparse_sizes,
                                     rowcount=None, colptr=None, colcount=None,
                                     csr2csc=None, csc2csr=None,
                                     is_sorted=is_sorted)

    @classmethod
    def from_storage(self, storage: SparseStorage):

@@ -45,12 +53,17 @@ class SparseTensor(object):
        if has_value:
            value = mat[row, col]

        return SparseTensor(row=row, rowptr=None, col=col, value=value,
                            sparse_sizes=(mat.size(0), mat.size(1)),
                            is_sorted=True)

    @classmethod
    def from_torch_sparse_coo_tensor(self, mat: torch.Tensor,
                                     has_value: bool = True):
        mat = mat.coalesce()
        index = mat._indices()

@@ -60,13 +73,20 @@ class SparseTensor(object):
        if has_value:
            value = mat._values()

        return SparseTensor(row=row, rowptr=None, col=col, value=value,
                            sparse_sizes=(mat.size(0), mat.size(1)),
                            is_sorted=True)

    @classmethod
    def eye(self, M: int, N: Optional[int] = None,
            options: Optional[torch.Tensor] = None, has_value: bool = True,
            fill_cache: bool = False):

        N = M if N is None else N

@@ -84,8 +104,8 @@ class SparseTensor(object):
        value: Optional[torch.Tensor] = None
        if has_value:
            if options is not None:
                value = torch.ones(row.numel(), dtype=options.dtype,
                                   device=row.device)
            else:
                value = torch.ones(row.numel(), device=row.device)

@@ -108,9 +128,17 @@ class SparseTensor(object):
            csr2csc = csc2csr = row

        storage: SparseStorage = SparseStorage(
            row=row, rowptr=rowptr, col=col, value=value, sparse_sizes=(M, N),
            rowcount=rowcount, colptr=colptr, colcount=colcount,
            csr2csc=csr2csc, csc2csr=csc2csr, is_sorted=True)

        self = SparseTensor.__new__(SparseTensor)
        self.storage = storage

@@ -153,12 +181,14 @@ class SparseTensor(object):
    def has_value(self) -> bool:
        return self.storage.has_value()

    def set_value_(self, value: Optional[torch.Tensor],
                   layout: Optional[str] = None):
        self.storage.set_value_(value, layout)
        return self

    def set_value(self, value: Optional[torch.Tensor],
                  layout: Optional[str] = None):
        return self.from_storage(self.storage.set_value(value, layout))

@@ -187,23 +217,31 @@ class SparseTensor(object):
    # Utility functions #######################################################

    def fill_value_(self, fill_value: float,
                    options: Optional[torch.Tensor] = None):
        if options is not None:
            value = torch.full((self.nnz(), ), fill_value,
                               dtype=options.dtype, device=self.device())
        else:
            value = torch.full((self.nnz(), ), fill_value,
                               device=self.device())
        return self.set_value_(value, layout='coo')

    def fill_value(self, fill_value: float,
                   options: Optional[torch.Tensor] = None):
        if options is not None:
            value = torch.full((self.nnz(), ), fill_value,
                               dtype=options.dtype, device=self.device())
        else:
            value = torch.full((self.nnz(), ), fill_value,
                               device=self.device())
        return self.set_value(value, layout='coo')

@@ -270,8 +308,13 @@ class SparseTensor(object):
        N = max(self.size(0), self.size(1))

        out = SparseTensor(row=row, rowptr=None, col=col, value=value,
                           sparse_sizes=(N, N), is_sorted=False)
        out = out.coalesce(reduce)
        return out

@@ -294,7 +337,8 @@ class SparseTensor(object):
        else:
            return False

    def requires_grad_(self, requires_grad: bool = True,
                       options: Optional[torch.Tensor] = None):
        if requires_grad and not self.has_value():
            self.fill_value_(1., options=options)

@@ -315,8 +359,8 @@ class SparseTensor(object):
        if value is not None:
            return value
        else:
            return torch.tensor(0., dtype=torch.float,
                                device=self.storage.col().device)

    def device(self):
        return self.storage.col().device

@@ -324,7 +368,8 @@ class SparseTensor(object):
    def cpu(self):
        return self.device_as(torch.tensor(0.), non_blocking=False)

    def cuda(self, options: Optional[torch.Tensor] = None,
             non_blocking: bool = False):
        if options is not None:
            return self.device_as(options, non_blocking)

@@ -387,19 +432,19 @@ class SparseTensor(object):
        row, col, value = self.coo()

        if value is not None:
            mat = torch.zeros(self.sizes(), dtype=value.dtype,
                              device=self.device())
        elif options is not None:
            mat = torch.zeros(self.sizes(), dtype=options.dtype,
                              device=self.device())
        else:
            mat = torch.zeros(self.sizes(), device=self.device())

        if value is not None:
            mat[row, col] = value
        else:
            mat[row, col] = torch.ones(self.nnz(), dtype=mat.dtype,
                                       device=mat.device)

        return mat

@@ -409,8 +454,8 @@ class SparseTensor(object):
        index = torch.stack([row, col], dim=0)

        if value is None:
            if options is not None:
                value = torch.ones(self.nnz(), dtype=options.dtype,
                                   device=self.device())
            else:
                value = torch.ones(self.nnz(), device=self.device())

@@ -434,7 +479,7 @@ def is_shared(self: SparseTensor) -> bool:
def to(self, *args: Optional[List[Any]],
       **kwargs: Optional[Dict[str, Any]]) -> SparseTensor:
    device, dtype, non_blocking = torch._C._nn._parse_to(*args, **kwargs)

    if dtype is not None:
        self = self.type_as(torch.tensor(0., dtype=dtype))

@@ -515,8 +560,8 @@ SparseTensor.__repr__ = __repr__
# Scipy Conversions ###########################################################

ScipySparseMatrix = Union[scipy.sparse.coo_matrix, scipy.sparse.csr_matrix,
                          scipy.sparse.csc_matrix]


@torch.jit.ignore

@@ -535,16 +580,25 @@ def from_scipy(mat: ScipySparseMatrix, has_value: bool = True) -> SparseTensor:
    value = torch.from_numpy(mat.data)
    sparse_sizes = mat.shape[:2]

    storage = SparseStorage(row=row, rowptr=rowptr, col=col, value=value,
                            sparse_sizes=sparse_sizes, rowcount=None,
                            colptr=colptr, colcount=None, csr2csc=None,
                            csc2csr=None, is_sorted=True)

    return SparseTensor.from_storage(storage)


@torch.jit.ignore
def to_scipy(self: SparseTensor, layout: Optional[str] = None,
             dtype: Optional[torch.dtype] = None) -> ScipySparseMatrix:
    assert self.dim() == 2
    layout = get_layout(layout)
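A recurring pattern in the reworked signatures above is an `options` tensor that acts purely as a dtype carrier (and, for cuda()/device_as(), a device carrier) when new values are allocated. A minimal sketch of the convention, based on fill_value_ as shown in the hunk above:

import torch
from torch_sparse import SparseTensor

adj = SparseTensor(row=torch.tensor([0, 1]), col=torch.tensor([1, 0]))

# The value of `options` is irrelevant; fill_value_ only reads its dtype.
options = torch.tensor(0., dtype=torch.float64)
adj = adj.fill_value_(1., options=options)  # per-edge values allocated as float64 on adj's device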