Commit f41934df (unverified)
Authored Apr 20, 2023 by czkkkkkk; committed by GitHub on Apr 20, 2023

[Sparse] Add conversion between DGLGraph and SparseMatrix. (#5553)

Parent: e6226e82

22 changes in total; showing 20 changed files with 182 additions and 90 deletions (+182 -90). The remaining files are on a second page.
benchmarks/benchmarks/api/bench_homograph_scipy_construction.py   +2 -4
docs/source/api/python/dgl.DGLGraph.rst                           +2 -1
examples/pytorch/TAHIN/data_loader.py                             +1 -1
examples/pytorch/diffpool/model/dgl_layers/gnn.py                 +1 -1
examples/pytorch/dtgrnn/dcrnn.py                                  +2 -2
examples/pytorch/ogb/ngnn_seal/main.py                            +1 -1
examples/pytorch/ogb/seal_ogbl/main.py                            +1 -1
examples/pytorch/seal/utils.py                                    +1 -1
examples/pytorch/vgae/train.py                                    +2 -2
python/dgl/heterograph.py                                         +87 -19
python/dgl/nn/pytorch/conv/cugraph_gatconv.py                     +1 -1
python/dgl/nn/pytorch/conv/cugraph_relgraphconv.py                +1 -1
python/dgl/nn/pytorch/conv/cugraph_sageconv.py                    +1 -1
python/dgl/transforms/functional.py                               +24 -17
tests/python/common/test_heterograph-misc.py                      +8 -8
tests/python/common/test_heterograph.py                           +29 -13
tests/python/common/test_traversal.py                             +1 -1
tests/python/common/transforms/test_functional-sort.py            +8 -8
tests/python/common/transforms/test_transform.py                  +4 -2
tests/python/mxnet/test_nn.py                                     +5 -5
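Taken together, the rename map is: adj() now returns a dgl.sparse.SparseMatrix, the old SciPy/backend-tensor behaviour moves to adj_external(), and adj_sparse() becomes adj_tensors(). A minimal before/after sketch, assuming the PyTorch backend (the toy graph is illustrative, not from the diff):

import dgl

g = dgl.graph(([0, 1, 2], [1, 2, 3]))

# After this commit, adj() returns a dgl.sparse.SparseMatrix.
A = g.adj()

# The old adj()/adjacency_matrix() behaviour (SciPy matrix or backend
# sparse tensor, with transpose/ctx/scipy_fmt arguments) lives in
# adj_external(), which keeps the old signature.
A_scipy = g.adj_external(scipy_fmt="coo")

# adj_sparse(fmt) is renamed to adj_tensors(fmt) with unchanged semantics.
row, col = g.adj_tensors("coo")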
benchmarks/benchmarks/api/bench_homograph_scipy_construction.py

@@ -15,12 +15,10 @@ from .. import utils
 @utils.parametrize("scipy_format", ["coo", "csr"])
 def track_time(size, scipy_format):
     matrix_dict = {
         "small": dgl.data.CiteseerGraphDataset(verbose=False)[
             0
-        ].adjacency_matrix(scipy_fmt=scipy_format),
-        "large": utils.get_livejournal().adjacency_matrix(
-            scipy_fmt=scipy_format
-        ),
+        ].adj_external(scipy_fmt=scipy_format),
+        "large": utils.get_livejournal().adj_external(scipy_fmt=scipy_format),
     }
     # dry run
docs/source/api/python/dgl.DGLGraph.rst

@@ -144,8 +144,9 @@ Methods for getting the adjacency and the incidence matrix of the graph.
     :toctree: ../../generated/

     DGLGraph.adj
-    DGLGraph.adj_sparse
     DGLGraph.adjacency_matrix
+    DGLGraph.adj_tensors
+    DGLGraph.adj_external
     DGLGraph.inc
     DGLGraph.incidence_matrix
examples/pytorch/TAHIN/data_loader.py

@@ -19,7 +19,7 @@ def split_data(hg, etype_name):
     pos_label = [1] * num_link
     pos_data = list(zip(user_item_src, user_item_dst, pos_label))

-    ui_adj = np.array(hg.adj(etype=etype_name).to_dense())
+    ui_adj = np.array(hg.adj_external(etype=etype_name).to_dense())
     full_idx = np.where(ui_adj == 0)
     sample = random.sample(range(0, len(full_idx[0])), num_link)
examples/pytorch/diffpool/model/dgl_layers/gnn.py

@@ -141,7 +141,7 @@ class DiffPoolBatchedGraphLayer(nn.Module):
         )  # size = (sum_N, batch_size * N_a)
         h = torch.matmul(torch.t(assign_tensor), feat)
-        adj = g.adjacency_matrix(transpose=True, ctx=device)
+        adj = g.adj_external(transpose=True, ctx=device)
         adj_new = torch.sparse.mm(adj, assign_tensor)
         adj_new = torch.mm(torch.t(assign_tensor), adj_new)
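The two matmul lines around the changed call are DiffPool's coarsening step, A' = S^T A S, done as one sparse-dense and one dense-dense product. A standalone sketch under assumed shapes (the names and sizes are illustrative, not from the example):

import torch

n, m = 6, 2                      # nodes before pooling / clusters after
S = torch.rand(n, m)             # dense assignment matrix (assign_tensor)
A = torch.eye(n).to_sparse()     # sparse adjacency stand-in

AS = torch.sparse.mm(A, S)       # (n, m), like torch.sparse.mm(adj, assign_tensor)
A_new = torch.mm(S.t(), AS)      # (m, m) coarsened adjacency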
examples/pytorch/dtgrnn/dcrnn.py

@@ -71,7 +71,7 @@ class DiffConv(nn.Module):
     @staticmethod
     def get_weight_matrix(g):
-        adj = g.adj(scipy_fmt="coo")
+        adj = g.adj_external(scipy_fmt="coo")
         ind = g.in_degrees()
         outd = g.out_degrees()
         weight = g.edata["weight"]
@@ -81,7 +81,7 @@ class DiffConv(nn.Module):
     @staticmethod
     def diffuse(progress_g, weighted_adj, degree):
         device = progress_g.device
-        progress_adj = progress_g.adj(scipy_fmt="coo")
+        progress_adj = progress_g.adj_external(scipy_fmt="coo")
         progress_adj.data = progress_g.edata["weight"].cpu().numpy()
         ret_adj = sparse.coo_matrix(
             progress_adj @ (weighted_adj / degree.cpu().numpy())
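Both hunks rely on the same trick: adj_external(scipy_fmt="coo") returns a SciPy COO matrix, and the example overwrites its .data with g.edata["weight"] to obtain a weighted adjacency. This assumes the COO entries are laid out in edge-ID order, which is what the DiffConv code above depends on. A small sketch with an assumed toy graph:

import dgl
import torch

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
g.edata["weight"] = torch.tensor([0.5, 0.2, 0.3])

adj = g.adj_external(scipy_fmt="coo")        # values default to 1
adj.data = g.edata["weight"].cpu().numpy()   # assumes COO entries follow edge-ID order
print(adj.todense())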
examples/pytorch/ogb/ngnn_seal/main.py

@@ -85,7 +85,7 @@ class SEALOGBLDataset(Dataset):
         NIDs, EIDs = subg.ndata[dgl.NID], subg.edata[dgl.EID]

-        z = drnl_node_labeling(subg.adj(scipy_fmt="csr"), 0, 1)
+        z = drnl_node_labeling(subg.adj_external(scipy_fmt="csr"), 0, 1)
         edge_weights = (
             self.edge_weights[EIDs] if self.edge_weights is not None else None
         )
examples/pytorch/ogb/seal_ogbl/main.py

@@ -150,7 +150,7 @@ class SealSampler(Sampler):
             subg.remove_edges(edges_to_remove)

         # add double radius node labeling
         subg.ndata["z"] = self._double_radius_node_labeling(
-            subg.adj(scipy_fmt="csr")
+            subg.adj_external(scipy_fmt="csr")
         )
         subg_aug = subg.add_self_loop()
         if "weight" in subg.edata:
examples/pytorch/seal/utils.py

@@ -72,7 +72,7 @@ def drnl_node_labeling(subgraph, src, dst):
     Returns:
         z(Tensor): node labeling tensor
     """
-    adj = subgraph.adj().to_dense().numpy()
+    adj = subgraph.adj_external().to_dense().numpy()
     src, dst = (dst, src) if src > dst else (src, dst)

     idx = list(range(src)) + list(range(src + 1, adj.shape[0]))
examples/pytorch/vgae/train.py

@@ -125,7 +125,7 @@ def dgl_main():
     in_dim = feats.shape[-1]

     # generate input
-    adj_orig = graph.adjacency_matrix().to_dense()
+    adj_orig = graph.adj_external().to_dense()

     # build test set with 10% positive links
     (
@@ -142,7 +142,7 @@ def dgl_main():
     train_edge_idx = torch.tensor(train_edge_idx).to(device)
     train_graph = dgl.edge_subgraph(graph, train_edge_idx, relabel_nodes=False)
     train_graph = train_graph.to(device)
-    adj = train_graph.adjacency_matrix().to_dense().to(device)
+    adj = train_graph.adj_external().to_dense().to(device)

     # compute loss parameters
     weight_tensor, norm = compute_loss_para(adj)
python/dgl/heterograph.py

@@ -3754,14 +3754,90 @@ class DGLGraph(object):
         else:
             return deg

-    def adjacency_matrix(
-        self, transpose=False, ctx=F.cpu(), scipy_fmt=None, etype=None
-    ):
+    def adjacency_matrix(self, etype=None):
         """Alias of :meth:`adj`"""
-        return self.adj(transpose, ctx, scipy_fmt, etype)
+        return self.adj(etype)

-    def adj(self, transpose=False, ctx=F.cpu(), scipy_fmt=None, etype=None):
-        """Return the adjacency matrix of edges of the given edge type.
+    def adj(self, etype=None, eweight_name=None):
+        """Get the adjacency matrix of the graph.
+
+        Parameters
+        ----------
+        etype : str or (str, str, str), optional
+            The type names of the edges. The allowed type name formats are:
+
+            * ``(str, str, str)`` for source node type, edge type and
+              destination node type.
+            * or one ``str`` edge type name if the name can uniquely identify a
+              triplet format in the graph.
+
+            Can be omitted if the graph has only one type of edges.
+        eweight_name : str, optional
+            The name of edge feature used as the non-zero values. If not given,
+            the non-zero values are all 1.
+
+        Returns
+        -------
+        SparseMatrix
+            The adjacency matrix.
+
+        Examples
+        --------
+
+        The following example uses PyTorch backend.
+
+        >>> import dgl
+        >>> import torch
+
+        >>> g = dgl.graph(([0, 1, 2], [1, 2, 3]))
+        >>> g.adj()
+        SparseMatrix(indices=tensor([[0, 1, 2],
+                                     [1, 2, 3]]),
+                     values=tensor([1., 1., 1.]),
+                     shape=(4, 4), nnz=3)
+
+        >>> g = dgl.heterograph({
+        ...     ('user', 'follows', 'user'): ([0, 1], [0, 1]),
+        ...     ('developer', 'develops', 'game'): ([0, 1], [0, 2])
+        ... })
+        >>> g.adj(etype='develops')
+        SparseMatrix(indices=tensor([[0, 1],
+                                     [0, 2]]),
+                     values=tensor([1., 1.]),
+                     shape=(2, 3), nnz=2)
+
+        >>> g.edata['h'] = {('user', 'follows', 'user'): torch.tensor([3, 2])}
+        >>> g.adj(etype='follows', eweight_name='h')
+        SparseMatrix(indices=tensor([[0, 1],
+                                     [0, 1]]),
+                     values=tensor([3, 2]),
+                     shape=(2, 2), nnz=2)
+        """
+        assert (
+            F.backend_name == "pytorch"
+        ), "Only PyTorch backend supports adj."
+        # Temporal fix to introduce a dependency on torch
+        import torch
+
+        from .sparse import spmatrix
+
+        etype = self.to_canonical_etype(etype)
+        indices = torch.stack(self.all_edges(etype=etype))
+        shape = (self.num_nodes(etype[0]), self.number_of_nodes(etype[2]))
+        if eweight_name is not None:
+            val = self.edata[eweight_name][etype]
+        else:
+            val = None
+        return spmatrix(
+            indices,
+            val=val,
+            shape=shape,
+        )
+
+    def adj_external(
+        self, transpose=False, ctx=F.cpu(), scipy_fmt=None, etype=None
+    ):
+        """Return the adjacency matrix in an external format, such as Scipy or
+        backend dependent sparse tensor.

         By default, a row of returned adjacency matrix represents the
         source of an edge and the column represents the destination.
@@ -3787,7 +3863,6 @@ class DGLGraph(object):
             Can be omitted if the graph has only one type of edges.

         Returns
         -------
         SparseTensor or scipy.sparse.spmatrix
@@ -3810,7 +3885,7 @@ class DGLGraph(object):
         Get a backend dependent sparse tensor. Here we use PyTorch for example.

-        >>> g.adj(etype='develops')
+        >>> g.adj_external(etype='develops')
         tensor(indices=tensor([[0, 1],
                                [0, 2]]),
                values=tensor([1., 1.]),
@@ -3818,7 +3893,7 @@ class DGLGraph(object):
         Get a scipy coo sparse matrix.

-        >>> g.adj(scipy_fmt='coo', etype='develops')
+        >>> g.adj_external(scipy_fmt='coo', etype='develops')
         <2x3 sparse matrix of type '<class 'numpy.int64'>'
            with 2 stored elements in COOrdinate format>
         """
@@ -3830,44 +3905,37 @@ class DGLGraph(object):
             etid, transpose, scipy_fmt, False
         )

-    def adj_sparse(self, fmt, etype=None):
+    def adj_tensors(self, fmt, etype=None):
         """Return the adjacency matrix of edges of the given edge type as tensors of
         a sparse matrix representation.

         By default, a row of returned adjacency matrix represents the
         source of an edge and the column represents the destination.

         Parameters
         ----------
         fmt : str
             Either ``coo``, ``csr`` or ``csc``.
         etype : str or (str, str, str), optional
             The type names of the edges. The allowed type name formats are:

             * ``(str, str, str)`` for source node type, edge type and destination node type.
             * or one ``str`` edge type name if the name can uniquely identify a
               triplet format in the graph.

             Can be omitted if the graph has only one type of edges.

         Returns
         -------
         tuple[Tensor]
             If :attr:`fmt` is ``coo``, returns a pair of source and destination node ID
             tensors.
             If :attr:`fmt` is ``csr`` or ``csc``, return the CSR or CSC representation
             of the adjacency matrix as a triplet of tensors
             ``(indptr, indices, edge_ids)``. Namely ``edge_ids`` could be an empty
             tensor with 0 elements, in which case the edge IDs are consecutive
             integers starting from 0.

         Examples
         --------

         >>> g = dgl.graph(([0, 1, 2], [1, 2, 3]))
-        >>> g.adj_sparse('coo')
+        >>> g.adj_tensors('coo')
         (tensor([0, 1, 2]), tensor([1, 2, 3]))
-        >>> g.adj_sparse('csr')
+        >>> g.adj_tensors('csr')
         (tensor([0, 1, 2, 3, 3]), tensor([1, 2, 3]), tensor([0, 1, 2]))
         """
         etid = self.get_etype_id(etype)
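Because adj() now returns a dgl.sparse.SparseMatrix, callers can stay inside DGL's sparse package instead of round-tripping through SciPy. A hedged sketch of one-hop feature aggregation via dgl.sparse.spmm (the import path and signature are assumptions from DGL's 1.x sparse API, not from this diff):

import dgl
import dgl.sparse as dglsp
import torch

g = dgl.graph(([0, 1, 2], [1, 2, 3]))
A = g.adj()               # SparseMatrix of shape (4, 4)
X = torch.randn(4, 16)    # node features

H = dglsp.spmm(A, X)      # sparse-dense product: one-hop aggregation
print(H.shape)            # torch.Size([4, 16])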
python/dgl/nn/pytorch/conv/cugraph_gatconv.py

@@ -163,7 +163,7 @@ class CuGraphGATConv(nn.Module):
             :math:`H` is the number of heads, and :math:`D_{out}` is size of
             output feature.
         """
-        offsets, indices, _ = g.adj_sparse("csc")
+        offsets, indices, _ = g.adj_tensors("csc")

         if g.is_block:
             if max_in_degree is None:
python/dgl/nn/pytorch/conv/cugraph_relgraphconv.py

@@ -177,7 +177,7 @@ class CuGraphRelGraphConv(nn.Module):
             New node features. Shape: :math:`(|V|, D_{out})`.
         """
         # Create csc-representation and cast etypes to int32.
-        offsets, indices, edge_ids = g.adj_sparse("csc")
+        offsets, indices, edge_ids = g.adj_tensors("csc")
         edge_types_perm = etypes[edge_ids.long()].int()

         # Create cugraph-ops graph.
python/dgl/nn/pytorch/conv/cugraph_sageconv.py

@@ -119,7 +119,7 @@ class CuGraphSAGEConv(nn.Module):
         torch.Tensor
             Output node features. Shape: :math:`(N, D_{out})`.
         """
-        offsets, indices, _ = g.adj_sparse("csc")
+        offsets, indices, _ = g.adj_tensors("csc")

         if g.is_block:
             if max_in_degree is None:
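All three cuGraph wrappers consume the same CSC triplet from adj_tensors("csc"). A sketch of what (offsets, indices, edge_ids) encode, on an assumed toy graph (per the adj_tensors docstring above, an empty edge_ids means edge IDs are consecutive from 0):

import dgl

g = dgl.graph(([0, 1, 2], [1, 2, 3]))
offsets, indices, edge_ids = g.adj_tensors("csc")

# In CSC, offsets[v] : offsets[v + 1] slices the in-edges of node v,
# and indices holds the corresponding source nodes.
for v in range(g.num_nodes()):
    srcs = indices[offsets[v] : offsets[v + 1]]
    print(v, srcs.tolist())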
python/dgl/transforms/functional.py

@@ -1165,7 +1165,9 @@ def khop_adj(g, k):
            [0., 1., 3., 3., 1.]])
     """
     assert g.is_homogeneous, "only homogeneous graph is supported"
-    adj_k = g.adj(transpose=True, scipy_fmt=g.formats()["created"][0]) ** k
+    adj_k = (
+        g.adj_external(transpose=True, scipy_fmt=g.formats()["created"][0]) ** k
+    )
     return F.tensor(adj_k.todense().astype(np.float32))
@@ -1235,7 +1237,10 @@ def khop_graph(g, k, copy_ndata=True):
     """
     assert g.is_homogeneous, "only homogeneous graph is supported"
     n = g.num_nodes()
-    adj_k = g.adj(transpose=False, scipy_fmt=g.formats()["created"][0]) ** k
+    adj_k = (
+        g.adj_external(transpose=False, scipy_fmt=g.formats()["created"][0])
+        ** k
+    )
     adj_k = adj_k.tocoo()
     multiplicity = adj_k.data
     row = np.repeat(adj_k.row, multiplicity)
@@ -1447,7 +1452,7 @@ def laplacian_lambda_max(g):
     rst = []
     for g_i in g_arr:
         n = g_i.num_nodes()
-        adj = g_i.adj(
+        adj = g_i.adj_external(
             transpose=True, scipy_fmt=g_i.formats()["created"][0]
         ).astype(float)
         norm = sparse.diags(
@@ -1511,7 +1516,9 @@ def metapath_reachable_graph(g, metapath):
     """
     adj = 1
     for etype in metapath:
-        adj = adj * g.adj(etype=etype, scipy_fmt="csr", transpose=False)
+        adj = adj * g.adj_external(
+            etype=etype, scipy_fmt="csr", transpose=False
+        )

     adj = (adj != 0).tocsr()
     srctype = g.to_canonical_etype(metapath[0])[0]
@@ -2883,12 +2890,12 @@ def sort_csr_by_tag(g, tag, tag_offset_name="_TAG_OFFSET", tag_type="node"):
     >>> import torch
     >>> g = dgl.graph(([0,0,0,0,0,1,1,1],[0,1,2,3,4,0,1,2]))
-    >>> g.adjacency_matrix(scipy_fmt='csr').nonzero()
+    >>> g.adj_external(scipy_fmt='csr').nonzero()
     (array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32),
      array([0, 1, 2, 3, 4, 0, 1, 2], dtype=int32))
     >>> tag = torch.IntTensor([1,1,0,2,0])
     >>> g_sorted = dgl.sort_csr_by_tag(g, tag)
-    >>> g_sorted.adjacency_matrix(scipy_fmt='csr').nonzero()
+    >>> g_sorted.adj_external(scipy_fmt='csr').nonzero()
     (array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32),
      array([2, 4, 0, 1, 3, 2, 0, 1], dtype=int32))
     >>> g_sorted.ndata['_TAG_OFFSET']
@@ -2905,7 +2912,7 @@ def sort_csr_by_tag(g, tag, tag_offset_name="_TAG_OFFSET", tag_type="node"):
     (tensor([0, 0, 0, 0, 0, 1, 1, 1]), tensor([0, 1, 2, 3, 4, 0, 1, 2]))
     >>> tag = torch.tensor([1, 1, 0, 2, 0, 1, 1, 0])
     >>> g_sorted = dgl.sort_csr_by_tag(g, tag, tag_type='edge')
-    >>> g_sorted.adj(scipy_fmt='csr').nonzero()
+    >>> g_sorted.adj_external(scipy_fmt='csr').nonzero()
     (array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32), array([2, 4, 0, 1, 3, 2, 0, 1], dtype=int32))
     >>> g_sorted.srcdata['_TAG_OFFSET']
     tensor([[0, 2, 4, 5],
@@ -3011,12 +3018,12 @@ def sort_csc_by_tag(g, tag, tag_offset_name="_TAG_OFFSET", tag_type="node"):
     >>> import dgl
     >>> import torch
     >>> g = dgl.graph(([0,1,2,3,4,0,1,2],[0,0,0,0,0,1,1,1]))
-    >>> g.adjacency_matrix(scipy_fmt='csr', transpose=True).nonzero()
+    >>> g.adj_external(scipy_fmt='csr', transpose=True).nonzero()
     (array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32),
      array([0, 1, 2, 3, 4, 0, 1, 2], dtype=int32)))
     >>> tag = torch.IntTensor([1,1,0,2,0])
     >>> g_sorted = dgl.sort_csc_by_tag(g, tag)
-    >>> g_sorted.adjacency_matrix(scipy_fmt='csr', transpose=True).nonzero()
+    >>> g_sorted.adj_external(scipy_fmt='csr', transpose=True).nonzero()
     (array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32),
      array([2, 4, 0, 1, 3, 2, 0, 1], dtype=int32))
     >>> g_sorted.ndata['_TAG_OFFSET']
@@ -3031,7 +3038,7 @@ def sort_csc_by_tag(g, tag, tag_offset_name="_TAG_OFFSET", tag_type="node"):
     >>> g = dgl.graph(([0,1,2,3,4,0,1,2],[0,0,0,0,0,1,1,1]))
     >>> tag = torch.tensor([1, 1, 0, 2, 0, 1, 1, 0])
     >>> g_sorted = dgl.sort_csc_by_tag(g, tag, tag_type='edge')
-    >>> g_sorted.adj(scipy_fmt='csr', transpose=True).nonzero()
+    >>> g_sorted.adj_external(scipy_fmt='csr', transpose=True).nonzero()
     (array([0, 0, 0, 0, 0, 1, 1, 1], dtype=int32), array([2, 4, 0, 1, 3, 2, 0, 1], dtype=int32))
     >>> g_sorted.dstdata['_TAG_OFFSET']
     tensor([[0, 2, 4, 5],
@@ -3376,7 +3383,7 @@ def rcmk_perm(g):
     allowed_fmats = sum(g.formats().values(), [])
     if fmat not in allowed_fmats:
         g = g.formats(allowed_fmats + [fmat])
-    csr_adj = g.adj(scipy_fmt=fmat)
+    csr_adj = g.adj_external(scipy_fmt=fmat)
     perm = sparse.csgraph.reverse_cuthill_mckee(csr_adj)
     return perm.copy()
@@ -3573,7 +3580,7 @@ def random_walk_pe(g, k, eweight_name=None):
     """
     N = g.num_nodes()  # number of nodes
     M = g.num_edges()  # number of edges
-    A = g.adj(scipy_fmt="csr")  # adjacency matrix
+    A = g.adj_external(scipy_fmt="csr")  # adjacency matrix
     if eweight_name is not None:
         # add edge weights if required
         W = sparse.csr_matrix(
@@ -3657,7 +3664,7 @@ def lap_pe(g, k, padding=False, return_eigval=False):
         )
     # get laplacian matrix as I - D^-0.5 * A * D^-0.5
-    A = g.adj(scipy_fmt="csr")  # adjacency matrix
+    A = g.adj_external(scipy_fmt="csr")  # adjacency matrix
     N = sparse.diags(
         F.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float
     )  # D^-1/2
@@ -3789,7 +3796,7 @@ def double_radius_node_labeling(g, src, dst):
     >>> dgl.double_radius_node_labeling(g, 0, 1)
     tensor([1, 1, 3, 2, 3, 7, 0])
     """
-    adj = g.adj(scipy_fmt="csr")
+    adj = g.adj_external(scipy_fmt="csr")
     src, dst = (dst, src) if src > dst else (src, dst)

     idx = list(range(src)) + list(range(src + 1, adj.shape[0]))
@@ -3899,14 +3906,14 @@ def shortest_dist(g, root=None, return_paths=False):
     """
     if root is None:
         dist, pred = sparse.csgraph.shortest_path(
-            g.adj(scipy_fmt="csr"),
+            g.adj_external(scipy_fmt="csr"),
             return_predecessors=True,
             unweighted=True,
             directed=True,
         )
     else:
         dist, pred = sparse.csgraph.dijkstra(
-            g.adj(scipy_fmt="csr"),
+            g.adj_external(scipy_fmt="csr"),
             directed=True,
             indices=root,
             return_predecessors=True,
@@ -4010,7 +4017,7 @@ def svd_pe(g, k, padding=False, random_flip=True):
             "The number of singular values k must be no greater than the "
             "number of nodes n, but " + f"got {k} and {n} respectively."
         )
-    a = g.adj(ctx=g.device, scipy_fmt="coo").toarray()
+    a = g.adj_external(ctx=g.device, scipy_fmt="coo").toarray()
     u, d, vh = scipy.linalg.svd(a)
     v = vh.transpose()
     m = min(n, k)
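Most of these call sites follow one recipe: export a SciPy matrix with adj_external and do the linear algebra in SciPy. For instance, the k-hop computation in khop_adj and khop_graph boils down to a sparse matrix power; a sketch under an assumed toy graph:

import dgl

g = dgl.graph(([0, 1, 2], [1, 2, 3]))
k = 2

A = g.adj_external(transpose=False, scipy_fmt="csr")
A_k = A**k                 # (A**k)[u, v] counts length-k walks from u to v
print(A_k.todense())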
tests/python/common/test_heterograph-misc.py

@@ -144,11 +144,11 @@ def test_query():
     assert F.allclose(g.out_degrees([8, 9]), F.tensor([0, 1]))
     assert np.array_equal(
-        F.sparse_to_numpy(g.adjacency_matrix(transpose=True)),
+        F.sparse_to_numpy(g.adj_external(transpose=True)),
         scipy_coo_input().toarray().T,
     )
     assert np.array_equal(
-        F.sparse_to_numpy(g.adjacency_matrix(transpose=False)),
+        F.sparse_to_numpy(g.adj_external(transpose=False)),
         scipy_coo_input().toarray(),
     )
@@ -245,11 +245,11 @@ def test_query():
     assert F.allclose(g.out_degrees([8, 9]), F.tensor([0, 1]))
     assert np.array_equal(
-        F.sparse_to_numpy(g.adjacency_matrix(transpose=True)),
+        F.sparse_to_numpy(g.adj_external(transpose=True)),
         scipy_coo_input().toarray().T,
     )
     assert np.array_equal(
-        F.sparse_to_numpy(g.adjacency_matrix(transpose=False)),
+        F.sparse_to_numpy(g.adj_external(transpose=False)),
         scipy_coo_input().toarray(),
     )
@@ -308,12 +308,12 @@ def test_scipy_adjmat():
     g.add_nodes(10)
     g.add_edges(range(9), range(1, 10))

-    adj_0 = g.adj(scipy_fmt="csr")
-    adj_1 = g.adj(scipy_fmt="coo")
+    adj_0 = g.adj_external(scipy_fmt="csr")
+    adj_1 = g.adj_external(scipy_fmt="coo")
     assert np.array_equal(adj_0.toarray(), adj_1.toarray())

-    adj_t0 = g.adj(transpose=False, scipy_fmt="csr")
-    adj_t_1 = g.adj(transpose=False, scipy_fmt="coo")
+    adj_t0 = g.adj_external(transpose=False, scipy_fmt="csr")
+    adj_t_1 = g.adj_external(transpose=False, scipy_fmt="coo")
     assert np.array_equal(adj_0.toarray(), adj_1.toarray())
tests/python/common/test_heterograph.py

@@ -708,41 +708,57 @@ def _test_edge_ids():
     assert eid == 0


+@pytest.mark.skipif(
+    F.backend_name != "pytorch", reason="Only support PyTorch for now"
+)
 @parametrize_idtype
 def test_adj(idtype):
     g = create_test_heterograph(idtype)
-    adj = F.sparse_to_numpy(g.adj(transpose=True, etype="follows"))
+    adj = g.adj("follows")
+    assert F.asnumpy(adj.indices()).tolist() == [[0, 1], [1, 2]]
+    assert np.allclose(F.asnumpy(adj.val), np.array([1, 1]))
+    g.edata["h"] = {("user", "plays", "game"): F.tensor([1, 2, 3, 4])}
+    print(g.edata["h"])
+    adj = g.adj("plays", "h")
+    assert F.asnumpy(adj.indices()).tolist() == [[0, 1, 2, 1], [0, 0, 1, 1]]
+    assert np.allclose(F.asnumpy(adj.val), np.array([1, 2, 3, 4]))
+
+
+@parametrize_idtype
+def test_adj_external(idtype):
+    g = create_test_heterograph(idtype)
+    adj = F.sparse_to_numpy(g.adj_external(transpose=True, etype="follows"))
     assert np.allclose(
         adj, np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
     )
-    adj = F.sparse_to_numpy(g.adj(transpose=False, etype="follows"))
+    adj = F.sparse_to_numpy(g.adj_external(transpose=False, etype="follows"))
     assert np.allclose(
         adj, np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]])
     )
-    adj = F.sparse_to_numpy(g.adj(transpose=True, etype="plays"))
+    adj = F.sparse_to_numpy(g.adj_external(transpose=True, etype="plays"))
     assert np.allclose(adj, np.array([[1.0, 1.0, 0.0], [0.0, 1.0, 1.0]]))
-    adj = F.sparse_to_numpy(g.adj(transpose=False, etype="plays"))
+    adj = F.sparse_to_numpy(g.adj_external(transpose=False, etype="plays"))
     assert np.allclose(adj, np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]))

-    adj = g.adj(transpose=True, scipy_fmt="csr", etype="follows")
+    adj = g.adj_external(transpose=True, scipy_fmt="csr", etype="follows")
     assert np.allclose(
         adj.todense(),
         np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
     )
-    adj = g.adj(transpose=True, scipy_fmt="coo", etype="follows")
+    adj = g.adj_external(transpose=True, scipy_fmt="coo", etype="follows")
     assert np.allclose(
         adj.todense(),
         np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
     )
-    adj = g.adj(transpose=True, scipy_fmt="csr", etype="plays")
+    adj = g.adj_external(transpose=True, scipy_fmt="csr", etype="plays")
     assert np.allclose(
         adj.todense(), np.array([[1.0, 1.0, 0.0], [0.0, 1.0, 1.0]])
     )
-    adj = g.adj(transpose=True, scipy_fmt="coo", etype="plays")
+    adj = g.adj_external(transpose=True, scipy_fmt="coo", etype="plays")
     assert np.allclose(
         adj.todense(), np.array([[1.0, 1.0, 0.0], [0.0, 1.0, 1.0]])
     )
-    adj = F.sparse_to_numpy(g["follows"].adj(transpose=True))
+    adj = F.sparse_to_numpy(g["follows"].adj_external(transpose=True))
     assert np.allclose(
         adj, np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
     )
@@ -3535,7 +3551,7 @@ def test_create_block(idtype):
 @parametrize_idtype
 @pytest.mark.parametrize("fmt", ["coo", "csr", "csc"])
-def test_adj_sparse(idtype, fmt):
+def test_adj_tensors(idtype, fmt):
     if fmt == "coo":
         A = ssp.random(10, 10, 0.2).tocoo()
         A.data = np.arange(20)
@@ -3562,11 +3578,11 @@ def test_adj_sparse(idtype, fmt):
     A_coo = A.tocoo()
     A_csr = A.tocsr()
     A_csc = A.tocsc()

-    row, col = g.adj_sparse("coo")
+    row, col = g.adj_tensors("coo")
     assert np.array_equal(F.asnumpy(row), A_coo.row)
     assert np.array_equal(F.asnumpy(col), A_coo.col)

-    indptr, indices, eids = g.adj_sparse("csr")
+    indptr, indices, eids = g.adj_tensors("csr")
     assert np.array_equal(F.asnumpy(indptr), A_csr.indptr)
     if fmt == "csr":
         assert len(eids) == 0
@@ -3578,7 +3594,7 @@ def test_adj_sparse(idtype, fmt):
     indices_sorted_np[A_csr.data] = A_csr.indices
     assert np.array_equal(F.asnumpy(indices_sorted), indices_sorted_np)

-    indptr, indices, eids = g.adj_sparse("csc")
+    indptr, indices, eids = g.adj_tensors("csc")
     assert np.array_equal(F.asnumpy(indptr), A_csc.indptr)
     if fmt == "csc":
         assert len(eids) == 0
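The new test_adj exercises the eweight_name path end to end. The same check can be reproduced standalone on the PyTorch backend; the heterograph below mirrors the "follows" and "plays" relations that the test expects from create_test_heterograph, which is an assumption about that fixture:

import dgl
import torch

g = dgl.heterograph({
    ("user", "follows", "user"): ([0, 1], [1, 2]),
    ("user", "plays", "game"): ([0, 1, 2, 1], [0, 0, 1, 1]),
})
g.edata["h"] = {("user", "plays", "game"): torch.tensor([1, 2, 3, 4])}

adj = g.adj("plays", "h")    # SparseMatrix with edge weights as values
print(adj.indices())         # expected: tensor([[0, 1, 2, 1], [0, 0, 1, 1]])
print(adj.val)               # expected: tensor([1, 2, 3, 4])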
tests/python/common/test_traversal.py

@@ -70,7 +70,7 @@ def test_topological_nodes(idtype, n=100):
     layers_dgl = dgl.topological_nodes_generator(g)

-    adjmat = g.adjacency_matrix(transpose=True)
+    adjmat = g.adj_external(transpose=True)

     def tensor_topo_traverse():
         n = g.num_nodes()
tests/python/common/transforms/test_functional-sort.py

@@ -74,8 +74,8 @@ def test_sort_with_tag(idtype):
     new_g = dgl.sort_csr_by_tag(
         g, tag if tag_type == "node" else edge_tag_dst, tag_type=tag_type
     )
-    old_csr = g.adjacency_matrix(scipy_fmt="csr")
-    new_csr = new_g.adjacency_matrix(scipy_fmt="csr")
+    old_csr = g.adj_external(scipy_fmt="csr")
+    new_csr = new_g.adj_external(scipy_fmt="csr")
     assert check_sort(new_csr, tag, new_g.dstdata["_TAG_OFFSET"])
     assert not check_sort(
         old_csr, tag
@@ -85,8 +85,8 @@ def test_sort_with_tag(idtype):
     new_g = dgl.sort_csc_by_tag(
         g, tag if tag_type == "node" else edge_tag_src, tag_type=tag_type
     )
-    old_csc = g.adjacency_matrix(transpose=True, scipy_fmt="csr")
-    new_csc = new_g.adjacency_matrix(transpose=True, scipy_fmt="csr")
+    old_csc = g.adj_external(transpose=True, scipy_fmt="csr")
+    new_csc = new_g.adj_external(transpose=True, scipy_fmt="csr")
     assert check_sort(new_csc, tag, new_g.srcdata["_TAG_OFFSET"])
     assert not check_sort(old_csc, tag)
@@ -103,14 +103,14 @@ def test_sort_with_tag_bipartite(idtype):
     vtag = F.tensor(np.random.choice(num_tags, g.num_nodes("_V")))

     new_g = dgl.sort_csr_by_tag(g, vtag)
-    old_csr = g.adjacency_matrix(scipy_fmt="csr")
-    new_csr = new_g.adjacency_matrix(scipy_fmt="csr")
+    old_csr = g.adj_external(scipy_fmt="csr")
+    new_csr = new_g.adj_external(scipy_fmt="csr")
     assert check_sort(new_csr, vtag, new_g.nodes["_U"].data["_TAG_OFFSET"])
     assert not check_sort(old_csr, vtag)

     new_g = dgl.sort_csc_by_tag(g, utag)
-    old_csc = g.adjacency_matrix(transpose=True, scipy_fmt="csr")
-    new_csc = new_g.adjacency_matrix(transpose=True, scipy_fmt="csr")
+    old_csc = g.adj_external(transpose=True, scipy_fmt="csr")
+    new_csc = new_g.adj_external(transpose=True, scipy_fmt="csr")
     assert check_sort(new_csc, utag, new_g.nodes["_V"].data["_TAG_OFFSET"])
     assert not check_sort(old_csc, utag)
tests/python/common/transforms/test_transform.py

@@ -3135,10 +3135,12 @@ def test_module_sign(g):
     ctx = F.ctx()
     g = g.to(ctx)

-    adj = g.adj(transpose=True, scipy_fmt="coo").todense()
+    adj = g.adj_external(transpose=True, scipy_fmt="coo").todense()
     adj = torch.tensor(adj).float().to(ctx)
-    weight_adj = g.adj(transpose=True, scipy_fmt="coo").astype(float).todense()
+    weight_adj = (
+        g.adj_external(transpose=True, scipy_fmt="coo").astype(float).todense()
+    )
     weight_adj = torch.tensor(weight_adj).float().to(ctx)
     src, dst = g.edges()
     src, dst = src.long(), dst.long()
tests/python/mxnet/test_nn.py

@@ -34,7 +34,7 @@ def test_graph_conv(idtype, out_dim):
     g = dgl.from_networkx(nx.path_graph(3))
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    adj = g.adjacency_matrix(transpose=True, ctx=ctx)
+    adj = g.adj_external(transpose=True, ctx=ctx)

     conv = nn.GraphConv(5, out_dim, norm="none", bias=True)
     conv.initialize(ctx=ctx)
@@ -154,7 +154,7 @@ def _S2AXWb(A, N, X, W, b):
 def test_tagconv(out_dim):
     g = dgl.from_networkx(nx.path_graph(3)).to(F.ctx())
     ctx = F.ctx()
-    adj = g.adjacency_matrix(transpose=True, ctx=ctx)
+    adj = g.adj_external(transpose=True, ctx=ctx)
     norm = mx.nd.power(g.in_degrees().astype("float32"), -0.5)

     conv = nn.TAGConv(5, out_dim, bias=True)
@@ -361,7 +361,7 @@ def test_dense_cheb_conv(out_dim):
     for k in range(1, 4):
         ctx = F.ctx()
         g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.3)).to(F.ctx())
-        adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype("default")
+        adj = g.adj_external(transpose=True, ctx=ctx).tostype("default")
         cheb = nn.ChebConv(5, out_dim, k)
         dense_cheb = nn.DenseChebConv(5, out_dim, k)
         cheb.initialize(ctx=ctx)
@@ -387,7 +387,7 @@ def test_dense_graph_conv(idtype, g, norm_type, out_dim):
 def test_dense_graph_conv(idtype, g, norm_type, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype("default")
+    adj = g.adj_external(transpose=True, ctx=ctx).tostype("default")
     conv = nn.GraphConv(5, out_dim, norm=norm_type, bias=True)
     dense_conv = nn.DenseGraphConv(5, out_dim, norm=norm_type, bias=True)
     conv.initialize(ctx=ctx)
@@ -408,7 +408,7 @@ def test_dense_graph_conv(idtype, g, norm_type, out_dim):
 def test_dense_sage_conv(idtype, g, out_dim):
     g = g.astype(idtype).to(F.ctx())
     ctx = F.ctx()
-    adj = g.adjacency_matrix(transpose=True, ctx=ctx).tostype("default")
+    adj = g.adj_external(transpose=True, ctx=ctx).tostype("default")
     sage = nn.SAGEConv(5, out_dim, "gcn")
     dense_sage = nn.DenseSAGEConv(5, out_dim)
     sage.initialize(ctx=ctx)