OpenDAS / dgl · Commits · 704bcaf6

Unverified commit 704bcaf6, authored Feb 19, 2023 by Hongzhi (Steve), Chen and committed by GitHub on Feb 19, 2023.

examples (#5323)

Co-authored-by: Ubuntu <ubuntu@ip-172-31-28-63.ap-northeast-1.compute.internal>

parent 6bc82161
Changes: 332 · Showing 12 changed files with 124 additions and 111 deletions (+124 −111)
examples/sparse/sgc.py                        +1   −0
examples/sparse/sign.py                       +1   −0
examples/tensorflow/dgi/gcn.py                +1   −1
examples/tensorflow/dgi/train.py              +4   −4
examples/tensorflow/gat/gat.py                +2   −3
examples/tensorflow/gat/train.py              +4   −5
examples/tensorflow/gcn/gcn.py                +1   −1
examples/tensorflow/gcn/gcn_builtin.py        +99  −85
examples/tensorflow/gcn/gcn_mp.py             +3   −3
examples/tensorflow/gcn/train.py              +3   −3
examples/tensorflow/rgcn/entity_classify.py   +4   −4
examples/tensorflow/rgcn/utils.py             +1   −2
examples/sparse/sgc.py

@@ -10,6 +10,7 @@ import torch.nn.functional as F
 from dgl.data import CoraGraphDataset
 from torch.optim import Adam

 ################################################################################
 # (HIGHLIGHT) Take the advantage of DGL sparse APIs to implement the feature
 # pre-computation.
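As background for the (HIGHLIGHT) comment above: SGC pre-computes K rounds of normalized-adjacency smoothing once, before training, so the trainable model reduces to a single linear layer. A minimal sketch of that pre-computation in plain PyTorch (dense tensors for clarity; the example file itself uses the dgl.sparse API, and the helper name below is illustrative, not taken from the file):

import torch

def precompute_sgc_features(adj: torch.Tensor, X: torch.Tensor, k: int = 2) -> torch.Tensor:
    """Return A_hat^k @ X, where A_hat is the symmetrically normalized
    adjacency with self-loops (the SGC "feature pre-computation")."""
    n = adj.shape[0]
    A_hat = adj + torch.eye(n)                 # add self-loops
    deg = A_hat.sum(dim=1)                     # node degrees
    d_inv_sqrt = deg.pow(-0.5)
    d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.0  # guard isolated nodes
    A_hat = d_inv_sqrt[:, None] * A_hat * d_inv_sqrt[None, :]
    for _ in range(k):                         # k smoothing steps, done once up front
        X = A_hat @ X
    return X

# toy usage: 4-node cycle graph with 3-dimensional features
A = torch.tensor([[0., 1., 0., 1.],
                  [1., 0., 1., 0.],
                  [0., 1., 0., 1.],
                  [1., 0., 1., 0.]])
X = torch.randn(4, 3)
X_pre = precompute_sgc_features(A, X, k=2)
print(X_pre.shape)  # torch.Size([4, 3])
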
examples/sparse/sign.py

@@ -13,6 +13,7 @@ import torch.nn.functional as F
 from dgl.data import CoraGraphDataset
 from torch.optim import Adam

 ################################################################################
 # (HIGHLIGHT) Take the advantage of DGL sparse APIs to implement the feature
 # diffusion in SIGN laconically.
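The SIGN "feature diffusion" highlighted above is the multi-hop generalization of the same idea: precompute the stack [X, ÂX, Â²X, …, Â^K X] once and give each hop its own encoder. A minimal plain-PyTorch sketch (again illustrative only; A_hat is assumed to be a pre-normalized adjacency such as the one built in the SGC sketch, and the function name is hypothetical):

import torch

def sign_diffusion(A_hat: torch.Tensor, X: torch.Tensor, k: int = 3):
    """Return [X, A_hat @ X, A_hat^2 @ X, ..., A_hat^k @ X].

    Each hop's features are later fed to its own encoder and the
    encoded hops are concatenated before the classifier."""
    feats = [X]
    for _ in range(k):
        feats.append(A_hat @ feats[-1])
    return feats

# toy usage: identity "graph" just to show the shapes
A_hat = torch.eye(5)
X = torch.randn(5, 8)
hops = sign_diffusion(A_hat, X, k=3)
print(len(hops), hops[0].shape)  # 4 torch.Size([5, 8])
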
examples/tensorflow/dgi/gcn.py

@@ -2,9 +2,9 @@
 This code was copied from the GCN implementation in DGL examples.
 """
 import tensorflow as tf
-from tensorflow.keras import layers
 from dgl.nn.tensorflow import GraphConv
+from tensorflow.keras import layers


 class GCN(layers.Layer):
examples/tensorflow/dgi/train.py

 import argparse
 import time
+import dgl
 import networkx as nx
 import numpy as np
 import tensorflow as tf
-from dgi import DGI, Classifier
-from tensorflow.keras import layers
-import dgl
+from dgi import Classifier, DGI
 from dgl.data import (
     CiteseerGraphDataset,
     CoraGraphDataset,
     PubmedGraphDataset,
     register_data_args,
 )
+from tensorflow.keras import layers


 def evaluate(model, features, labels, mask):
examples/tensorflow/gat/gat.py

@@ -7,11 +7,10 @@ Author's code: https://github.com/PetarV-/GAT
 Pytorch implementation: https://github.com/Diego999/pyGAT
 """
-import tensorflow as tf
-from tensorflow.keras import layers
 import dgl.function as fn
+import tensorflow as tf
 from dgl.nn import GATConv
+from tensorflow.keras import layers


 class GAT(tf.keras.Model):
examples/tensorflow/gat/train.py

@@ -13,19 +13,19 @@ Pytorch implementation: https://github.com/Diego999/pyGAT
 import argparse
 import time
+import dgl
 import networkx as nx
 import numpy as np
 import tensorflow as tf
-from gat import GAT
-from utils import EarlyStopping
-import dgl
 from dgl.data import (
     CiteseerGraphDataset,
     CoraGraphDataset,
     PubmedGraphDataset,
     register_data_args,
 )
+from gat import GAT
+from utils import EarlyStopping


 def accuracy(logits, labels):

@@ -174,7 +174,6 @@ def main(args):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="GAT")
     register_data_args(parser)
     parser.add_argument(
examples/tensorflow/gcn/gcn.py

@@ -6,9 +6,9 @@ References:
 - Code: https://github.com/tkipf/gcn
 """
 import tensorflow as tf
-from tensorflow.keras import layers
 from dgl.nn.tensorflow import GraphConv
+from tensorflow.keras import layers


 class GCN(tf.keras.Model):
examples/tensorflow/gcn/gcn_builtin.py

 import argparse
-import time
 import math
-import numpy as np
+import time
+
+import dgl
+import dgl.function as fn
 import networkx as nx
+import numpy as np
 import tensorflow as tf
-import dgl.function as fn
-import dgl
-from dgl.data import register_data_args
-from dgl.data import CoraGraphDataset, CiteseerGraphDataset, PubmedGraphDataset
+from dgl.data import (
+    CiteseerGraphDataset,
+    CoraGraphDataset,
+    PubmedGraphDataset,
+    register_data_args,
+)
 from tensorflow.keras import layers

 class GCNLayer(layers.Layer):
     def __init__(self, g, in_feats, out_feats, activation, dropout, bias=True):
         super(GCNLayer, self).__init__()
         self.g = g
         w_init = tf.keras.initializers.VarianceScaling(
-            scale=1.0, mode="fan_out", distribution="uniform")
-        self.weight = tf.Variable(initial_value=w_init(shape=(in_feats, out_feats), dtype='float32'), trainable=True)
+            scale=1.0, mode="fan_out", distribution="uniform"
+        )
+        self.weight = tf.Variable(
+            initial_value=w_init(shape=(in_feats, out_feats), dtype="float32"),
+            trainable=True,
+        )
         if dropout:
             self.dropout = layers.Dropout(rate=dropout)
         else:
-            self.dropout = 0.
+            self.dropout = 0.0
         if bias:
             b_init = tf.zeros_initializer()
-            self.bias = tf.Variable(initial_value=b_init(shape=(out_feats,), dtype='float32'), trainable=True)
+            self.bias = tf.Variable(
+                initial_value=b_init(shape=(out_feats,), dtype="float32"),
+                trainable=True,
+            )
         else:
             self.bias = None
         self.activation = activation
@@ -43,11 +45,10 @@ class GCNLayer(layers.Layer):

     def call(self, h):
         if self.dropout:
             h = self.dropout(h)
-        self.g.ndata['h'] = tf.matmul(h, self.weight)
-        self.g.ndata['norm_h'] = self.g.ndata['h'] * self.g.ndata['norm']
-        self.g.update_all(fn.copy_u('norm_h', 'm'), fn.sum('m', 'h'))
-        h = self.g.ndata['h']
+        self.g.ndata["h"] = tf.matmul(h, self.weight)
+        self.g.ndata["norm_h"] = self.g.ndata["h"] * self.g.ndata["norm"]
+        self.g.update_all(fn.copy_u("norm_h", "m"), fn.sum("m", "h"))
+        h = self.g.ndata["h"]
         if self.bias is not None:
             h = h + self.bias
         if self.activation:
@@ -56,24 +57,19 @@ class GCNLayer(layers.Layer):

 class GCN(layers.Layer):
     def __init__(self, g, in_feats, n_hidden, n_classes, n_layers, activation, dropout):
         super(GCN, self).__init__()
         self.layers = []
         # input layer
         self.layers.append(GCNLayer(g, in_feats, n_hidden, activation, dropout))
         # hidden layers
         for i in range(n_layers - 1):
-            self.layers.append(GCNLayer(g, n_hidden, n_hidden, activation, dropout))
+            self.layers.append(
+                GCNLayer(g, n_hidden, n_hidden, activation, dropout)
+            )
         # output layer
         self.layers.append(GCNLayer(g, n_hidden, n_classes, None, dropout))
@@ -95,14 +91,14 @@ def evaluate(model, features, labels, mask):

 def main(args):
     # load and preprocess dataset
-    if args.dataset == 'cora':
+    if args.dataset == "cora":
         data = CoraGraphDataset()
-    elif args.dataset == 'citeseer':
+    elif args.dataset == "citeseer":
         data = CiteseerGraphDataset()
-    elif args.dataset == 'pubmed':
+    elif args.dataset == "pubmed":
         data = PubmedGraphDataset()
     else:
-        raise ValueError('Unknown dataset: {}'.format(args.dataset))
+        raise ValueError("Unknown dataset: {}".format(args.dataset))
     g = data[0]
     if args.gpu < 0:
@@ -112,24 +108,29 @@ def main(args):
     g = g.to(device)
     with tf.device(device):
-        features = g.ndata['feat']
-        labels = g.ndata['label']
-        train_mask = g.ndata['train_mask']
-        val_mask = g.ndata['val_mask']
-        test_mask = g.ndata['test_mask']
+        features = g.ndata["feat"]
+        labels = g.ndata["label"]
+        train_mask = g.ndata["train_mask"]
+        val_mask = g.ndata["val_mask"]
+        test_mask = g.ndata["test_mask"]
         in_feats = features.shape[1]
         n_classes = data.num_labels
         n_edges = data.graph.number_of_edges()
-        print("""----Data statistics------'
+        print(
+            """----Data statistics------'
           #Edges %d
           #Classes %d
           #Train samples %d
           #Val samples %d
-          #Test samples %d""" % (n_edges, n_classes, train_mask.numpy().sum(), val_mask.numpy().sum(), test_mask.numpy().sum()))
+          #Test samples %d"""
+            % (
+                n_edges,
+                n_classes,
+                train_mask.numpy().sum(),
+                val_mask.numpy().sum(),
+                test_mask.numpy().sum(),
+            )
+        )

     # add self loop
     g = dgl.remove_self_loop(g)
@@ -140,22 +141,24 @@ def main(args):
     norm = tf.math.pow(degs, -0.5)
     norm = tf.where(tf.math.is_inf(norm), tf.zeros_like(norm), norm)
-    g.ndata['norm'] = tf.expand_dims(norm, -1)
+    g.ndata["norm"] = tf.expand_dims(norm, -1)

     # create GCN model
-    model = GCN(g, in_feats, args.n_hidden, n_classes, args.n_layers, tf.nn.relu, args.dropout)
+    model = GCN(
+        g,
+        in_feats,
+        args.n_hidden,
+        n_classes,
+        args.n_layers,
+        tf.nn.relu,
+        args.dropout,
+    )
     optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)
     loss_fcn = tf.keras.losses.SparseCategoricalCrossentropy(
-        from_logits=True)
+        from_logits=True
+    )

     # initialize graph
     dur = []
     for epoch in range(args.n_epochs):
@@ -170,8 +173,9 @@ def main(args):
             # of Adam(W) optimizer with PyTorch. And this results in worse results.
             # Manually adding weights to the loss to do weight decay solves this problem.
             for weight in model.trainable_weights:
-                loss_value = loss_value + \
-                    args.weight_decay * tf.nn.l2_loss(weight)
+                loss_value = loss_value + args.weight_decay * tf.nn.l2_loss(weight)

         grads = tape.gradient(loss_value, model.trainable_weights)
         optimizer.apply_gradients(zip(grads, model.trainable_weights))

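A brief aside on the weight-decay workaround in the hunk above: tf.nn.l2_loss(w) returns sum(w ** 2) / 2, so adding args.weight_decay * tf.nn.l2_loss(weight) to the loss reproduces classic (coupled) L2 regularization inside Adam, which is not the same as AdamW-style decoupled weight decay. A quick, self-contained check of that identity:

import tensorflow as tf

w = tf.constant([1.0, 2.0, 3.0])
print(tf.nn.l2_loss(w).numpy())             # 7.0, i.e. (1 + 4 + 9) / 2
print((tf.reduce_sum(w ** 2) / 2).numpy())  # 7.0, the same quantity
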
@@ -180,31 +184,41 @@ def main(args):
             dur.append(time.time() - t0)

         acc = evaluate(model, features, labels, val_mask)
         print(
             "Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
             "ETputs(KTEPS) {:.2f}".format(
                 epoch,
                 np.mean(dur),
                 loss_value.numpy().item(),
                 acc,
                 n_edges / np.mean(dur) / 1000,
             )
         )

     acc = evaluate(model, features, labels, test_mask)
     print("Test Accuracy {:.4f}".format(acc))


-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='GCN')
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="GCN")
     register_data_args(parser)
     parser.add_argument("--dropout", type=float, default=0.5, help="dropout probability")
     parser.add_argument("--gpu", type=int, default=-1, help="gpu")
     parser.add_argument("--lr", type=float, default=1e-2, help="learning rate")
     parser.add_argument("--n-epochs", type=int, default=200, help="number of training epochs")
     parser.add_argument("--n-hidden", type=int, default=16, help="number of hidden gcn units")
     parser.add_argument("--n-layers", type=int, default=1, help="number of hidden gcn layers")
     parser.add_argument("--weight-decay", type=float, default=5e-4, help="Weight for L2 loss")
     args = parser.parse_args()
     print(args)

examples/tensorflow/gcn/gcn_mp.py

@@ -2,18 +2,18 @@ import argparse
 import math
 import time
+import dgl
 import networkx as nx
 import numpy as np
 import tensorflow as tf
-from tensorflow.keras import layers
-import dgl
 from dgl.data import (
     CiteseerGraphDataset,
     CoraGraphDataset,
     PubmedGraphDataset,
     register_data_args,
 )
+from tensorflow.keras import layers


 def gcn_msg(edge):
examples/tensorflow/gcn/train.py

 import argparse
 import time
+import dgl
 import numpy as np
 import tensorflow as tf
-from gcn import GCN
-import dgl
 from dgl.data import CiteseerGraphDataset, CoraGraphDataset, PubmedGraphDataset
+from gcn import GCN


 def evaluate(model, features, labels, mask):
examples/tensorflow/rgcn/entity_classify.py

@@ -12,14 +12,14 @@ import argparse
 import time
 from functools import partial
+import dgl
 import numpy as np
 import tensorflow as tf
-from model import BaseRGCN
-from tensorflow.keras import layers
-import dgl
 from dgl.data.rdf import AIFBDataset, AMDataset, BGSDataset, MUTAGDataset
 from dgl.nn.tensorflow import RelGraphConv
+from model import BaseRGCN
+from tensorflow.keras import layers


 class EntityClassify(BaseRGCN):
examples/tensorflow/rgcn/utils.py

@@ -5,11 +5,10 @@ https://github.com/MichSchli/RelationPrediction
 """
+import dgl
 import numpy as np
 import tensorflow as tf
-import dgl

 #######################################################################
 #
 # Utility function for building training and testing graphs