OpenDAS / dgl · Commits

Unverified commit 48c7ec44, authored Dec 10, 2019 by Da Zheng, committed via GitHub on Dec 10, 2019.
Parent: 870857cf

[Doc] Fix gcn tutorial (#1080)

* Don't apply the activation in the last GCN layer.
* Add evaluation on the test set.
* Address review comments.
Showing 1 changed file with 30 additions and 11 deletions.
tutorials/models/1_gnn/1_gcn.py  (+30, -11)  ·  View file @ 48c7ec44

@@ -58,6 +58,7 @@ class NodeApplyModule(nn.Module):
     def forward(self, node):
         h = self.linear(node.data['h'])
-        h = self.activation(h)
+        if self.activation is not None:
+            h = self.activation(h)
         return {'h' : h}
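This hunk makes the activation optional rather than assumed. For context, a minimal sketch of how the surrounding NodeApplyModule reads after the fix; the constructor is not shown in the hunk, so its signature here is inferred from the tutorial's usage and should be treated as an assumption:

import torch.nn as nn

class NodeApplyModule(nn.Module):
    # Transform the 'h' node feature with a linear layer, then apply
    # the activation only when one was given (None for the last layer).
    def __init__(self, in_feats, out_feats, activation=None):
        super(NodeApplyModule, self).__init__()
        self.linear = nn.Linear(in_feats, out_feats)
        self.activation = activation  # assumed: may be None after this fix

    def forward(self, node):
        h = self.linear(node.data['h'])
        if self.activation is not None:
            h = self.activation(h)
        return {'h' : h}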
@@ -82,13 +83,14 @@ class GCN(nn.Module):
 # model in PyTorch. We can initialize GCN like any ``nn.Module``. For example,
 # let's define a simple neural network consisting of two GCN layers. Suppose we
 # are training the classifier for the cora dataset (the input feature size is
-# 1433 and the number of classes is 7).
+# 1433 and the number of classes is 7). The last GCN layer computes node embeddings,
+# so the last layer in general doesn't apply activation.
 
 class Net(nn.Module):
     def __init__(self):
         super(Net, self).__init__()
         self.gcn1 = GCN(1433, 16, F.relu)
-        self.gcn2 = GCN(16, 7, F.relu)
+        self.gcn2 = GCN(16, 7, None)
 
     def forward(self, g, features):
         x = self.gcn1(g, features)
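The switch from F.relu to None in gcn2 follows from the new comment: the second layer emits the 7 class logits directly, and F.log_softmax is applied later in the training loop, so a ReLU here would zero out all negative logits. A sketch of the complete Net under that reading; the lines of forward beyond the shown context are an assumption:

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.gcn1 = GCN(1433, 16, F.relu)  # hidden layer: 1433 -> 16, with ReLU
        self.gcn2 = GCN(16, 7, None)       # output layer: 16 -> 7, raw logits

    def forward(self, g, features):
        x = self.gcn1(g, features)
        x = self.gcn2(g, x)  # assumed continuation: second layer consumes x
        return x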
@@ -106,29 +108,45 @@ def load_cora_data():
     data = citegrh.load_cora()
     features = th.FloatTensor(data.features)
     labels = th.LongTensor(data.labels)
-    mask = th.ByteTensor(data.train_mask)
+    train_mask = th.ByteTensor(data.train_mask)
+    test_mask = th.ByteTensor(data.test_mask)
     g = data.graph
     # add self loop
     g.remove_edges_from(nx.selfloop_edges(g))
     g = DGLGraph(g)
     g.add_edges(g.nodes(), g.nodes())
-    return g, features, labels, mask
+    return g, features, labels, train_mask, test_mask
+
+###############################################################################
+# When a model is trained, we can use the following method to evaluate
+# the performance of the model on the test dataset:
+
+def evaluate(model, g, features, labels, mask):
+    model.eval()
+    with th.no_grad():
+        logits = model(g, features)
+        logits = logits[mask]
+        labels = labels[mask]
+        _, indices = th.max(logits, dim=1)
+        correct = th.sum(indices == labels)
+        return correct.item() * 1.0 / len(labels)
 
 ###############################################################################
 # We then train the network as follows:
 import time
 import numpy as np
 
-g, features, labels, mask = load_cora_data()
+g, features, labels, train_mask, test_mask = load_cora_data()
 optimizer = th.optim.Adam(net.parameters(), lr=1e-3)
 dur = []
-for epoch in range(30):
+for epoch in range(50):
     if epoch >= 3:
         t0 = time.time()
 
+    net.train()
     logits = net(g, features)
     logp = F.log_softmax(logits, 1)
-    loss = F.nll_loss(logp[mask], labels[mask])
+    loss = F.nll_loss(logp[train_mask], labels[train_mask])
 
     optimizer.zero_grad()
     loss.backward()
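The added net.train() pairs with model.eval() inside evaluate: switching modes keeps layers such as dropout behaving correctly in each phase, and th.no_grad() avoids building the autograd graph during evaluation. A small usage sketch of the new helper once training has finished (variable names as returned by the updated load_cora_data; the final print is illustrative, not part of the commit):

# One-off evaluation after the loop; evaluate() leaves the model in
# eval mode, so call net.train() again before any further training.
final_acc = evaluate(net, g, features, labels, test_mask)
print("Final test accuracy: {:.4f}".format(final_acc))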
@@ -137,8 +155,9 @@ for epoch in range(30):
     if epoch >= 3:
         dur.append(time.time() - t0)
 
-    print("Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f}".format(
-            epoch, loss.item(), np.mean(dur)))
+    acc = evaluate(net, g, features, labels, test_mask)
+    print("Epoch {:05d} | Loss {:.4f} | Test Acc {:.4f} | Time(s) {:.4f}".format(
+            epoch, loss.item(), acc, np.mean(dur)))
 
 ###############################################################################
 # .. _math:
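Per-epoch evaluation is cheap on Cora because the whole graph fits in one forward pass; on larger graphs it can dominate the epoch time. A hedged variant, not part of this commit, that reports test accuracy only every fifth epoch:

if epoch % 5 == 0:  # assumption: periodic evaluation instead of every epoch
    acc = evaluate(net, g, features, labels, test_mask)
    print("Epoch {:05d} | Test Acc {:.4f}".format(epoch, acc))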