OpenDAS / pyg_autoscale

Commit dcce414c, authored Jun 10, 2021 by rusty1s

edge dropout

Parent: fa10cf1b
Showing 3 changed files with 16 additions and 6 deletions:

  large_benchmark/conf/model/gcn2.yaml  +1 -0
  large_benchmark/conf/model/pna.yaml   +1 -0
  large_benchmark/main.py               +14 -6
large_benchmark/conf/model/gcn2.yaml

@@ -125,6 +125,7 @@ params:
     shared_weights: false
     alpha: 0.1
     theta: 0.5
+    edge_dropout: 0.8
     num_parts: 150
     batch_size: 1
     max_steps: 151
large_benchmark/conf/model/pna.yaml

@@ -98,6 +98,7 @@ params:
     drop_input: false
     batch_norm: false
     residual: false
+    edge_dropout: 0.8
     num_parts: 150
     batch_size: 1
     max_steps: 151
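Both configs gain an edge_dropout key under params; configs that omit the key fall back to 0.0 in main.py below. A minimal sketch of that lookup, assuming an OmegaConf DictConfig as used by the benchmark (the values are copied from the diffs above, but the snippet itself is illustrative rather than the repository's code):

from omegaconf import OmegaConf

# Illustrative config fragment mirroring gcn2.yaml / pna.yaml after this commit.
params = OmegaConf.create({
    'edge_dropout': 0.8,
    'num_parts': 150,
    'batch_size': 1,
    'max_steps': 151,
})

# Optional key with a 0.0 default; main.py below reaches the same result
# with a try/except around params.edge_dropout.
edge_dropout = params.get('edge_dropout', 0.0)
print(edge_dropout)  # 0.8 here, 0.0 for configs without the key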
large_benchmark/main.py

@@ -7,17 +7,18 @@ from torch_geometric.nn.conv.gcn_conv import gcn_norm
 from torch_geometric_autoscale import (get_data, metis, permute,
                                        SubgraphLoader, EvalSubgraphLoader,
-                                       models, compute_micro_f1)
+                                       models, compute_micro_f1, dropout)
 from torch_geometric_autoscale.data import get_ppi

 torch.manual_seed(123)


-def mini_train(model, loader, criterion, optimizer, max_steps, grad_norm=None):
+def mini_train(model, loader, criterion, optimizer, max_steps, grad_norm=None,
+               edge_dropout=0.0):
     model.train()

     total_loss = total_examples = 0
-    for i, (batch, batch_size, n_id, offset, count) in enumerate(loader):
+    for i, (batch, batch_size, *args) in enumerate(loader):
         x = batch.x.to(model.device)
         adj_t = batch.adj_t.to(model.device)
         y = batch.y[:batch_size].to(model.device)

@@ -26,8 +27,11 @@ def mini_train(model, loader, criterion, optimizer, max_steps, grad_norm=None):
         if train_mask.sum() == 0:
             continue

+        # We make use of edge dropout on ogbn-products to avoid overfitting.
+        adj_t = dropout(adj_t, p=edge_dropout)
+
         optimizer.zero_grad()
-        out = model(x, adj_t, batch_size, n_id, offset, count)
+        out = model(x, adj_t, batch_size, *args)
         loss = criterion(out[train_mask], y[train_mask])
         loss.backward()
         if grad_norm is not None:

@@ -37,7 +41,7 @@ def mini_train(model, loader, criterion, optimizer, max_steps, grad_norm=None):
         total_loss += float(loss) * int(train_mask.sum())
         total_examples += int(train_mask.sum())

-        # We abort after a fixed number of steps to refresh histories...
+        # We may abort after a fixed number of steps to refresh histories...
         if (i + 1) >= max_steps and (i + 1) < len(loader):
             break

@@ -61,6 +65,10 @@ def main(conf):
     conf.model.params = conf.model.params[conf.dataset.name]
     params = conf.model.params
     print(OmegaConf.to_yaml(conf))
+    try:
+        edge_dropout = params.edge_dropout
+    except:  # noqa
+        edge_dropout = 0.0
     grad_norm = None if isinstance(params.grad_norm, str) else params.grad_norm
     device = f'cuda:{conf.device}' if torch.cuda.is_available() else 'cpu'

@@ -142,7 +150,7 @@ def main(conf):
     best_val_acc = test_acc = 0
     for epoch in range(1, params.epochs + 1):
         loss = mini_train(model, train_loader, criterion, optimizer,
-                          params.max_steps, grad_norm)
+                          params.max_steps, grad_norm, edge_dropout)
         out = mini_test(model, eval_loader)
         train_acc = compute_micro_f1(out, data.y, data.train_mask)
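The only call into the newly imported helper is dropout(adj_t, p=edge_dropout) on each mini-batch adjacency. A minimal sketch of what such an edge-dropout step could look like, assuming adj_t is a torch_sparse.SparseTensor; edge_dropout_sketch is an illustrative name, not the repository's actual dropout implementation:

import torch
from torch_sparse import SparseTensor


def edge_dropout_sketch(adj_t: SparseTensor, p: float = 0.0) -> SparseTensor:
    # Illustrative only: randomly remove a fraction p of the edges of a
    # sparse adjacency matrix; the repository's own helper may differ.
    if p <= 0.0:
        return adj_t
    row, col, value = adj_t.coo()
    keep = torch.rand(row.numel(), device=row.device) >= p
    return SparseTensor(row=row[keep], col=col[keep],
                        value=None if value is None else value[keep],
                        sparse_sizes=adj_t.sparse_sizes())

With edge_dropout: 0.8 from the updated configs, such a step would discard roughly 80% of the edges in every training mini-batch, in line with the in-code comment about avoiding overfitting on ogbn-products.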