# OpenDAS / pyg_autoscale · Commit d0564e2e

small benchmark script

Authored Feb 04, 2021 by rusty1s. Parent: a4f271f7.
Showing 16 changed files, with 745 additions and 0 deletions (+745, −0):

- small_benchmark/README.md (+4, −0)
- small_benchmark/conf/config.yaml (+5, −0)
- small_benchmark/conf/dataset/amazon-computers.yaml (+2, −0)
- small_benchmark/conf/dataset/amazon-photo.yaml (+2, −0)
- small_benchmark/conf/dataset/citeseer.yaml (+3, −0)
- small_benchmark/conf/dataset/cluster.yaml (+2, −0)
- small_benchmark/conf/dataset/coauthor-cs.yaml (+2, −0)
- small_benchmark/conf/dataset/coauthor-physics.yaml (+2, −0)
- small_benchmark/conf/dataset/cora.yaml (+2, −0)
- small_benchmark/conf/dataset/pubmed.yaml (+2, −0)
- small_benchmark/conf/dataset/wikics.yaml (+3, −0)
- small_benchmark/conf/model/appnp.yaml (+133, −0)
- small_benchmark/conf/model/gat.yaml (+113, −0)
- small_benchmark/conf/model/gcn.yaml (+125, −0)
- small_benchmark/conf/model/gcn2.yaml (+215, −0)
- small_benchmark/main.py (+130, −0)
## small_benchmark/README.md (new file, mode 100644)

````markdown
# Benchmark on Small-scale Graphs

```
python main.py
```
````
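Since the configuration is composed with Hydra (see `conf/config.yaml` below), other model/dataset pairs should be selectable from the command line via the usual Hydra override syntax; a hypothetical invocation, not shown in this commit:

```
python main.py model=gcn2 dataset=citeseer device=0
```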
## small_benchmark/conf/config.yaml (new file, mode 100644)

```yaml
defaults:
  - model: gcn
  - dataset: cora
device: 0
root: '/tmp/datasets'
```
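The `defaults` list pulls one file from each config group under `conf/` (`model/*`, `dataset/*`). Each group file begins with Hydra's `# @package _group_` directive, which nests that file's keys under its group name, so the composed configuration for the defaults above should look roughly like this (a sketch, with the long `params` block elided):

```yaml
device: 0
root: '/tmp/datasets'
model:
  name: GCN
  norm: true
  loop: false
  # params: per-dataset hyperparameters, see conf/model/gcn.yaml
dataset:
  name: Cora
```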
## small_benchmark/conf/dataset/amazon-computers.yaml (new file, mode 100644)

```yaml
# @package _group_
name: AmazonComputers
```

## small_benchmark/conf/dataset/amazon-photo.yaml (new file, mode 100644)

```yaml
# @package _group_
name: AmazonPhoto
```

## small_benchmark/conf/dataset/citeseer.yaml (new file, mode 100644)

```yaml
# @package _group_
name: CiteSeer
```

## small_benchmark/conf/dataset/cluster.yaml (new file, mode 100644)

```yaml
# @package _group_
name: CLUSTER
```

## small_benchmark/conf/dataset/coauthor-cs.yaml (new file, mode 100644)

```yaml
# @package _group_
name: CoauthorCS
```

## small_benchmark/conf/dataset/coauthor-physics.yaml (new file, mode 100644)

```yaml
# @package _group_
name: CoauthorPhysics
```

## small_benchmark/conf/dataset/cora.yaml (new file, mode 100644)

```yaml
# @package _group_
name: Cora
```

## small_benchmark/conf/dataset/pubmed.yaml (new file, mode 100644)

```yaml
# @package _group_
name: PubMed
```

## small_benchmark/conf/dataset/wikics.yaml (new file, mode 100644)

```yaml
# @package _group_
name: WikiCS
```
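Each dataset config is just a name that `get_data` in `main.py` resolves to a concrete dataset. Adding a further dataset to the benchmark should therefore only require another two-line group file (plus matching entries under `params` in the model configs). A hypothetical example for Reddit, for which `gcn2.yaml` below already carries hyperparameters but which ships no dataset file in this commit:

```yaml
# @package _group_
name: Reddit
```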
## small_benchmark/conf/model/appnp.yaml (new file, mode 100644)

```yaml
# @package _group_
name: APPNP
norm: true
loop: false
params:
  Cora:
    architecture:
      num_layers: 10
      hidden_channels: 64
      alpha: 0.1
      dropout: 0.5
    num_parts: 4
    batch_size: 2
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.005
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 600
    runs: 20
  CiteSeer:
    architecture:
      num_layers: 10
      hidden_channels: 64
      alpha: 0.1
      dropout: 0.5
    num_parts: 24
    batch_size: 6
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.005
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 600
    runs: 20
  PubMed:
    architecture:
      num_layers: 10
      hidden_channels: 64
      alpha: 0.1
      dropout: 0.5
    num_parts: 4
    batch_size: 2
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.005
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 600
    runs: 20
  CoauthorCS:
    architecture:
      num_layers: 10
      hidden_channels: 64
      alpha: 0.2
      dropout: 0.5
    num_parts: 32
    batch_size: 16
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.005
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 600
    runs: 20
  CoauthorPhysics:
    architecture:
      num_layers: 10
      hidden_channels: 64
      alpha: 0.2
      dropout: 0.5
    num_parts: 2
    batch_size: 1
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.005
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 600
    runs: 20
  AmazonComputers:
    architecture:
      num_layers: 10
      hidden_channels: 64
      alpha: 0.2
      dropout: 0.5
    num_parts: 4
    batch_size: 2
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.005
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 600
    runs: 20
  AmazonPhoto:
    architecture:
      num_layers: 10
      hidden_channels: 64
      alpha: 0.2
      dropout: 0.5
    num_parts: 16
    batch_size: 8
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.005
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 600
    runs: 20
  WikiCS:
    architecture:
      num_layers: 2
      hidden_channels: 64
      alpha: 0.11
      dropout: 0.4
    num_parts: 4
    batch_size: 2
    num_workers: 0
    lr: 0.02
    reg_weight_decay: 0.0005
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 600
    runs: 20
```
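Each per-dataset block splits into two parts: the `architecture` dict, which `main.py` (below) forwards verbatim to the model constructor via `**params.architecture`, and flat training options (`num_parts`, `batch_size`, `lr`, the weight decays, `grad_norm`, `epochs`, `runs`) read directly off `params`. Loaded standalone with OmegaConf, bypassing Hydra composition (an illustration only):

```python
from omegaconf import OmegaConf

conf = OmegaConf.load('conf/model/appnp.yaml')
params = conf.params['Cora']
print(OmegaConf.to_container(params.architecture))
# {'num_layers': 10, 'hidden_channels': 64, 'alpha': 0.1, 'dropout': 0.5}
print(params.lr, params.grad_norm)  # 0.01 None
```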
## small_benchmark/conf/model/gat.yaml (new file, mode 100644)

```yaml
# @package _group_
name: GAT
norm: false
loop: true
params:
  Cora:
    architecture:
      num_layers: 2
      hidden_channels: 8
      hidden_heads: 8
      out_heads: 1
      residual: false
      dropout: 0.6
    num_parts: 40
    batch_size: 10
    num_workers: 0
    lr: 0.005
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 400
    runs: 20
  CiteSeer:
    architecture:
      num_layers: 2
      hidden_channels: 8
      hidden_heads: 8
      out_heads: 1
      residual: false
      dropout: 0.6
    num_parts: 24
    batch_size: 8
    num_workers: 0
    lr: 0.005
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 400
    runs: 20
  PubMed:
    architecture:
      num_layers: 2
      hidden_channels: 8
      hidden_heads: 8
      out_heads: 8
      residual: false
      dropout: 0.6
    num_parts: 4
    batch_size: 1
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.001
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 400
    runs: 20
  CoauthorCS:
    architecture:
      num_layers: 2
      hidden_channels: 8
      hidden_heads: 8
      out_heads: 1
      residual: false
      dropout: 0.6
    num_parts: 8
    batch_size: 2
    num_workers: 0
    lr: 0.005
    reg_weight_decay: 0.01
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 400
    runs: 20
  CoauthorPhysics:
    architecture:
      num_layers: 2
      hidden_channels: 8
      hidden_heads: 8
      out_heads: 1
      residual: false
      dropout: 0.6
    num_parts: 4
    batch_size: 1
    num_workers: 0
    lr: 0.005
    reg_weight_decay: 0.01
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 400
    runs: 20
  WikiCS:
    architecture:
      num_layers: 2
      hidden_channels: 14
      hidden_heads: 5
      out_heads: 1
      residual: false
      dropout: 0.5
    num_parts: 2
    batch_size: 1
    num_workers: 0
    lr: 0.007
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 400
    runs: 20
```
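Note `norm: false` / `loop: true` here, the opposite of the other models: in `main.py` these flags choose between symmetric GCN normalization of the adjacency matrix and merely adding self-loops (GAT computes its own attention coefficients). A minimal sketch of the two branches, assuming `torch_sparse` is available:

```python
import torch
from torch_sparse import SparseTensor
from torch_geometric.nn.conv.gcn_conv import gcn_norm

# Toy 3-node graph as a sparse transposed adjacency matrix.
adj_t = SparseTensor(row=torch.tensor([0, 1, 1, 2]),
                     col=torch.tensor([1, 0, 2, 1]),
                     sparse_sizes=(3, 3))

adj_gcn = gcn_norm(adj_t)   # norm: true  (GCN, APPNP, GCN2)
adj_gat = adj_t.set_diag()  # loop: true  (GAT)
```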
## small_benchmark/conf/model/gcn.yaml (new file, mode 100644)

```yaml
# @package _group_
name: GCN
norm: true
loop: false
params:
  Cora:
    architecture:
      num_layers: 2
      hidden_channels: 16
      dropout: 0.5
    num_parts: 40
    batch_size: 10
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 200
    runs: 20
  CiteSeer:
    architecture:
      num_layers: 2
      hidden_channels: 16
      dropout: 0.5
    num_parts: 24
    batch_size: 8
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 200
    runs: 20
  PubMed:
    architecture:
      num_layers: 2
      hidden_channels: 16
      dropout: 0.5
    num_parts: 8
    batch_size: 4
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 200
    runs: 20
  CoauthorCS:
    architecture:
      num_layers: 2
      hidden_channels: 64
      dropout: 0.5
    num_parts: 2
    batch_size: 1
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.001
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 200
    runs: 20
  CoauthorPhysics:
    architecture:
      num_layers: 2
      hidden_channels: 64
      dropout: 0.5
    num_parts: 4
    batch_size: 2
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.001
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 200
    runs: 20
  AmazonComputers:
    architecture:
      num_layers: 2
      hidden_channels: 64
      dropout: 0.5
    num_parts: 32
    batch_size: 16
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.001
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 200
    runs: 20
  AmazonPhoto:
    architecture:
      num_layers: 2
      hidden_channels: 64
      dropout: 0.5
    num_parts: 32
    batch_size: 16
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.001
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 200
    runs: 20
  WikiCS:
    architecture:
      num_layers: 2
      hidden_channels: 33
      dropout: 0.25
    num_parts: 32
    batch_size: 16
    num_workers: 0
    lr: 0.02
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 200
    runs: 20
```
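Individual hyperparameters can presumably also be overridden per invocation with Hydra's dotted syntax, since all of these keys exist in the composed config; a hypothetical example, untested in this commit:

```
python main.py model=gcn dataset=pubmed model.params.PubMed.lr=0.02
```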
## small_benchmark/conf/model/gcn2.yaml (new file, mode 100644)

```yaml
# @package _group_
name: GCN2
norm: true
loop: false
params:
  Cora:
    architecture:
      num_layers: 64
      hidden_channels: 64
      dropout: 0.6
      alpha: 0.1
      theta: 0.5
    num_parts: 2
    batch_size: 1
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.01
    nonreg_weight_decay: 5e-4
    grad_norm: null
    epochs: 1000
    runs: 10
  CiteSeer:
    architecture:
      num_layers: 32
      hidden_channels: 256
      dropout: 0.7
      alpha: 0.1
      theta: 0.6
    num_parts: 40
    batch_size: 20
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.01
    nonreg_weight_decay: 5e-4
    grad_norm: 1.0
    epochs: 1000
    runs: 10
  PubMed:
    architecture:
      num_layers: 16
      hidden_channels: 256
      dropout: 0.5
      alpha: 0.1
      theta: 0.4
    num_parts: 4
    batch_size: 2
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 5e-4
    grad_norm: null
    epochs: 1000
    runs: 10
  CoauthorCS:
    architecture:
      num_layers: 16
      hidden_channels: 64
      dropout: 0.5
      alpha: 0.1
      theta: 0.5
    num_parts: 16
    batch_size: 8
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.01
    nonreg_weight_decay: 5e-4
    grad_norm: null
    epochs: 1000
    runs: 10
  CoauthorPhysics:
    architecture:
      num_layers: 16
      hidden_channels: 64
      dropout: 0.5
      alpha: 0.1
      theta: 0.5
    num_parts: 2
    batch_size: 1
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.01
    nonreg_weight_decay: 5e-4
    grad_norm: 1.0
    epochs: 1000
    runs: 10
  AmazonComputers:
    architecture:
      num_layers: 16
      hidden_channels: 64
      dropout: 0.2
      alpha: 0.1
      theta: 0.5
    num_parts: 32
    batch_size: 16
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.01
    nonreg_weight_decay: 5e-4
    grad_norm: 1.0
    epochs: 300
    runs: 10
  AmazonPhoto:
    architecture:
      num_layers: 16
      hidden_channels: 64
      dropout: 0.5
      alpha: 0.1
      theta: 0.5
    num_parts: 4
    batch_size: 2
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0.01
    nonreg_weight_decay: 5e-4
    grad_norm: 1.0
    epochs: 1000
    runs: 10
  WikiCS:
    architecture:
      num_layers: 4
      hidden_channels: 128
      dropout: 0.5
      alpha: 0.1
      theta: 0.5
    num_parts: 4
    batch_size: 2
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 5e-4
    nonreg_weight_decay: 5e-4
    grad_norm: null
    epochs: 1000
    runs: 10
  Reddit:
    architecture:
      num_layers: 4
      hidden_channels: 128
      dropout: 0.0
      alpha: 0.1
      theta: 0.5
    num_parts: 24
    batch_size: 12
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 1000
    runs: 1
  Flickr:
    architecture:
      num_layers: 4
      hidden_channels: 256
      dropout: 0.0
      alpha: 0.1
      theta: 0.5
      residual: false
      shared_weights: true
    num_parts: 24
    batch_size: 12
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 1000
    runs: 1
  Yelp:
    architecture:
      num_layers: 2
      hidden_channels: 256
      dropout: 0.0
      alpha: 0.1
      theta: 0.5
      residual: false
      shared_weights: true
    num_parts: 40
    batch_size: 10
    num_workers: 0
    lr: 0.01
    reg_weight_decay: 0
    nonreg_weight_decay: 0
    grad_norm: null
    epochs: 1000
    runs: 1
  PPI:
    architecture:
      num_layers: 9
      hidden_channels: 2048
      dropout: 0.2
      alpha: 0.5
      theta: 1.0
      residual: true
      shared_weights: false
    num_parts: 12
    batch_size: 1
    num_workers: 0
    lr: 0.001
    reg_weight_decay: 0
    nonreg_weight_decay: 0
    grad_norm: 1.0
    epochs: 3000
    runs: 1
```
## small_benchmark/main.py (new file, mode 100644)

```python
import hydra
from tqdm import tqdm
from omegaconf import OmegaConf

import torch
from torch_geometric.nn.conv.gcn_conv import gcn_norm

from scaling_gnns import get_data, models, SubgraphLoader, compute_acc

torch.manual_seed(123)
criterion = torch.nn.CrossEntropyLoss()


def train(run, data, model, loader, optimizer, grad_norm=None):
    model.train()

    # Datasets such as WikiCS ship multiple splits; pick the one for this run.
    train_mask = data.train_mask
    train_mask = train_mask[:, run] if train_mask.dim() == 2 else train_mask

    total_loss = total_examples = 0
    for info in loader:
        info = info.to(model.device)
        batch_size, n_id, adj_t, e_id = info

        # The first `batch_size` entries of `n_id` are the in-batch nodes.
        y = data.y[n_id[:batch_size]]
        mask = train_mask[n_id[:batch_size]]
        if mask.sum() == 0:
            continue

        optimizer.zero_grad()
        out = model(data.x[n_id], adj_t, batch_size, n_id)
        loss = criterion(out[mask], y[mask])
        loss.backward()
        if grad_norm is not None:
            torch.nn.utils.clip_grad_norm_(model.parameters(), grad_norm)
        optimizer.step()

        total_loss += float(loss) * int(mask.sum())
        total_examples += int(mask.sum())

    return total_loss / total_examples


@torch.no_grad()
def test(run, data, model):
    model.eval()

    val_mask = data.val_mask
    val_mask = val_mask[:, run] if val_mask.dim() == 2 else val_mask

    test_mask = data.test_mask
    test_mask = test_mask[:, run] if test_mask.dim() == 2 else test_mask

    # Full-batch inference on the whole graph.
    out = model(data.x, data.adj_t)
    val_acc = compute_acc(out, data.y, val_mask)
    test_acc = compute_acc(out, data.y, test_mask)

    return val_acc, test_acc


@hydra.main(config_path='conf', config_name='config')
def main(conf):
    model_name, dataset_name = conf.model.name, conf.dataset.name
    # Keep only the hyperparameters of the selected dataset.
    conf.model.params = conf.model.params[dataset_name]
    params = conf.model.params
    print(OmegaConf.to_yaml(conf))

    if isinstance(params.grad_norm, str):
        params.grad_norm = None

    device = f'cuda:{conf.device}' if torch.cuda.is_available() else 'cpu'

    data, in_channels, out_channels = get_data(conf.root, dataset_name)
    if conf.model.norm:
        data.adj_t = gcn_norm(data.adj_t)
    elif conf.model.loop:
        data.adj_t = data.adj_t.set_diag()

    loader = SubgraphLoader(
        data.adj_t,
        batch_size=params.batch_size,
        use_metis=True,
        num_parts=params.num_parts,
        shuffle=True,
        num_workers=params.num_workers,
        path=f'../../../metis/{model_name.lower()}_{dataset_name.lower()}',
        log=False,
    )

    data = data.to(device)

    GNN = getattr(models, model_name)
    model = GNN(
        num_nodes=data.num_nodes,
        in_channels=in_channels,
        out_channels=out_channels,
        device=device,
        **params.architecture,
    ).to(device)

    results = torch.empty(params.runs)
    pbar = tqdm(total=params.runs * params.epochs)
    for run in range(params.runs):
        model.reset_parameters()
        # Regularized and non-regularized modules get separate weight decay.
        optimizer = torch.optim.Adam([
            dict(params=model.reg_modules.parameters(),
                 weight_decay=params.reg_weight_decay),
            dict(params=model.nonreg_modules.parameters(),
                 weight_decay=params.nonreg_weight_decay),
        ], lr=params.lr)

        with torch.no_grad():  # Fill history.
            model.eval()
            model(data.x, data.adj_t)

        best_val_acc = 0
        for epoch in range(params.epochs):
            train(run, data, model, loader, optimizer, params.grad_norm)
            val_acc, test_acc = test(run, data, model)
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                results[run] = test_acc
            pbar.set_description(f'Mini Acc: {100 * results[run]:.2f}')
            pbar.update(1)
    pbar.close()

    print(f'Mini Acc: {100 * results.mean():.2f} ± {100 * results.std():.2f}')


if __name__ == "__main__":
    main()
```
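The helpers `get_data`, `models`, `SubgraphLoader`, and `compute_acc` are imported from the `scaling_gnns` package elsewhere in this repository and are not part of this commit. For orientation only, `compute_acc` presumably reduces to masked argmax accuracy; a minimal re-implementation under that assumption:

```python
import torch

def compute_acc(out, y, mask):
    # Hypothetical stand-in: fraction of correctly predicted
    # nodes, restricted to the given boolean mask.
    pred = out.argmax(dim=-1)
    return int((pred[mask] == y[mask]).sum()) / int(mask.sum())
```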