OpenDAS / dgl
Commit e0aa8389 (unverified)
Authored Aug 03, 2023 by Andrei Ivanov; committed via GitHub on Aug 04, 2023
Improving the CLUSTER_GAT example. (#6059)
Parent: d20db1ec
Showing 1 changed file, with 29 additions and 27 deletions:

examples/pytorch/ogb/cluster-gat/main.py
@@ -112,17 +112,18 @@ class GAT(nn.Module):
                 num_workers=args.num_workers,
             )
-            for input_nodes, output_nodes, blocks in tqdm.tqdm(dataloader):
-                block = blocks[0].int().to(device)
-                h = x[input_nodes].to(device)
-                if l < self.n_layers - 1:
-                    h = layer(block, h).flatten(1)
-                else:
-                    h = layer(block, h)
-                    h = h.mean(1)
-                    h = h.log_softmax(dim=-1)
-                y[output_nodes] = h.cpu()
+            with dataloader.enable_cpu_affinity():
+                for input_nodes, output_nodes, blocks in tqdm.tqdm(dataloader):
+                    block = blocks[0].int().to(device)
+                    h = x[input_nodes].to(device)
+                    if l < self.n_layers - 1:
+                        h = layer(block, h).flatten(1)
+                    else:
+                        h = layer(block, h)
+                        h = h.mean(1)
+                        h = h.log_softmax(dim=-1)
+                    y[output_nodes] = h.cpu()
             x = y
         return y
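For context, here is a minimal, self-contained sketch of how the enable_cpu_affinity() context manager added in this hunk is typically used with a dgl.dataloading.DataLoader. The toy graph, sampler depth, batch size, and worker count below are placeholders chosen for illustration, not values from this example, and the feature assumes a CPU run with worker processes (DGL may additionally require psutil for it).

    # Sketch only, not code from this commit. Assumes DGL >= 1.0 with the
    # PyTorch backend, running on CPU with enough cores for the workers.
    import torch as th
    import dgl
    from dgl.dataloading import DataLoader, MultiLayerFullNeighborSampler

    g = dgl.rand_graph(1000, 5000)  # toy stand-in for the OGB graph
    sampler = MultiLayerFullNeighborSampler(1)  # one block per batch, as in inference()
    dataloader = DataLoader(
        g,
        th.arange(g.num_nodes()),
        sampler,
        batch_size=256,
        shuffle=False,
        drop_last=False,
        num_workers=4,  # affinity only makes sense with worker processes
    )

    # Pins dataloader workers and compute threads to separate CPU cores for the
    # duration of the loop, which is what the hunk above adds to GAT.inference().
    with dataloader.enable_cpu_affinity():
        for input_nodes, output_nodes, blocks in dataloader:
            block = blocks[0].int()
            # per-layer message passing would go here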
@@ -279,7 +280,9 @@ def run(args, device, data, nfeat):
                     best_eval_acc, best_test_acc
                 )
             )
-    print("Avg epoch time: {}".format(avg / (epoch - 4)))
+    if epoch >= 5:
+        print("Avg epoch time: {}".format(avg / (epoch - 4)))
     return best_test_acc.to(th.device("cpu"))
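The new guard matters for short runs: the timing average divides by (epoch - 4), which is only meaningful once epoch >= 5. A hedged sketch of the warm-up-skipping pattern this belongs to (the accumulation step is assumed from common DGL examples, not shown in this hunk; the dummy work is a placeholder):

    import time

    # Illustrative only: with the old unconditional print, a 5-epoch run ends at
    # epoch == 4 and divides by zero, and shorter runs print a meaningless value.
    num_epochs = 8
    avg = 0.0
    for epoch in range(num_epochs):
        tic = time.time()
        time.sleep(0.01)  # stand-in for one training epoch
        toc = time.time()
        if epoch >= 5:  # skip warm-up epochs when accumulating
            avg += toc - tic

    if epoch >= 5:
        print("Avg epoch time: {}".format(avg / (epoch - 4)))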
@@ -291,22 +294,22 @@ if __name__ == "__main__":
         default=0,
         help="GPU device ID. Use -1 for CPU training",
     )
-    argparser.add_argument("--num-epochs", type=int, default=20)
+    argparser.add_argument("--num_epochs", type=int, default=20)
-    argparser.add_argument("--num-hidden", type=int, default=128)
+    argparser.add_argument("--num_hidden", type=int, default=128)
-    argparser.add_argument("--num-layers", type=int, default=3)
+    argparser.add_argument("--num_layers", type=int, default=3)
-    argparser.add_argument("--num-heads", type=int, default=8)
+    argparser.add_argument("--num_heads", type=int, default=8)
-    argparser.add_argument("--batch-size", type=int, default=32)
+    argparser.add_argument("--batch_size", type=int, default=32)
-    argparser.add_argument("--val-batch-size", type=int, default=2000)
+    argparser.add_argument("--val_batch_size", type=int, default=2000)
-    argparser.add_argument("--log-every", type=int, default=20)
+    argparser.add_argument("--log_every", type=int, default=20)
-    argparser.add_argument("--eval-every", type=int, default=1)
+    argparser.add_argument("--eval_every", type=int, default=1)
     argparser.add_argument("--lr", type=float, default=0.001)
     argparser.add_argument("--dropout", type=float, default=0.5)
-    argparser.add_argument("--save-pred", type=str, default="")
+    argparser.add_argument("--save_pred", type=str, default="")
     argparser.add_argument("--wd", type=float, default=0)
     argparser.add_argument("--num_partitions", type=int, default=15000)
-    argparser.add_argument("--num-workers", type=int, default=0)
+    argparser.add_argument("--num_workers", type=int, default=4)
     argparser.add_argument(
-        "--data-cpu",
+        "--data_cpu",
         action="store_true",
         help="By default the script puts all node features and labels "
         "on GPU when using it to save time for data copy. This may "
@@ -352,7 +355,7 @@ if __name__ == "__main__":
         batch_size=args.batch_size,
         shuffle=True,
         pin_memory=True,
-        num_workers=4,
+        num_workers=args.num_workers,
         collate_fn=partial(subgraph_collate_fn, graph),
     )
@@ -375,6 +378,5 @@ if __name__ == "__main__":
     nfeat = graph.ndata.pop("feat").to(device)
     for i in range(10):
         test_accs.append(run(args, device, data, nfeat))
-        print("Average test accuracy:", np.mean(test_accs), "±", np.std(test_accs)
-        )
+        print("Average test accuracy:", np.mean(test_accs), "±", np.std(test_accs))