Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dgl
Commits
d06c5403
Unverified
Commit
d06c5403
authored
Oct 29, 2020
by
maqy1995
Committed by
GitHub
Oct 29, 2020
Browse files
[Example] fix print info (#2305)
parent
38b3d0c4
Changes
8
Hide whitespace changes
Inline
Side-by-side
Showing
8 changed files
with
8 additions
and
8 deletions
+8
-8
examples/pytorch/graphsage/experimental/train_dist.py
examples/pytorch/graphsage/experimental/train_dist.py
+1
-1
examples/pytorch/graphsage/train_sampling.py
examples/pytorch/graphsage/train_sampling.py
+1
-1
examples/pytorch/graphsage/train_sampling_multi_gpu.py
examples/pytorch/graphsage/train_sampling_multi_gpu.py
+1
-1
examples/pytorch/graphsage/train_sampling_unsupervised.py
examples/pytorch/graphsage/train_sampling_unsupervised.py
+1
-1
examples/pytorch/ogb/cluster-gat/main.py
examples/pytorch/ogb/cluster-gat/main.py
+1
-1
examples/pytorch/ogb/cluster-sage/main.py
examples/pytorch/ogb/cluster-sage/main.py
+1
-1
examples/pytorch/ogb/ogbn-products/gat/main.py
examples/pytorch/ogb/ogbn-products/gat/main.py
+1
-1
examples/pytorch/ogb/ogbn-products/graphsage/main.py
examples/pytorch/ogb/ogbn-products/graphsage/main.py
+1
-1
No files found.
examples/pytorch/graphsage/experimental/train_dist.py
View file @
d06c5403
...
...
@@ -237,7 +237,7 @@ def run(args, device, data):
if
step
%
args
.
log_every
==
0
:
acc
=
compute_acc
(
batch_pred
,
batch_labels
)
gpu_mem_alloc
=
th
.
cuda
.
max_memory_allocated
()
/
1000000
if
th
.
cuda
.
is_available
()
else
0
print
(
'Part {} | Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB | time {:.3f} s'
.
format
(
print
(
'Part {} | Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB | time {:.3f} s'
.
format
(
g
.
rank
(),
epoch
,
step
,
loss
.
item
(),
acc
.
item
(),
np
.
mean
(
iter_tput
[
3
:]),
gpu_mem_alloc
,
np
.
sum
(
step_time
[
-
args
.
log_every
:])))
start
=
time
.
time
()
...
...
examples/pytorch/graphsage/train_sampling.py
View file @
d06c5403
...
...
@@ -174,7 +174,7 @@ def run(args, device, data):
if
step
%
args
.
log_every
==
0
:
acc
=
compute_acc
(
batch_pred
,
batch_labels
)
gpu_mem_alloc
=
th
.
cuda
.
max_memory_allocated
()
/
1000000
if
th
.
cuda
.
is_available
()
else
0
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'
.
format
(
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'
.
format
(
epoch
,
step
,
loss
.
item
(),
acc
.
item
(),
np
.
mean
(
iter_tput
[
3
:]),
gpu_mem_alloc
))
tic_step
=
time
.
time
()
...
...
examples/pytorch/graphsage/train_sampling_multi_gpu.py
View file @
d06c5403
...
...
@@ -195,7 +195,7 @@ def run(proc_id, n_gpus, args, devices, data):
iter_tput
.
append
(
len
(
seeds
)
*
n_gpus
/
(
time
.
time
()
-
tic_step
))
if
step
%
args
.
log_every
==
0
and
proc_id
==
0
:
acc
=
compute_acc
(
batch_pred
,
batch_labels
)
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'
.
format
(
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'
.
format
(
epoch
,
step
,
loss
.
item
(),
acc
.
item
(),
np
.
mean
(
iter_tput
[
3
:]),
th
.
cuda
.
max_memory_allocated
()
/
1000000
))
if
n_gpus
>
1
:
...
...
examples/pytorch/graphsage/train_sampling_unsupervised.py
View file @
d06c5403
...
...
@@ -271,7 +271,7 @@ def run(proc_id, n_gpus, args, devices, data):
iter_t
.
append
(
t
-
d_step
)
if
step
%
args
.
log_every
==
0
:
gpu_mem_alloc
=
th
.
cuda
.
max_memory_allocated
()
/
1000000
if
th
.
cuda
.
is_available
()
else
0
print
(
'[{}]Epoch {:05d} | Step {:05d} | Loss {:.4f} | Speed (samples/sec) {:.4f}|{:.4f} | Load {:.4f}| train {:.4f} | GPU {:.1f} MiB'
.
format
(
print
(
'[{}]Epoch {:05d} | Step {:05d} | Loss {:.4f} | Speed (samples/sec) {:.4f}|{:.4f} | Load {:.4f}| train {:.4f} | GPU {:.1f} MB'
.
format
(
proc_id
,
epoch
,
step
,
loss
.
item
(),
np
.
mean
(
iter_pos
[
3
:]),
np
.
mean
(
iter_neg
[
3
:]),
np
.
mean
(
iter_d
[
3
:]),
np
.
mean
(
iter_t
[
3
:]),
gpu_mem_alloc
))
tic_step
=
time
.
time
()
...
...
examples/pytorch/ogb/cluster-gat/main.py
View file @
d06c5403
...
...
@@ -189,7 +189,7 @@ def run(args, device, data):
if
step
%
args
.
log_every
==
0
:
acc
=
compute_acc
(
batch_pred
,
batch_labels
)
gpu_mem_alloc
=
th
.
cuda
.
max_memory_allocated
()
/
1000000
if
th
.
cuda
.
is_available
()
else
0
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | GPU {:.1f} MiB'
.
format
(
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | GPU {:.1f} MB'
.
format
(
epoch
,
step
,
loss
.
item
(),
acc
.
item
(),
gpu_mem_alloc
))
tic_start
=
time
.
time
()
...
...
examples/pytorch/ogb/cluster-sage/main.py
View file @
d06c5403
...
...
@@ -158,7 +158,7 @@ def run(args, device, data):
if
step
%
args
.
log_every
==
0
:
acc
=
compute_acc
(
batch_pred
,
batch_labels
)
gpu_mem_alloc
=
th
.
cuda
.
max_memory_allocated
()
/
1000000
if
th
.
cuda
.
is_available
()
else
0
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | GPU {:.1f} MiB'
.
format
(
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | GPU {:.1f} MB'
.
format
(
epoch
,
step
,
loss
.
item
(),
acc
.
item
(),
gpu_mem_alloc
))
toc
=
time
.
time
()
...
...
examples/pytorch/ogb/ogbn-products/gat/main.py
View file @
d06c5403
...
...
@@ -184,7 +184,7 @@ def run(args, device, data):
if
step
%
args
.
log_every
==
0
:
acc
=
compute_acc
(
batch_pred
,
batch_labels
)
gpu_mem_alloc
=
th
.
cuda
.
max_memory_allocated
()
/
1000000
if
th
.
cuda
.
is_available
()
else
0
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'
.
format
(
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'
.
format
(
epoch
,
step
,
loss
.
item
(),
acc
.
item
(),
np
.
mean
(
iter_tput
[
3
:]),
gpu_mem_alloc
))
toc
=
time
.
time
()
...
...
examples/pytorch/ogb/ogbn-products/graphsage/main.py
View file @
d06c5403
...
...
@@ -180,7 +180,7 @@ def run(args, device, data):
if
step
%
args
.
log_every
==
0
:
acc
=
compute_acc
(
batch_pred
,
batch_labels
)
gpu_mem_alloc
=
th
.
cuda
.
max_memory_allocated
()
/
1000000
if
th
.
cuda
.
is_available
()
else
0
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'
.
format
(
print
(
'Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'
.
format
(
epoch
,
step
,
loss
.
item
(),
acc
.
item
(),
np
.
mean
(
iter_tput
[
3
:]),
gpu_mem_alloc
))
toc
=
time
.
time
()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment