OpenDAS / Megatron-LM

Commit 3574b8e6

Better memory tracking across pipeline-parallel ranks

Authored Dec 06, 2020 by Deepak Narayanan
Parent: 00ac56ab
Showing 2 changed files with 5 additions and 3 deletions

megatron/training.py   +3 -2
megatron/utils.py      +2 -1
megatron/training.py

@@ -716,8 +716,9 @@ def training_log(loss_dict, total_loss_dict, learning_rate, iteration,
         total_loss_dict[skipped_iters_key] = 0
         total_loss_dict[got_nan_key] = 0
         print_rank_last(log_string)
-        if report_memory_flag:
-            report_memory('after {} iterations'.format(iteration))
+        if report_memory_flag and learning_rate > 0.:
+            # Report memory after optimizer state has been initialized.
+            report_memory('(after {} iterations)'.format(iteration))
             report_memory_flag = False
         timers.log(timers_to_log, normalizer=args.log_interval)
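For context, the guard added above delays the one-time memory report until the learning rate is non-zero, i.e. until the optimizer has actually taken a step and materialized its state, so the reported numbers include optimizer memory. The sketch below is a minimal, self-contained illustration of that pattern, not the Megatron-LM training loop; `report_memory` mirrors megatron/utils.py, while the surrounding helper and the name `maybe_report_memory` are hypothetical.

```python
import torch


def report_memory(name):
    """GPU memory report in MB (requires a CUDA-enabled PyTorch build)."""
    mega_bytes = 1024.0 * 1024.0
    string = name + ' memory (MB)'
    string += ' | allocated: {}'.format(torch.cuda.memory_allocated() / mega_bytes)
    string += ' | max allocated: {}'.format(torch.cuda.max_memory_allocated() / mega_bytes)
    string += ' | reserved: {}'.format(torch.cuda.memory_reserved() / mega_bytes)
    string += ' | max reserved: {}'.format(torch.cuda.max_memory_reserved() / mega_bytes)
    print(string, flush=True)


def maybe_report_memory(iteration, learning_rate, report_memory_flag):
    # Report only once, and only after the optimizer state has been
    # initialized (learning_rate > 0 once warmup has produced a real step).
    if report_memory_flag and learning_rate > 0.:
        report_memory('(after {} iterations)'.format(iteration))
        report_memory_flag = False
    return report_memory_flag
```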
megatron/utils.py

@@ -50,7 +50,8 @@ def report_memory(name):
     string += ' | reserved: {}'.format(torch.cuda.memory_reserved() / mega_bytes)
     string += ' | max reserved: {}'.format(
         torch.cuda.max_memory_reserved() / mega_bytes)
-    print_rank_0(string)
+    if mpu.get_data_parallel_rank() == 0:
+        print("[Rank {}] {}".format(torch.distributed.get_rank(), string), flush=True)
 
 
 def print_params_min_max_norm(optimizer, iteration):
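The utils.py change swaps `print_rank_0` for a per-rank print: the first rank of each data-parallel group now reports its own memory, tagged with its global rank, so each pipeline-parallel stage shows up in the log rather than only global rank 0. Below is a standalone sketch of that pattern using plain `torch.distributed`; the real code uses Megatron's `mpu.get_data_parallel_rank()`, and the `data_parallel_group` argument here is an assumption.

```python
import torch
import torch.distributed as dist


def report_memory_per_rank(name, data_parallel_group=None):
    """Print a GPU memory report from the first rank of each data-parallel
    group, prefixed with the global rank, so every pipeline-parallel stage
    (which holds different layers, hence different memory) appears in the log."""
    mega_bytes = 1024.0 * 1024.0
    string = name + ' memory (MB)'
    string += ' | reserved: {}'.format(torch.cuda.memory_reserved() / mega_bytes)
    string += ' | max reserved: {}'.format(torch.cuda.max_memory_reserved() / mega_bytes)
    if dist.get_rank(group=data_parallel_group) == 0:
        print("[Rank {}] {}".format(dist.get_rank(), string), flush=True)
```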