OpenDAS / FastMoE · Commits

Commit 6b5a34cc
Authored Mar 22, 2021 by Jiezhong Qiu
Parent cf9fd12a

format with black

Showing 1 changed file with 183 additions and 129 deletions:
fmoe/megatron.py  (+183, −129)

The commit runs black over the checkpoint helpers in fmoe/megatron.py: string literals become double-quoted, backslash line continuations disappear, and long calls are re-wrapped to black's 88-column style. Beyond pure formatting, it adds a print_rank_last import inside save_checkpoint, replaces a rank-0-guarded print with print_rank_last when announcing a save, and drops the per-parameter before/after-sum debug logging from merge_state_dict. Each hunk below is shown as the code reads after the commit.
@@ -366,46 +366,58 @@ class DistributedDataParallel(DistributedGroupedDataParallel):
        """
        return self.module.load_state_dict(*args, **kwargs)


def get_fmoe_checkpoint_name(
    checkpoints_path, iteration, release=False, data_parallel_rank=-1
):
    """A unified checkpoint name, allowing specifying a data parallel rank"""
    from megatron import mpu
    from megatron.checkpointing import get_checkpoint_name

    if data_parallel_rank == -1:
        data_parallel_rank = mpu.get_data_parallel_rank()
    if data_parallel_rank == 0:
        return get_checkpoint_name(checkpoints_path, iteration, release)
    if release:
        directory = "release"
    else:
        directory = "iter_{:07d}".format(iteration)
    # Use both the tensor and pipeline MP rank.
    if mpu.get_pipeline_model_parallel_world_size() == 1:
        return os.path.join(
            checkpoints_path,
            directory,
            "mp_rank_{:02d}_dp_rank_{:04d}".format(
                mpu.get_tensor_model_parallel_rank(), data_parallel_rank
            ),
            "model_optim_rng.pt",
        )
    return os.path.join(
        checkpoints_path,
        directory,
        "mp_rank_{:02d}_{:03d}_dp_rank_{:04d}".format(
            mpu.get_tensor_model_parallel_rank(),
            mpu.get_pipeline_model_parallel_rank(),
            data_parallel_rank,
        ),
        "model_optim_rng.pt",
    )


def save_checkpoint(iteration, model, optimizer, lr_scheduler):
    """Save a model checkpoint with expert parallel """
    # TODO: update patch
    from megatron import get_args
    from megatron import mpu
    from megatron import print_rank_last

    expert_dp_comm = "none"

    if mpu.get_data_parallel_rank() == 0:
        # at dp rank 0, we still follows the native load_checkpoint by megatron
        from megatron.checkpointing import save_checkpoint as save_checkpoint_native

        save_checkpoint_native(iteration, model, optimizer, lr_scheduler)
        return
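To make the naming scheme concrete, here is a small sketch (not part of the commit) that applies the same format strings as get_fmoe_checkpoint_name above; the save directory, iteration, and ranks are made-up values.

import os

# Illustrative only: mirrors the non-rank-0, pipeline-world-size-1 branch above
# with hypothetical values.
save_dir, iteration = "/ckpt/gpt-moe", 5000
tensor_mp_rank, data_parallel_rank = 1, 3
path = os.path.join(
    save_dir,
    "iter_{:07d}".format(iteration),
    "mp_rank_{:02d}_dp_rank_{:04d}".format(tensor_mp_rank, data_parallel_rank),
    "model_optim_rng.pt",
)
print(path)  # /ckpt/gpt-moe/iter_0005000/mp_rank_01_dp_rank_0003/model_optim_rng.pt

Data-parallel rank 0 instead falls back to Megatron's own get_checkpoint_name, so it keeps the native directory layout.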
@@ -415,16 +427,17 @@ def save_checkpoint(iteration, model, optimizer, lr_scheduler):
    if isinstance(model, DistributedDataParallel):
        model = model.module

    print_rank_last(
        "saving checkpoint at iteration {:7d} to {}".format(iteration, args.save)
    )

    # Arguments, iteration, and model.
    state_dict = {}
    state_dict["model"] = model.state_dict_for_save_checkpoint(
        keep_vars=(mpu.get_data_parallel_rank() > 0)
    )

    def extract_expert_param(state_dict, expert_dp_comm="none"):
        state_dict_new = state_dict.__class__()
        for k, v in state_dict.items():
            # megatron uses both dict and OrderedDict in its state_dict
@@ -432,72 +445,80 @@ def save_checkpoint(iteration, model, optimizer, lr_scheduler):
            v_new = extract_expert_param(v, expert_dp_comm)
            if len(v_new) > 0:
                state_dict_new[k] = v_new
            elif hasattr(v, "dp_comm") and v.dp_comm == expert_dp_comm:
                state_dict_new[k] = v.detach()
        return state_dict_new

    state_dict["model"] = extract_expert_param(state_dict["model"], expert_dp_comm)

    # Optimizer stuff.
    if not args.no_save_optim:
        if optimizer is not None:
            state_dict["optimizer"] = optimizer.state_dict()
            param_global_idx = 0
            for param_group in optimizer.optimizer.param_groups:
                for param in param_group["params"]:
                    if not (
                        hasattr(param, "dp_comm") and param.dp_comm == expert_dp_comm
                    ):
                        # this parameter is not an expert parameter
                        # thus there is no need to save its state in current rank
                        # since it has been saved by data parallel rank 0
                        if args.fp16:
                            # fp16 optimizer may have empty state due to overflow
                            state_dict["optimizer"]["optimizer"]["state"].pop(
                                param_global_idx, None
                            )
                        else:
                            state_dict["optimizer"]["state"].pop(param_global_idx)
                    param_global_idx += 1
            if args.fp16:
                state_dict["optimizer"]["optimizer"].pop("param_groups")
                # fp32_from_fp16_params in state_dict is not a copy
                # but a reference to optimizer.fp32_from_fp16_params,
                # changing it in state_dict will change
                # optimizer.fp32_from_fp16_params as well
                # thus we create an empty fp32_from_fp16_params in state_dict
                # and only insert expert parameters.
                fp32_from_fp16_params = state_dict["optimizer"]["fp32_from_fp16_params"]
                state_dict["optimizer"]["fp32_from_fp16_params"] = []
                for param_group in fp32_from_fp16_params:
                    param_group_copy = []
                    for param in param_group:
                        param_copy = (
                            param
                            if hasattr(param, "dp_comm") and param.dp_comm == expert_dp_comm
                            else None
                        )
                        param_group_copy.append(param_copy)
                    state_dict["optimizer"]["fp32_from_fp16_params"].append(
                        param_group_copy
                    )
            else:
                state_dict["optimizer"].pop("param_groups")

    # Save.
    checkpoint_name = get_fmoe_checkpoint_name(args.save, iteration)
    from megatron.checkpointing import ensure_directory_exists
    from megatron.checkpointing import get_checkpoint_tracker_filename

    ensure_directory_exists(checkpoint_name)
    torch.save(state_dict, checkpoint_name)

    # Wait so everyone is done (necessary)
    torch.distributed.barrier()
    if torch.distributed.get_rank() == 0:
        print(
            " successfully saved checkpoint at iteration {:7d} to {}".format(
                iteration, args.save
            ),
            flush=True,
        )

    # And update the latest iteration
    if torch.distributed.get_rank() == 0:
        tracker_filename = get_checkpoint_tracker_filename(args.save)
        with open(tracker_filename, "w") as f:
            f.write(str(iteration))

    # Wait so everyone is done (not necessary)
    torch.distributed.barrier()
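The filtering above hinges on parameters carrying a dp_comm attribute, which FastMoE attaches to expert parameters elsewhere in the library; only tensors whose dp_comm equals expert_dp_comm ("none") survive on non-zero data-parallel ranks. A self-contained sketch of that behavior, not part of the commit: the recursion mirrors extract_expert_param above (the elided branch is assumed to be an isinstance check on nested dicts), and the parameter names and tags are hypothetical.

from collections import OrderedDict

import torch


def extract_expert_param(state_dict, expert_dp_comm="none"):
    # Keep only tensors tagged with the expert dp_comm; recurse into sub-dicts.
    state_dict_new = state_dict.__class__()
    for k, v in state_dict.items():
        if isinstance(v, (dict, OrderedDict)):
            v_new = extract_expert_param(v, expert_dp_comm)
            if len(v_new) > 0:
                state_dict_new[k] = v_new
        elif hasattr(v, "dp_comm") and v.dp_comm == expert_dp_comm:
            state_dict_new[k] = v.detach()
    return state_dict_new


# Hypothetical parameters: only the one tagged dp_comm == "none" is kept.
expert_w = torch.nn.Parameter(torch.randn(4, 4))
expert_w.dp_comm = "none"
dense_w = torch.nn.Parameter(torch.randn(4, 4))
dense_w.dp_comm = "dp"

sd = OrderedDict(
    {"experts": OrderedDict({"htoh4.weight": expert_w}), "dense.weight": dense_w}
)
print(list(extract_expert_param(sd).keys()))  # ['experts']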
@@ -507,6 +528,7 @@ def merge_state_dict(state_dict_rank0, state_dict_local, fp16):
    """merge two state dicts, one from data parallel rank 0,
    another only contains expert states"""
    from megatron import print_rank_last

    def merge_model(state_dict_rank0, state_dict_local):
        for k, v in state_dict_local.items():
            # megatron uses both dict and OrderedDict in its state_dict
@@ -514,37 +536,43 @@ def merge_state_dict(state_dict_rank0, state_dict_local, fp16):
                print_rank_last("[merge model] go recursively to {}".format(k))
                merge_model(state_dict_rank0[k], v)
            else:
                state_dict_rank0[k] = v

    merge_model(state_dict_rank0["model"], state_dict_local["model"])
    optimizer_rank0 = (
        state_dict_rank0["optimizer"]["optimizer"]
        if fp16
        else state_dict_rank0["optimizer"]
    )
    optimizer_local = (
        state_dict_local["optimizer"]["optimizer"]
        if fp16
        else state_dict_local["optimizer"]
    )
    for k, v in optimizer_local["state"].items():
        optimizer_rank0["state"][k] = v
    if fp16:
        for group_idx, param_group in enumerate(
            state_dict_local["optimizer"]["fp32_from_fp16_params"]
        ):
            for param_in_group_idx, param in enumerate(param_group):
                if param is not None:
                    state_dict_rank0["optimizer"]["fp32_from_fp16_params"][group_idx][
                        param_in_group_idx
                    ] = param
                    print_rank_last(
                        "[merge fp32_from_fp16_params] copy parameter ({:d}, {:d})".format(
                            group_idx, param_in_group_idx
                        )
                    )
    return state_dict_rank0


def load_checkpoint(model, optimizer, lr_scheduler, load_arg="load"):
    """Load a model checkpoint and return the iteration."""
    from megatron import get_args
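merge_model above overwrites entries of the rank-0 state dict with the locally saved expert tensors, recursing through nested dicts. A toy sketch of that behavior on plain dictionaries, not part of the commit; the isinstance branch is assumed from the elided context, and the keys and values are made up.

from collections import OrderedDict


def merge_model(state_dict_rank0, state_dict_local):
    # Nested dicts recurse; leaf entries from the local dict overwrite rank 0's.
    for k, v in state_dict_local.items():
        if isinstance(v, (dict, OrderedDict)):
            merge_model(state_dict_rank0[k], v)
        else:
            state_dict_rank0[k] = v


rank0 = {"dense.weight": "dense@rank0", "experts": {"htoh4.weight": "stale"}}
local = {"experts": {"htoh4.weight": "expert@this-rank"}}
merge_model(rank0, local)
print(rank0["experts"]["htoh4.weight"])  # expert@this-rank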
@@ -554,9 +582,11 @@ def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load'):
    from megatron.checkpointing import set_checkpoint_version
    from megatron.checkpointing import check_checkpoint_args
    from megatron.checkpointing import update_num_microbatches

    if mpu.get_data_parallel_rank() == 0:
        # at dp rank 0, we still follow the native load_checkpoint by megatron
        from megatron.checkpointing import load_checkpoint as load_checkpoint_native

        return load_checkpoint_native(model, optimizer, lr_scheduler, load_arg)

    args = get_args()
@@ -569,130 +599,154 @@ def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load'):
    # If no tracker file, return iretation zero.
    if not os.path.isfile(tracker_filename):
        print_rank_last(
            "WARNING: could not find the metadata file {} ".format(tracker_filename)
        )
        print_rank_last(
            " will not load any checkpoints and will start from " "random"
        )
        return 0

    # Otherwise, read the tracker file and either set the iteration or
    # mark it as a release checkpoint.
    iteration = 0
    release = False
    with open(tracker_filename, "r") as f:
        metastring = f.read().strip()
        try:
            iteration = int(metastring)
        except ValueError:
            release = metastring == "release"
            if not release:
                print_rank_last(
                    "ERROR: Invalid metadata file {}. Exiting".format(tracker_filename)
                )
                sys.exit()

    assert iteration > 0 or release, "error parsing metadata file {}".format(
        tracker_filename
    )

    # Checkpoint.
    checkpoint_name_rank0 = get_fmoe_checkpoint_name(load_dir, iteration, release, 0)
    checkpoint_name_local = get_fmoe_checkpoint_name(
        load_dir, iteration, release, mpu.get_data_parallel_rank()
    )
    print_rank_last(
        " loading checkpoint at rank 0 from {} and rank {} from {} at iteration {}, will merge them later".format(
            checkpoint_name_rank0,
            mpu.get_data_parallel_rank(),
            checkpoint_name_local,
            iteration,
        )
    )

    # Load the checkpoint.
    def load_state_dict(checkpoint_name):
        try:
            state_dict = torch.load(checkpoint_name, map_location="cpu")
        except ModuleNotFoundError:
            from megatron.fp16_deprecated import loss_scaler

            # For backward compatibility.
            print_rank_last(" > deserializing using the old code structure ...")
            sys.modules["fp16.loss_scaler"] = sys.modules[
                "megatron.fp16_deprecated.loss_scaler"
            ]
            sys.modules["megatron.fp16.loss_scaler"] = sys.modules[
                "megatron.fp16_deprecated.loss_scaler"
            ]
            state_dict = torch.load(checkpoint_name, map_location="cpu")
            sys.modules.pop("fp16.loss_scaler", None)
            sys.modules.pop("megatron.fp16.loss_scaler", None)
        except BaseException:
            print_rank_last("could not load the checkpoint")
            sys.exit()
        return state_dict

    state_dict_rank0 = load_state_dict(checkpoint_name_rank0)
    state_dict_local = load_state_dict(checkpoint_name_local)

    state_dict = merge_state_dict(state_dict_rank0, state_dict_local, args.fp16)

    # set checkpoint version
    set_checkpoint_version(state_dict.get("checkpoint_version", 0))

    # Set iteration.
    if args.finetune or release:
        iteration = 0
    else:
        try:
            iteration = state_dict["iteration"]
        except KeyError:
            try:  # Backward compatible with older checkpoints
                iteration = state_dict["total_iters"]
            except KeyError:
                print_rank_last(
                    "A metadata file exists but unable to load "
                    "iteration from checkpoint {}, exiting".format(
                        checkpoint_name_local
                    )
                )
                sys.exit()

    # Check arguments.
    assert args.consumed_train_samples == 0
    assert args.consumed_valid_samples == 0
    if "args" in state_dict:
        checkpoint_args = state_dict["args"]
        check_checkpoint_args(checkpoint_args)
        args.consumed_train_samples = getattr(
            checkpoint_args, "consumed_train_samples", 0
        )
        update_num_microbatches(consumed_samples=args.consumed_train_samples)
        args.consumed_valid_samples = getattr(
            checkpoint_args, "consumed_valid_samples", 0
        )
    else:
        print_rank_last("could not find arguments in the checkpoint ...")

    # Model.
    model.load_state_dict(state_dict["model"])

    # Optimizer.
    if not release and not args.finetune and not args.no_load_optim:
        try:
            if optimizer is not None:
                optimizer.load_state_dict(state_dict["optimizer"])
            if lr_scheduler is not None:
                lr_scheduler.load_state_dict(state_dict["lr_scheduler"])
        except KeyError:
            print_rank_last(
                "Unable to load optimizer from checkpoint {}. "
                "Specify --no-load-optim or --finetune to prevent "
                "attempting to load the optimizer state, "
                "exiting ...".format(checkpoint_name_local)
            )
            sys.exit()

    # rng states.
    if not release and not args.finetune and not args.no_load_rng:
        try:
            random.setstate(state_dict["random_rng_state"])
            np.random.set_state(state_dict["np_rng_state"])
            torch.set_rng_state(state_dict["torch_rng_state"])
            torch.cuda.set_rng_state(state_dict["cuda_rng_state"])
            mpu.get_cuda_rng_tracker().set_states(state_dict["rng_tracker_states"])
        except KeyError:
            print_rank_last(
                "Unable to load optimizer from checkpoint {}. "
                "Specify --no-load-rng or --finetune to prevent "
                "attempting to load the optimizer state, "
                "exiting ...".format(checkpoint_name_local)
            )
            sys.exit()

    torch.distributed.barrier()
    print_rank_last(
        " successfully loaded checkpoint (with expert parametes updated) from {} at iteration {}".format(
            args.load, iteration
        )
    )

    return iteration
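Taken together, the patched save_checkpoint and load_checkpoint are drop-in replacements for Megatron's own: data-parallel rank 0 writes the full checkpoint through the native path, every other data-parallel rank writes only its expert parameters, and loading merges the two. A hedged usage sketch follows; it is not from the diff, and train_step, the argument fields, and the model/optimizer objects are assumed to come from a Megatron-style training script with FastMoE installed.

# Hypothetical training-script fragment showing where the patched functions
# would be called; everything except the two fmoe.megatron calls is assumed.
from fmoe.megatron import save_checkpoint, load_checkpoint


def train(model, optimizer, lr_scheduler, args, train_step):
    iteration = 0
    if args.load:
        # Rank-0 checkpoint plus this rank's expert shard are merged on load.
        iteration = load_checkpoint(model, optimizer, lr_scheduler)
    while iteration < args.train_iters:
        train_step(model, optimizer, lr_scheduler)
        iteration += 1
        if args.save and iteration % args.save_interval == 0:
            # dp rank 0 saves everything; other dp ranks save expert params only.
            save_checkpoint(iteration, model, optimizer, lr_scheduler)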