Commit cebd3b8b authored by mohammad

addressed Jared's comments

parent f0a445fa
@@ -89,8 +89,7 @@ def get_checkpoint_tracker_filename(checkpoints_path):
     return os.path.join(checkpoints_path, 'latest_checkpointed_iteration.txt')
 
 
-def save_checkpoint(iteration, model, optimizer, lr_scheduler,
-                    consumed_train_samples=None, consumed_valid_samples=None):
+def save_checkpoint(iteration, model, optimizer, lr_scheduler):
     """Save a model checkpoint."""
     args = get_args()
@@ -104,10 +103,6 @@ def save_checkpoint(iteration, model, optimizer, lr_scheduler,
         state_dict['args'] = args
         state_dict['checkpoint_version'] = 2.0
         state_dict['iteration'] = iteration
-        if consumed_train_samples:
-            state_dict['consumed_train_samples'] = consumed_train_samples
-        if consumed_valid_samples:
-            state_dict['consumed_valid_samples'] = consumed_valid_samples
         state_dict['model'] = model.state_dict_for_save_checkpoint()
 
         # Optimizer stuff.
@@ -219,17 +214,14 @@ def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load'):
             checkpoint_name))
         sys.exit()
 
-    if 'consumed_train_samples' in state_dict:
-        assert args.consumed_train_samples == 0
-        args.consumed_train_samples = state_dict['consumed_train_samples']
-    if 'consumed_valid_samples' in state_dict:
-        assert args.consumed_valid_samples == 0
-        args.consumed_valid_samples = state_dict['consumed_valid_samples']
-
     # Check arguments.
+    assert args.consumed_train_samples == 0
+    assert args.consumed_valid_samples == 0
     if 'args' in state_dict:
         checkpoint_args = state_dict['args']
         check_checkpoint_args(checkpoint_args)
+        args.consumed_train_samples = getattr(checkpoint_args, 'consumed_train_samples', 0)
+        args.consumed_valid_samples = getattr(checkpoint_args, 'consumed_valid_samples', 0)
     else:
         print_rank_0('could not find arguments in the checkpoint ...')
@@ -104,9 +104,7 @@ def pretrain(train_valid_test_dataset_provider, model_provider,
                                    iteration, False)
 
     if args.save and iteration != 0:
-        save_checkpoint(iteration, model, optimizer, lr_scheduler,
-                        consumed_train_samples=args.consumed_train_samples,
-                        consumed_valid_samples=args.consumed_valid_samples)
+        save_checkpoint(iteration, model, optimizer, lr_scheduler)
 
     if args.do_test:
         # Run on test data.
@@ -438,9 +436,7 @@ def train(forward_step_func, model, optimizer, lr_scheduler,
         # Checkpointing
         if args.save and args.save_interval and \
            iteration % args.save_interval == 0:
-            save_checkpoint(iteration, model, optimizer, lr_scheduler,
-                            consumed_train_samples=args.consumed_train_samples,
-                            consumed_valid_samples=args.consumed_valid_samples)
+            save_checkpoint(iteration, model, optimizer, lr_scheduler)
 
         # Evaluation
         if args.eval_interval and iteration % args.eval_interval == 0 and \
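Net effect of the commit: consumed_train_samples and consumed_valid_samples are no longer passed to save_checkpoint explicitly; they travel inside the saved args object and are restored on load via getattr with a default of 0, so checkpoints written before these fields existed still load. The following is a minimal standalone sketch of that flow, not the Megatron-LM implementation; save_checkpoint_sketch and load_checkpoint_sketch are hypothetical names used only for illustration.

# Minimal sketch of the new checkpoint flow, assuming only that the saved
# state_dict carries the run's `args` object (as in the diff above).
# save_checkpoint_sketch / load_checkpoint_sketch are illustrative names,
# not the actual Megatron-LM functions.
from types import SimpleNamespace


def save_checkpoint_sketch(iteration, model_state, args):
    # The sample counters are attributes of `args`, so storing `args`
    # is enough; no separate consumed_*_samples entries are written.
    return {'iteration': iteration, 'args': args, 'model': model_state}


def load_checkpoint_sketch(state_dict, args):
    # A fresh run must not have consumed anything before the checkpoint
    # overrides the counters.
    assert args.consumed_train_samples == 0
    assert args.consumed_valid_samples == 0
    if 'args' in state_dict:
        checkpoint_args = state_dict['args']
        # Default to 0 for checkpoints written before these fields existed.
        args.consumed_train_samples = getattr(
            checkpoint_args, 'consumed_train_samples', 0)
        args.consumed_valid_samples = getattr(
            checkpoint_args, 'consumed_valid_samples', 0)
    return state_dict['iteration']


# Round trip: counters saved inside args come back on a fresh args object.
old_args = SimpleNamespace(consumed_train_samples=1000, consumed_valid_samples=80)
ckpt = save_checkpoint_sketch(500, {'weights': [0.0]}, old_args)
new_args = SimpleNamespace(consumed_train_samples=0, consumed_valid_samples=0)
assert load_checkpoint_sketch(ckpt, new_args) == 500
assert new_args.consumed_train_samples == 1000
assert new_args.consumed_valid_samples == 80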