Commit 762956a5 authored by Mehdi Drissi's avatar Mehdi Drissi
Browse files

Two tiny changes to train/eval_lm. For train fix an off-by-one, while for...

Two tiny changes to train/eval_lm. For train fix an off-by-one, while for eval_lm make it work when the task is translation.
parent 572a1d55
......@@ -17,8 +17,7 @@ from fairseq.sequence_scorer import SequenceScorer
def main(args):
assert args.path is not None, '--path required for evaluation!'
if args.tokens_per_sample is None:
args.tokens_per_sample = 1024
args.tokens_per_sample = getattr(args, 'tokens_per_sample', 1024)
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
......
......@@ -82,7 +82,7 @@ def main(args):
train_meter.start()
valid_losses = [None]
valid_subsets = args.valid_subset.split(',')
while lr > args.min_lr and epoch_itr.epoch <= max_epoch and trainer.get_num_updates() < max_update:
while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
# train for one epoch
train(args, trainer, task, epoch_itr)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment