Commit 5289b4b9 authored by burcturkoglu

The division of global_step by num_train_optimization_steps in the lr_this_step computation is removed.

parent 3ae8c8be
@@ -315,7 +315,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
+                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step,
                                                                              args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step

@@ -603,7 +603,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
+                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step,
                                                                              args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step

@@ -854,7 +854,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
+                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step,
                                                                              args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step

@@ -1015,7 +1015,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used and handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
+                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step,
                                                                              args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step

@@ -466,7 +466,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
+                    lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step,
                                                                              args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step