"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "4d727bd2dff377caeab21ff4e1bf4b26c2397c8a"
Unverified Commit 77770ec7 authored by Yanming Wang's avatar Yanming Wang Committed by GitHub
Browse files

Fix trainer logging_nan_inf_filter in torch_xla mode (#13896)



* Fix logging_nan_inf_filter in torch_xla mode

* Update src/transformers/trainer.py
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* Fix format
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent aea7c5b0
@@ -1311,9 +1311,10 @@ class Trainer:
                 else:
                     tr_loss_step = self.training_step(model, inputs)

-                if args.logging_nan_inf_filter and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)):
-                    # if loss is nan or inf simply add the average of previous logged losses
-                    tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
+                if args.logging_nan_inf_filter and not is_torch_tpu_available():
+                    if torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step):
+                        # if loss is nan or inf simply add the average of previous logged losses
+                        tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
                 else:
                     tr_loss += tr_loss_step
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment