Commit e5213b28 authored by Michael Carilli
Browse files

Scaler not needed for prepare_backward*fused

parent 5ae6008d
...@@ -217,7 +217,7 @@ def post_backward_no_master_weights(self, scaler): ...@@ -217,7 +217,7 @@ def post_backward_no_master_weights(self, scaler):
post_backward_models_are_masters(scaler, params, stashed_grads) post_backward_models_are_masters(scaler, params, stashed_grads)
def prepare_backward_with_master_weights_fused(self, scaler): def prepare_backward_with_master_weights_fused(self):
stash = self._amp_stash stash = self._amp_stash
if not stash.lazy_init_called: if not stash.lazy_init_called:
...@@ -250,7 +250,7 @@ def post_backward_with_master_weights_fused(self, scaler): ...@@ -250,7 +250,7 @@ def post_backward_with_master_weights_fused(self, scaler):
self._amp_stash.grad_norms = norm_groups self._amp_stash.grad_norms = norm_groups
def prepare_backward_no_master_weights_fused(self, scaler): def prepare_backward_no_master_weights_fused(self):
stash = self._amp_stash stash = self._amp_stash
if not stash.lazy_init_called: if not stash.lazy_init_called:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment