Unverified Commit eee195b3 authored by Karim Foda, committed by GitHub

Fix gradient checkpointing bug in switch transformer (#22081)

parent b9273353
@@ -1039,6 +1039,13 @@ class SwitchTransformersStack(SwitchTransformersPreTrainedModel):
         else:
             encoder_extended_attention_mask = None
 
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
         # Prepare head mask if needed
         head_mask = self.get_head_mask(head_mask, self.config.num_layers)
         cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
@@ -1060,11 +1067,6 @@ class SwitchTransformersStack(SwitchTransformersPreTrainedModel):
                 all_hidden_states = all_hidden_states + (hidden_states,)
 
             if self.gradient_checkpointing and self.training:
-                if use_cache:
-                    logger.warning_once(
-                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
-                    )
-                    use_cache = False
 
                 def create_custom_forward(module):
                     def custom_forward(*inputs):
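The change moves the `use_cache`/gradient-checkpointing guard out of the per-layer loop and up to the start of `SwitchTransformersStack.forward`, so `use_cache` is forced to `False` (with a single warning) before any layer runs. Below is a minimal sketch of the training setup this guard protects: gradient checkpointing recomputes activations during the backward pass, so cached key/value states from the forward pass cannot be reused. The checkpoint name `google/switch-base-8` and the toy inputs are illustrative assumptions, not part of this commit.

    import torch
    from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration

    # Assumed checkpoint for illustration only.
    tokenizer = AutoTokenizer.from_pretrained("google/switch-base-8")
    model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8")

    # Enabling gradient checkpointing is what triggers the code path patched above:
    # the stack must run with use_cache=False while training.
    model.gradient_checkpointing_enable()
    model.train()

    inputs = tokenizer("translate English to German: Hello", return_tensors="pt")
    labels = tokenizer("Hallo", return_tensors="pt").input_ids

    outputs = model(**inputs, labels=labels)
    outputs.loss.backward()  # recomputes checkpointed activations instead of reading a cache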