Unverified commit d0876a09, authored by Karim Foda, committed by GitHub

Fix gradient checkpointing bug in xglm (#22127)

parent 0c883766
@@ -714,6 +714,14 @@ class XGLMModel(XGLMPreTrainedModel):
         hidden_states = nn.functional.dropout(hidden_states, p=float(self.dropout), training=self.training)
 
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache ="
+                    " False`..."
+                )
+                use_cache = False
+
         # decoder layers
         all_hidden_states = () if output_hidden_states else None
         all_self_attns = () if output_attentions else None
@@ -739,12 +747,6 @@ class XGLMModel(XGLMPreTrainedModel):
             past_key_value = past_key_values[idx] if past_key_values is not None else None
 
             if self.gradient_checkpointing and self.training:
 
-                if use_cache:
-                    logger.warning(
-                        "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache ="
-                        " False`..."
-                    )
-                    use_cache = False
-
                 def create_custom_forward(module):
                     def custom_forward(*inputs):
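In effect, the commit hoists the use_cache check out of the per-layer decoder loop so the flag is disabled once, before any layer runs, instead of being toggled mid-iteration. Below is a minimal sketch of how the fixed path behaves; the facebook/xglm-564M checkpoint and the toy input are illustrative assumptions, not part of the commit.

from transformers import AutoTokenizer, XGLMForCausalLM

# Illustrative checkpoint and input; any XGLM checkpoint would behave the same way.
tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")

# Turn on gradient checkpointing and switch to training mode, the combination
# this commit targets.
model.gradient_checkpointing_enable()
model.train()

inputs = tokenizer("Gradient checkpointing test", return_tensors="pt")

# With the fix, requesting use_cache=True here logs the warning once and the
# forward pass proceeds with use_cache=False, so no past_key_values are built.
outputs = model(**inputs, labels=inputs["input_ids"], use_cache=True)
outputs.loss.backward()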