"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "3994fa5bafa56db6581d962d562f3c54fac291df"
Unverified commit b9273353, authored by Karim Foda, committed by GitHub

Fix gradient checkpointing bug in Speech2Text (#22079)

* Fix gradient checkpointing bug in Speech2Text

* Update modeling_speech_to_text.py

* Update modeling_speech_to_text_2.py
parent a9bd5df1
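
The fix is identical in both files: the `use_cache` compatibility check is hoisted out of the per-decoder-layer loop so it runs once per forward pass, and `logger.warning` becomes `logger.warning_once` so the message is emitted once per process instead of once per layer on every training step. Gradient checkpointing recomputes each layer's activations during the backward pass, so the past-key-value cache requested by `use_cache=True` cannot be kept, and the decoder forces `use_cache = False`. The deduplicated logging relies on `warning_once` from `transformers.utils.logging`, which memoizes the call; the following is a simplified, runnable sketch of that idea, not a verbatim copy of the library helper:

import functools
import logging

@functools.lru_cache(None)
def warning_once(logger: logging.Logger, message: str) -> None:
    # lru_cache memoizes on (logger, message); a second call with the same
    # arguments is a cache hit, so the warning body never runs again
    logger.warning(message)

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("demo")
warning_once(log, "`use_cache = True` is incompatible with gradient checkpointing...")
warning_once(log, "`use_cache = True` is incompatible with gradient checkpointing...")  # suppressed
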
src/transformers/models/speech_to_text/modeling_speech_to_text.py

@@ -1024,6 +1024,13 @@ class Speech2TextDecoder(Speech2TextPreTrainedModel):
         hidden_states = inputs_embeds + positions
         hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
 
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache =" " False`..."
+                )
+                use_cache = False
+
         # decoder layers
         all_hidden_states = () if output_hidden_states else None
         all_self_attns = () if output_attentions else None
@@ -1048,12 +1055,6 @@ class Speech2TextDecoder(Speech2TextPreTrainedModel):
             past_key_value = past_key_values[idx] if past_key_values is not None else None
 
             if self.gradient_checkpointing and self.training:
-                if use_cache:
-                    logger.warning(
-                        "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache ="
-                        " False`..."
-                    )
-                    use_cache = False
 
                 def create_custom_forward(module):
                     def custom_forward(*inputs):
...
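
The hunk is cut off at `create_custom_forward` (the `...` above); in this generation of Transformers decoders that helper wraps each layer so it can be handed to `torch.utils.checkpoint.checkpoint`, which recomputes activations during backward instead of storing them. A runnable sketch of the pattern with a stand-in layer, assuming the usual call shape (the real code also threads attention masks, head masks, and the `output_attentions`/`use_cache` flags through the wrapper):

import torch
import torch.utils.checkpoint

def create_custom_forward(module):
    def custom_forward(*inputs):
        # the real wrapper also appends output_attentions and use_cache,
        # and passes None for past_key_value (no cache under checkpointing)
        return module(*inputs)

    return custom_forward

layer = torch.nn.Linear(16, 16)  # stand-in for a decoder layer
hidden_states = torch.randn(2, 16, requires_grad=True)

# Activations inside `layer` are not stored; they are recomputed when
# backward() reaches this segment, trading compute for memory.
layer_outputs = torch.utils.checkpoint.checkpoint(create_custom_forward(layer), hidden_states)
layer_outputs.sum().backward()
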
src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py

@@ -632,6 +632,13 @@ class Speech2Text2Decoder(Speech2Text2PreTrainedModel):
         hidden_states = inputs_embeds + positions
         hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
 
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache =" " False`..."
+                )
+                use_cache = False
+
         # decoder layers
         all_hidden_states = () if output_hidden_states else None
         all_self_attns = () if output_attentions else None
@@ -657,12 +664,6 @@ class Speech2Text2Decoder(Speech2Text2PreTrainedModel):
             past_key_value = past_key_values[idx] if past_key_values is not None else None
 
             if self.gradient_checkpointing and self.training:
-                if use_cache:
-                    logger.warning(
-                        "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache ="
-                        " False`..."
-                    )
-                    use_cache = False
 
                 def create_custom_forward(module):
                     def custom_forward(*inputs):
...
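
From the user's side the only observable change is in the logs. A sketch of a training step that exercises the new path, assuming the public facebook/s2t-small-librispeech-asr checkpoint and illustrative input shapes:

import torch
from transformers import Speech2TextForConditionalGeneration

model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
model.gradient_checkpointing_enable()
model.train()

input_features = torch.randn(1, 584, 80)  # (batch, frames, 80 log-mel features)
labels = torch.tensor([[10, 20, 30, model.config.eos_token_id]])

# With checkpointing active, use_cache=True now logs the incompatibility
# warning once per process (previously: once per decoder layer) before the
# decoder overrides it to False.
outputs = model(input_features=input_features, labels=labels, use_cache=True)
outputs.loss.backward()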