Unverified Commit 40ea9ab2 authored by Tom Aarsen's avatar Tom Aarsen Committed by GitHub
Browse files

Add many missing spaces in adjacent strings (#26751)

Add missing spaces in adjacent strings
parent 3bc65505
...@@ -407,9 +407,9 @@ class Pop2PianoFeatureExtractor(SequenceFeatureExtractor): ...@@ -407,9 +407,9 @@ class Pop2PianoFeatureExtractor(SequenceFeatureExtractor):
) )
else: else:
warnings.warn( warnings.warn(
f"The sampling_rate of the provided audio is different from the target sampling_rate" f"The sampling_rate of the provided audio is different from the target sampling_rate "
f"of the Feature Extractor, {self.sampling_rate} vs {single_sampling_rate}. " f"of the Feature Extractor, {self.sampling_rate} vs {single_sampling_rate}. "
f"In these cases it is recommended to use `resample=True` in the `__call__` method to" f"In these cases it is recommended to use `resample=True` in the `__call__` method to "
f"get the optimal behaviour." f"get the optimal behaviour."
) )
......
...@@ -229,9 +229,9 @@ class SEWConfig(PretrainedConfig): ...@@ -229,9 +229,9 @@ class SEWConfig(PretrainedConfig):
or (len(self.conv_dim) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)
): ):
raise ValueError( raise ValueError(
"Configuration for convolutional layers is incorrect." "Configuration for convolutional layers is incorrect. "
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)" f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
) )
......
...@@ -1018,7 +1018,7 @@ class SEWForCTC(SEWPreTrainedModel): ...@@ -1018,7 +1018,7 @@ class SEWForCTC(SEWPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1151,7 +1151,7 @@ class SEWForSequenceClassification(SEWPreTrainedModel): ...@@ -1151,7 +1151,7 @@ class SEWForSequenceClassification(SEWPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
......
...@@ -256,9 +256,9 @@ class SEWDConfig(PretrainedConfig): ...@@ -256,9 +256,9 @@ class SEWDConfig(PretrainedConfig):
or (len(self.conv_dim) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)
): ):
raise ValueError( raise ValueError(
"Configuration for convolutional layers is incorrect." "Configuration for convolutional layers is incorrect. "
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)" f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
) )
......
...@@ -1558,7 +1558,7 @@ class SEWDForCTC(SEWDPreTrainedModel): ...@@ -1558,7 +1558,7 @@ class SEWDForCTC(SEWDPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1691,7 +1691,7 @@ class SEWDForSequenceClassification(SEWDPreTrainedModel): ...@@ -1691,7 +1691,7 @@ class SEWDForSequenceClassification(SEWDPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
......
...@@ -199,7 +199,7 @@ class Speech2Text2Tokenizer(PreTrainedTokenizer): ...@@ -199,7 +199,7 @@ class Speech2Text2Tokenizer(PreTrainedTokenizer):
if self.bpe_ranks is None: if self.bpe_ranks is None:
raise ValueError( raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so" "This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding." " that it can only be used for decoding, not for encoding. "
"Make sure to provide `merges.txt` file at instantiation to enable " "Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." "encoding."
) )
......
...@@ -1774,13 +1774,13 @@ class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedMod ...@@ -1774,13 +1774,13 @@ class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedMod
if reordered_layer_past_states[0].shape != layer_past_states[0].shape: if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
raise ValueError( raise ValueError(
"expected reordered_layer_past_states to have the same shape than layer_past_states" "expected reordered_layer_past_states to have the same shape than layer_past_states, "
f"but got {reordered_layer_past_states[0].shape} and {layer_past_states[0].shape}" f"but got {reordered_layer_past_states[0].shape} and {layer_past_states[0].shape}"
) )
if len(reordered_layer_past_states) != len(layer_past_states): if len(reordered_layer_past_states) != len(layer_past_states):
raise ValueError( raise ValueError(
"expected layer_past_states to have the same length as reordered_layer_past_states" "expected layer_past_states to have the same length as reordered_layer_past_states, "
f"got {len(layer_past_states)} and {len(reordered_layer_past_states)}" f"but got {len(layer_past_states)} and {len(reordered_layer_past_states)}"
) )
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
......
...@@ -127,7 +127,7 @@ class T5Config(PretrainedConfig): ...@@ -127,7 +127,7 @@ class T5Config(PretrainedConfig):
if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2: if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError( raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" "'gated-gelu' or 'relu'"
) )
......
...@@ -883,7 +883,7 @@ class T5PreTrainedModel(PreTrainedModel): ...@@ -883,7 +883,7 @@ class T5PreTrainedModel(PreTrainedModel):
if decoder_start_token_id is None: if decoder_start_token_id is None:
raise ValueError( raise ValueError(
"self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id." "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. "
"See T5 docs for more information." "See T5 docs for more information."
) )
......
...@@ -555,7 +555,7 @@ class TFT5Block(tf.keras.layers.Layer): ...@@ -555,7 +555,7 @@ class TFT5Block(tf.keras.layers.Layer):
if len(past_key_value) != expected_num_past_key_values: if len(past_key_value) != expected_num_past_key_values:
raise ValueError( raise ValueError(
f"There should be {expected_num_past_key_values} past states. " f"There should be {expected_num_past_key_values} past states. "
f"{'2 (past / key) for cross attention' if expected_num_past_key_values == 4 else ''}." f"{'2 (past / key) for cross attention' if expected_num_past_key_values == 4 else ''}. "
f"Got {len(past_key_value)} past key / value states" f"Got {len(past_key_value)} past key / value states"
) )
......
...@@ -66,8 +66,8 @@ if is_tensorflow_probability_available(): ...@@ -66,8 +66,8 @@ if is_tensorflow_probability_available():
n = tfp.distributions.Normal(loc=0.0, scale=1.0) n = tfp.distributions.Normal(loc=0.0, scale=1.0)
except ImportError: except ImportError:
logger.error( logger.error(
"TAPAS models are not usable since `tensorflow_probability` can't be loaded." "TAPAS models are not usable since `tensorflow_probability` can't be loaded. "
"It seems you have `tensorflow_probability` installed with the wrong tensorflow version." "It seems you have `tensorflow_probability` installed with the wrong tensorflow version. "
"Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability." "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability."
) )
......
...@@ -1012,7 +1012,7 @@ class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): ...@@ -1012,7 +1012,7 @@ class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
if not self.trainer_compatible: if not self.trainer_compatible:
warnings.warn( warnings.warn(
"The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order" "The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order "
"to use that updated output, please specify `trainer_compatible=True` as your configuration" "to use that updated output, please specify `trainer_compatible=True` as your configuration"
" attribute.", " attribute.",
DeprecationWarning, DeprecationWarning,
......
...@@ -134,7 +134,7 @@ class UMT5Config(PretrainedConfig): ...@@ -134,7 +134,7 @@ class UMT5Config(PretrainedConfig):
if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2: if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError( raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" "'gated-gelu' or 'relu'"
) )
......
...@@ -566,7 +566,7 @@ class UMT5PreTrainedModel(PreTrainedModel): ...@@ -566,7 +566,7 @@ class UMT5PreTrainedModel(PreTrainedModel):
if decoder_start_token_id is None: if decoder_start_token_id is None:
raise ValueError( raise ValueError(
"self.model.config.decoder_start_token_id has to be defined. In UMT5 it is usually set to the pad_token_id." "self.model.config.decoder_start_token_id has to be defined. In UMT5 it is usually set to the pad_token_id. "
"See UMT5 docs for more information." "See UMT5 docs for more information."
) )
......
...@@ -1256,7 +1256,7 @@ class UniSpeechForPreTraining(UniSpeechPreTrainedModel): ...@@ -1256,7 +1256,7 @@ class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1427,7 +1427,7 @@ class UniSpeechForCTC(UniSpeechPreTrainedModel): ...@@ -1427,7 +1427,7 @@ class UniSpeechForCTC(UniSpeechPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1560,7 +1560,7 @@ class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel): ...@@ -1560,7 +1560,7 @@ class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
......
...@@ -1276,7 +1276,7 @@ class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel): ...@@ -1276,7 +1276,7 @@ class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1434,7 +1434,7 @@ class UniSpeechSatForCTC(UniSpeechSatPreTrainedModel): ...@@ -1434,7 +1434,7 @@ class UniSpeechSatForCTC(UniSpeechSatPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1567,7 +1567,7 @@ class UniSpeechSatForSequenceClassification(UniSpeechSatPreTrainedModel): ...@@ -1567,7 +1567,7 @@ class UniSpeechSatForSequenceClassification(UniSpeechSatPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1690,7 +1690,7 @@ class UniSpeechSatForAudioFrameClassification(UniSpeechSatPreTrainedModel): ...@@ -1690,7 +1690,7 @@ class UniSpeechSatForAudioFrameClassification(UniSpeechSatPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1857,7 +1857,7 @@ class UniSpeechSatForXVector(UniSpeechSatPreTrainedModel): ...@@ -1857,7 +1857,7 @@ class UniSpeechSatForXVector(UniSpeechSatPreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
......
...@@ -709,6 +709,6 @@ class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLos ...@@ -709,6 +709,6 @@ class TFVisionEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLos
def resize_token_embeddings(self, *args, **kwargs): def resize_token_embeddings(self, *args, **kwargs):
raise NotImplementedError( raise NotImplementedError(
"Resizing the embedding layers via the TFVisionEncoderDecoderModel directly is not supported." "Resizing the embedding layers via the TFVisionEncoderDecoderModel directly is not supported. "
"Please use the respective methods of the wrapped objects (model.decoder.resize_token_embeddings(...))" "Please use the respective methods of the wrapped objects (model.decoder.resize_token_embeddings(...))"
) )
...@@ -698,7 +698,7 @@ class ViTForMaskedImageModeling(ViTPreTrainedModel): ...@@ -698,7 +698,7 @@ class ViTForMaskedImageModeling(ViTPreTrainedModel):
if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride): if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride):
raise ValueError( raise ValueError(
"When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that " "When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that "
"the reconstructed image has the same dimensions as the input." "the reconstructed image has the same dimensions as the input. "
f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}." f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}."
) )
......
...@@ -1438,7 +1438,7 @@ class TFWav2Vec2ForCTC(TFWav2Vec2PreTrainedModel): ...@@ -1438,7 +1438,7 @@ class TFWav2Vec2ForCTC(TFWav2Vec2PreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1593,7 +1593,7 @@ class TFWav2Vec2ForSequenceClassification(TFWav2Vec2PreTrainedModel): ...@@ -1593,7 +1593,7 @@ class TFWav2Vec2ForSequenceClassification(TFWav2Vec2PreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
......
...@@ -1480,7 +1480,7 @@ class Wav2Vec2Model(Wav2Vec2PreTrainedModel): ...@@ -1480,7 +1480,7 @@ class Wav2Vec2Model(Wav2Vec2PreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1627,7 +1627,7 @@ class Wav2Vec2ForPreTraining(Wav2Vec2PreTrainedModel): ...@@ -1627,7 +1627,7 @@ class Wav2Vec2ForPreTraining(Wav2Vec2PreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -1923,7 +1923,7 @@ class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel): ...@@ -1923,7 +1923,7 @@ class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -2055,7 +2055,7 @@ class Wav2Vec2ForSequenceClassification(Wav2Vec2PreTrainedModel): ...@@ -2055,7 +2055,7 @@ class Wav2Vec2ForSequenceClassification(Wav2Vec2PreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -2176,7 +2176,7 @@ class Wav2Vec2ForAudioFrameClassification(Wav2Vec2PreTrainedModel): ...@@ -2176,7 +2176,7 @@ class Wav2Vec2ForAudioFrameClassification(Wav2Vec2PreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
...@@ -2340,7 +2340,7 @@ class Wav2Vec2ForXVector(Wav2Vec2PreTrainedModel): ...@@ -2340,7 +2340,7 @@ class Wav2Vec2ForXVector(Wav2Vec2PreTrainedModel):
not be updated during training. not be updated during training.
""" """
warnings.warn( warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.", "Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning, FutureWarning,
) )
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment