Unverified commit c9035e45 authored by Stas Bekman, committed by GitHub

fix: The 'warn' method is deprecated (#11105)

* The 'warn' method is deprecated

* fix test
parent 247bed38
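
For context: in Python's standard `logging` module, `Logger.warn` is an obsolete alias for `Logger.warning`, and the commit title echoes the `DeprecationWarning` text that newer CPython versions emit when it is called ("The 'warn' method is deprecated, use 'warning' instead"). A minimal sketch of the substitution this commit applies across the codebase; the logger name below is illustrative, not taken from the repository:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("transformers.example")  # illustrative name only

# Old spelling: still works, but CPython documents `warn` as deprecated
# and may emit a DeprecationWarning (an error under `python -W error`).
logger.warn("`warn` is an obsolete alias")

# New spelling, used throughout this commit.
logger.warning("`warning` is the supported method")
```

The hunks below apply exactly this rename, file by file.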
@@ -194,7 +194,7 @@ if (
     and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ
     and "TRANSFORMERS_CACHE" not in os.environ
 ):
-    logger.warn(
+    logger.warning(
         "In Transformers v4.0.0, the default path to cache downloaded models changed from "
         "'~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have overridden "
         "and '~/.cache/torch/transformers' is a directory that exists, we're moving it to "
......
@@ -54,7 +54,7 @@ from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy # n
 def is_wandb_available():
     # any value of WANDB_DISABLED disables wandb
     if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES:
-        logger.warn(
+        logger.warning(
             "Using the `WAND_DISABLED` environment variable is deprecated and will be removed in v5. Use the "
             "--report_to flag to control the integrations used for logging result (for instance --report_to none)."
         )
......
@@ -290,7 +290,7 @@ def booleans_processing(config, **kwargs):
             or kwargs["output_hidden_states"] is not None
             or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
         ):
-            tf_logger.warn(
+            tf_logger.warning(
                 "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model."
                 "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
             )
@@ -299,7 +299,9 @@ def booleans_processing(config, **kwargs):
             final_booleans["output_hidden_states"] = config.output_hidden_states

         if kwargs["return_dict"] is not None:
-            tf_logger.warn("The parameter `return_dict` cannot be set in graph mode and will always be set to `True`.")
+            tf_logger.warning(
+                "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
+            )
         final_booleans["return_dict"] = True

         if "use_cache" in kwargs:
@@ -398,7 +400,7 @@ def input_processing(func, config, input_ids, **kwargs):
         if isinstance(v, allowed_types) or v is None:
             output[k] = v
         elif k not in parameter_names and "args" not in parameter_names:
-            logger.warn(
+            logger.warning(
                 f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
             )
             continue
......
@@ -409,7 +409,7 @@ class AutoTokenizer:
         # if model is an encoder decoder, the encoder tokenizer class is used by default
         if isinstance(config, EncoderDecoderConfig):
             if type(config.decoder) is not type(config.encoder):  # noqa: E721
-                logger.warn(
+                logger.warning(
                     f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
                     f"config class: {config.decoder.__class}. It is not recommended to use the "
                     "`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
......
@@ -1011,7 +1011,7 @@ class BartDecoder(BartPretrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -544,7 +544,7 @@ class BertEncoder(nn.Module):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -450,7 +450,7 @@ class BertGenerationDecoder(BertGenerationPreTrainedModel):
         super().__init__(config)

         if not config.is_decoder:
-            logger.warn("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`")
+            logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`")

         self.bert = BertGenerationEncoder(config)
         self.lm_head = BertGenerationOnlyLMHead(config)
......
@@ -1586,7 +1586,7 @@ class BigBirdEncoder(nn.Module):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -973,7 +973,7 @@ class BlenderbotDecoder(BlenderbotPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -974,7 +974,7 @@ class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -541,7 +541,7 @@ class ElectraEncoder(nn.Module):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -726,7 +726,7 @@ class GPT2Model(GPT2PreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -823,7 +823,7 @@ class GPTNeoModel(GPTNeoPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -470,7 +470,7 @@ class LayoutLMEncoder(nn.Module):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -2070,7 +2070,7 @@ class LEDDecoder(LEDPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -968,7 +968,7 @@ class M2M100Decoder(M2M100PreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -981,7 +981,7 @@ class MarianDecoder(MarianPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -1020,7 +1020,7 @@ class MBartDecoder(MBartPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -987,7 +987,7 @@ class PegasusDecoder(PegasusPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
@@ -1475,7 +1475,7 @@ class ProphetNetDecoder(ProphetNetPreTrainedModel):
             if getattr(self.config, "gradient_checkpointing", False) and self.training:

                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                         "`use_cache=False`..."
                     )
......
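
Most of the model-file hunks above touch the same recurring guard, which warns and then disables `use_cache` when gradient checkpointing is active during training, since cached past key/values cannot be reused while activations are being recomputed. Below is a standalone sketch of that pattern with the updated `logger.warning` call; `ToyDecoder` and the `SimpleNamespace` config are illustrative stand-ins, not the actual transformers classes:

```python
import logging
from types import SimpleNamespace

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class ToyDecoder:
    """Illustrative stand-in for the decoder/encoder classes patched above."""

    def __init__(self, config):
        self.config = config
        self.training = True  # mimics torch.nn.Module.training

    def forward(self, use_cache=True):
        # Same guard as in the hunks above: warn, then force use_cache off
        # when gradient checkpointing is enabled during training.
        if getattr(self.config, "gradient_checkpointing", False) and self.training:
            if use_cache:
                logger.warning(  # previously logger.warn(...)
                    "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. "
                    "Setting `use_cache=False`..."
                )
                use_cache = False
        return use_cache


# Example: with checkpointing enabled during training, the warning fires
# and use_cache comes back as False.
decoder = ToyDecoder(SimpleNamespace(gradient_checkpointing=True))
print(decoder.forward(use_cache=True))  # -> False, after logging the warning
```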