Unverified commit ba3f9a71 authored by Yih-Dar, committed by GitHub

logger.warn --> logger.warning (#15572)



* change logger.warn to logger.warning

* make style
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent a6885db9
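
For context on the change itself: `Logger.warn` is a deprecated alias of `Logger.warning` in Python's standard `logging` module, and calling it raises a `DeprecationWarning` when warning filters allow it. A minimal standalone sketch of the difference (standard library only, not code from this repository):

```python
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

# DeprecationWarning is hidden by default outside __main__; surface it here.
warnings.simplefilter("default")

logger.warn("deprecated alias, kept only for backwards compatibility")
logger.warning("the supported spelling this commit switches to")
```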
@@ -517,7 +517,7 @@ class PretrainedConfig(PushToHubMixin):
```"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
-logger.warn(
+logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
@@ -388,7 +388,7 @@ class ModuleUtilsMixin:
if self.main_input_name in input_dict:
return input_dict[self.main_input_name].numel()
else:
-logger.warn(
+logger.warning(
"Could not estimate the number of tokens of the input, floating-point operations will not be computed"
)
return 0
@@ -398,7 +398,7 @@ class _BaseAutoModelClass:
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
-logger.warn(
+logger.warning(
"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
"no malicious code has been contributed in a newer revision."
)
@@ -432,7 +432,7 @@ class _BaseAutoModelClass:
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
-logger.warn(
+logger.warning(
"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
"no malicious code has been contributed in a newer revision."
)
@@ -626,7 +626,7 @@ class AutoConfig:
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
-logger.warn(
+logger.warning(
"Explicitly passing a `revision` is encouraged when loading a configuration with custom code to "
"ensure no malicious code has been contributed in a newer revision."
)
@@ -492,7 +492,7 @@ class AutoTokenizer:
"the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
-logger.warn(
+logger.warning(
"Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure "
"no malicious code has been contributed in a newer revision."
)
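The `revision` warnings in the four auto-class hunks above all guard the same situation: loading custom code from the Hub with `trust_remote_code=True`. A hedged usage sketch of the recommended pattern, pinning `revision` to a known commit so the warning does not fire; the repo id and hash are hypothetical placeholders:

```python
from transformers import AutoModel

# Pinning `revision` guarantees the exact custom modeling code you audited
# is what runs; omitting it logs the warning shown above.
model = AutoModel.from_pretrained(
    "some-org/model-with-custom-code",  # hypothetical repo id
    trust_remote_code=True,
    revision="0123abcd",  # hypothetical commit hash
)
```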
@@ -540,7 +540,7 @@ class MegatronBertEncoder(nn.Module):
if self.gradient_checkpointing and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
@@ -535,7 +535,7 @@ class RemBertEncoder(nn.Module):
if self.gradient_checkpointing and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
@@ -984,7 +984,7 @@ class T5Stack(T5PreTrainedModel):
if self.gradient_checkpointing and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
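The three encoder hunks above (MegatronBertEncoder, RemBertEncoder, T5Stack) share one rationale: gradient checkpointing recomputes each block's forward pass during backpropagation, which does not compose with returning cached key/value states, so the models force `use_cache=False` while training. A small sketch of the code path that triggers this warning; the checkpoint name is just an example:

```python
from transformers import T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("t5-small")
model.gradient_checkpointing_enable()  # turns on checkpointing in submodules
model.train()

# In training mode with checkpointing enabled, a forward pass requesting
# use_cache=True logs the warning above and proceeds as if it were False.
```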
@@ -952,7 +952,7 @@ class Trainer:
for key, value in params.items():
if not hasattr(self.args, key):
-logger.warn(
+logger.warning(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
continue
@@ -1165,7 +1165,7 @@ class Trainer:
config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
checkpoint_version = config.transformers_version
if checkpoint_version is not None and checkpoint_version != __version__:
-logger.warn(
+logger.warning(
f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
f"Transformers but your current version is {__version__}. This is not recommended and could "
"yield to errors or unwanted behaviors."
@@ -1534,7 +1534,7 @@ class Trainer:
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
else:
-logger.warn(
+logger.warning(
f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
"on multiple nodes, you should activate `--save_on_each_node`."
)
@@ -1567,9 +1567,11 @@ class Trainer:
):
self.model.tie_weights()
else:
logger.warn(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warn(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
logger.warning(
f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
if self.control.should_log: