Unverified Commit b19d64d8 authored by Sylvain Gugger's avatar Sylvain Gugger Committed by GitHub
Browse files

Respect documentation on passive log level (#21700)

* Respect documentation on passive log level

* Fix test and set log level in examples

* Add doc
parent ee6e71e2
...@@ -209,6 +209,10 @@ def main(): ...@@ -209,6 +209,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level)
......
...@@ -246,6 +246,10 @@ def main(): ...@@ -246,6 +246,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level)
......
...@@ -187,6 +187,10 @@ def main(): ...@@ -187,6 +187,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level)
......
...@@ -186,6 +186,10 @@ def main(): ...@@ -186,6 +186,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level)
......
...@@ -250,6 +250,10 @@ def main(): ...@@ -250,6 +250,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level)
......
...@@ -240,6 +240,10 @@ def main(): ...@@ -240,6 +240,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -240,6 +240,10 @@ def main(): ...@@ -240,6 +240,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -231,6 +231,10 @@ def main(): ...@@ -231,6 +231,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -235,6 +235,11 @@ def main(): ...@@ -235,6 +235,11 @@ def main():
datefmt="%m/%d/%Y %H:%M:%S", datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -238,6 +238,10 @@ def main(): ...@@ -238,6 +238,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -236,6 +236,11 @@ def main(): ...@@ -236,6 +236,11 @@ def main():
datefmt="%m/%d/%Y %H:%M:%S", datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -283,6 +283,10 @@ def main(): ...@@ -283,6 +283,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -276,6 +276,10 @@ def main(): ...@@ -276,6 +276,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level)
......
...@@ -314,6 +314,11 @@ def main(): ...@@ -314,6 +314,11 @@ def main():
datefmt="%m/%d/%Y %H:%M:%S", datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -227,6 +227,10 @@ def main(): ...@@ -227,6 +227,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -186,6 +186,10 @@ def main(): ...@@ -186,6 +186,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -228,6 +228,10 @@ def main(): ...@@ -228,6 +228,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -272,6 +272,10 @@ def main(): ...@@ -272,6 +272,10 @@ def main():
handlers=[logging.StreamHandler(sys.stdout)], handlers=[logging.StreamHandler(sys.stdout)],
) )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level() log_level = training_args.get_process_log_level()
logger.setLevel(log_level) logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level) datasets.utils.logging.set_verbosity(log_level)
......
...@@ -231,9 +231,9 @@ class TrainingArguments: ...@@ -231,9 +231,9 @@ class TrainingArguments:
Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`. Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of `warmup_ratio`.
log_level (`str`, *optional*, defaults to `passive`): log_level (`str`, *optional*, defaults to `passive`):
Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug',
'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and lets the 'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and keeps the
application set the level. current log level for the Transformers library (which will be `"warning"` by default).
log_level_replica (`str`, *optional*, defaults to `passive`): log_level_replica (`str`, *optional*, defaults to `"warning"`):
Logger log level to use on replicas. Same choices as `log_level`" Logger log level to use on replicas. Same choices as `log_level`"
log_on_each_node (`bool`, *optional*, defaults to `True`): log_on_each_node (`bool`, *optional*, defaults to `True`):
In multinode distributed training, whether to log using `log_level` once per node, or only on the main In multinode distributed training, whether to log using `log_level` once per node, or only on the main
...@@ -690,7 +690,7 @@ class TrainingArguments: ...@@ -690,7 +690,7 @@ class TrainingArguments:
}, },
) )
log_level_replica: Optional[str] = field( log_level_replica: Optional[str] = field(
default="passive", default="warning",
metadata={ metadata={
"help": "Logger log level to use on replica nodes. Same choices and defaults as ``log_level``", "help": "Logger log level to use on replica nodes. Same choices and defaults as ``log_level``",
"choices": trainer_log_levels.keys(), "choices": trainer_log_levels.keys(),
...@@ -1774,7 +1774,8 @@ class TrainingArguments: ...@@ -1774,7 +1774,8 @@ class TrainingArguments:
Returns the log level to be used depending on whether this process is the main process of node 0, main process Returns the log level to be used depending on whether this process is the main process of node 0, main process
of node non-0, or a non-main process. of node non-0, or a non-main process.
For the main process the log level defaults to `logging.INFO` unless overridden by `log_level` argument. For the main process the log level defaults to the logging level set (`logging.WARNING` if you didn't do
anything) unless overridden by `log_level` argument.
For the replica processes the log level defaults to `logging.WARNING` unless overridden by `log_level_replica` For the replica processes the log level defaults to `logging.WARNING` unless overridden by `log_level_replica`
argument. argument.
...@@ -1786,8 +1787,8 @@ class TrainingArguments: ...@@ -1786,8 +1787,8 @@ class TrainingArguments:
log_level = trainer_log_levels[self.log_level] log_level = trainer_log_levels[self.log_level]
log_level_replica = trainer_log_levels[self.log_level_replica] log_level_replica = trainer_log_levels[self.log_level_replica]
log_level_main_node = logging.INFO if log_level == -1 else log_level log_level_main_node = logging.get_verbosity() if log_level == -1 else log_level
log_level_replica_node = logging.WARNING if log_level_replica == -1 else log_level_replica log_level_replica_node = logging.get_verbosity() if log_level_replica == -1 else log_level_replica
return log_level_main_node if self.should_log else log_level_replica_node return log_level_main_node if self.should_log else log_level_replica_node
@property @property
......
...@@ -1098,11 +1098,11 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon): ...@@ -1098,11 +1098,11 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
logger = logging.get_logger() logger = logging.get_logger()
log_info_string = "Running training" log_info_string = "Running training"
# test with the default log_level - should be info and thus log on the main process # test with the default log_level - should be warning and thus not log on the main process
with CaptureLogger(logger) as cl: with CaptureLogger(logger) as cl:
trainer = get_regression_trainer() trainer = get_regression_trainer()
trainer.train() trainer.train()
self.assertIn(log_info_string, cl.out) self.assertNotIn(log_info_string, cl.out)
# test with low log_level - lower than info # test with low log_level - lower than info
with CaptureLogger(logger) as cl: with CaptureLogger(logger) as cl:
......
Markdown is supported
0% or attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment