"...csrc/git@developer.sourcefind.cn:OpenDAS/vision.git" did not exist on "dc3ac2909c2b9a5f6874ba1f1041ef49bd5482d6"
Unverified Commit c2c9fced authored by Kai Chen's avatar Kai Chen Committed by GitHub
Browse files

Add logging utils (#196)

* add logging utils

* install torch (cpu only) for CI

* fix unittests for runner

* remove python 2.7 from testing list

* add a corner case
parent 34197c5c
......@@ -9,6 +9,7 @@ before_install:
- pip install -U git+git://github.com/lilohuang/PyTurboJPEG.git
install:
- pip install torch==1.4.0+cpu torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
- rm -rf .eggs && pip install -e . codecov flake8 yapf isort mock
cache:
......@@ -19,7 +20,6 @@ env:
- COLUMNS=80
python:
- "2.7"
- "3.5"
- "3.6"
- "3.7"
......
# Copyright (c) Open-MMLab. All rights reserved.
from .config import Config, ConfigDict
from .logging import get_logger, print_log
from .misc import (check_prerequisites, concat_list, is_list_of, is_seq_of,
is_str, is_tuple_of, iter_cast, list_cast,
requires_executable, requires_package, slice_list,
......@@ -12,12 +13,11 @@ from .registry import Registry, build_from_cfg
from .timer import Timer, TimerError, check_time
# Public API of mmcv.utils. NOTE(review): the scraped diff had interleaved the
# pre- and post-commit versions of this list, duplicating many names and
# accidentally concatenating 'check_time' with 'Config' into one string; this
# is the post-commit list only.
__all__ = [
    'Config', 'ConfigDict', 'get_logger', 'print_log', 'is_str', 'iter_cast',
    'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
    'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
    'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
    'mkdir_or_exist', 'symlink', 'scandir', 'FileNotFoundError', 'ProgressBar',
    'track_progress', 'track_iter_progress', 'track_parallel_progress',
    'Registry', 'build_from_cfg', 'Timer', 'TimerError', 'check_time'
]
import logging
import torch.distributed as dist
# Registry of logger names that have already been set up by get_logger().
logger_initialized = {}


def get_logger(name, log_file=None, log_level=logging.INFO):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this method will initialize the
    logger by adding one or two handlers, otherwise the initialized logger
    will be directly returned. During initialization, a StreamHandler will
    always be added. If `log_file` is specified and the process rank is 0, a
    FileHandler will also be added.

    Args:
        name (str): Logger name.
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the logger.
        log_level (int): The logger level. Note that only the process of
            rank 0 is affected, and other processes will set the level to
            "Error" thus be silent most of the time.

    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    if name in logger_initialized:
        return logger
    # Handle hierarchical names: if logger "a" is initialized, logger "a.b"
    # skips initialization since it is a child of "a" and propagates to it.
    # The check requires a trailing dot so that an unrelated logger such as
    # "ab" is NOT mistaken for a child of "a" (plain startswith() would be).
    for logger_name in logger_initialized:
        if name.startswith(logger_name + '.'):
            return logger

    stream_handler = logging.StreamHandler()
    handlers = [stream_handler]

    if dist.is_available() and dist.is_initialized():
        rank = dist.get_rank()
    else:
        rank = 0

    # only rank 0 will add a FileHandler
    if rank == 0 and log_file is not None:
        file_handler = logging.FileHandler(log_file, 'w')
        handlers.append(file_handler)

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    for handler in handlers:
        handler.setFormatter(formatter)
        handler.setLevel(log_level)
        logger.addHandler(handler)

    # Non-zero ranks are silenced (ERROR level) so only rank 0 logs normally.
    if rank == 0:
        logger.setLevel(log_level)
    else:
        logger.setLevel(logging.ERROR)

    logger_initialized[name] = True

    return logger
def print_log(msg, logger=None, level=logging.INFO):
    """Print a log message.

    Args:
        msg (str): The message to be logged.
        logger (logging.Logger | str | None): The logger to be used.
            Some special loggers are:
            - "silent": no message will be printed.
            - other str: the logger obtained with `get_logger(logger)`.
            - None: The `print()` method will be used to print log messages.
        level (int): Logging level. Only available when `logger` is a Logger
            object or "root".

    Raises:
        TypeError: If `logger` is none of Logger, str, "silent" or None.
    """
    # Guard-clause style: handle each accepted logger form and return early.
    if logger is None:
        print(msg)
        return
    if isinstance(logger, logging.Logger):
        logger.log(level, msg)
        return
    if logger == 'silent':
        # Explicitly requested to drop the message.
        return
    if isinstance(logger, str):
        get_logger(logger).log(level, msg)
        return
    raise TypeError(
        'logger should be either a logging.Logger object, str, '
        '"silent" or None, but got {}'.format(type(logger)))
import logging
import re
import tempfile
from unittest.mock import patch
import pytest
from mmcv import get_logger, print_log
@patch('torch.distributed.get_rank', lambda: 0)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.is_available', lambda: True)
def test_get_logger_rank0():
    """Rank-0 behavior: a StreamHandler is always attached, a FileHandler is
    added when requested, and repeated/child lookups reuse the logger."""
    logger = get_logger('rank0.pkg1')
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert logger.handlers[0].level == logging.INFO

    logger = get_logger('rank0.pkg2', log_level=logging.DEBUG)
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert logger.handlers[0].level == logging.DEBUG

    with tempfile.NamedTemporaryFile() as f:
        logger = get_logger('rank0.pkg3', log_file=f.name)
        assert isinstance(logger, logging.Logger)
        assert len(logger.handlers) == 2
        assert isinstance(logger.handlers[0], logging.StreamHandler)
        assert isinstance(logger.handlers[1], logging.FileHandler)

    # Fetching the same name returns the identical, already-initialized logger.
    logger_pkg3 = get_logger('rank0.pkg3')
    assert id(logger_pkg3) == id(logger)

    # A child logger skips initialization, so it gets no handlers of its own;
    # messages propagate to 'rank0.pkg3'. (The original assertion compared
    # logger_pkg3.handlers with itself, which was vacuously true.)
    logger_pkg3 = get_logger('rank0.pkg3.subpkg')
    assert len(logger_pkg3.handlers) == 0
@patch('torch.distributed.get_rank', lambda: 1)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.is_available', lambda: True)
def test_get_logger_rank1():
    """On a non-zero rank only a StreamHandler is attached, even when a log
    file is requested."""
    log = get_logger('rank1.pkg1')
    assert isinstance(log, logging.Logger)
    assert len(log.handlers) == 1
    assert isinstance(log.handlers[0], logging.StreamHandler)
    assert log.handlers[0].level == logging.INFO

    with tempfile.NamedTemporaryFile() as f:
        # No FileHandler is added when the (patched) rank is not 0.
        log = get_logger('rank1.pkg2', log_file=f.name)
        assert isinstance(log, logging.Logger)
        assert len(log.handlers) == 1
        assert log.handlers[0].level == logging.INFO
def test_print_log_print(capsys):
    """logger=None falls back to the builtin print()."""
    print_log('welcome', logger=None)
    captured, _ = capsys.readouterr()
    assert captured == 'welcome\n'
def test_print_log_silent(capsys, caplog):
    """logger='silent' emits nothing: no stdout output and no log records."""
    print_log('welcome', logger='silent')
    captured, _ = capsys.readouterr()
    assert captured == ''
    assert not caplog.records
def test_print_log_logger(caplog):
    """String and Logger arguments route the message through logging, and a
    FileHandler-backed logger also writes the formatted line to the file."""
    print_log('welcome', logger='mmcv')
    assert caplog.record_tuples[-1] == ('mmcv', logging.INFO, 'welcome')

    print_log('welcome', logger='mmcv', level=logging.ERROR)
    assert caplog.record_tuples[-1] == ('mmcv', logging.ERROR, 'welcome')

    with tempfile.NamedTemporaryFile() as f:
        logger = get_logger('abc', log_file=f.name)
        print_log('welcome', logger=logger)
        assert caplog.record_tuples[-1] == ('abc', logging.INFO, 'welcome')
        # The file content must match "<timestamp> - abc - INFO - welcome".
        pattern = (r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}'
                   r' - abc - INFO - welcome\n')
        with open(f.name, 'r') as fin:
            assert re.fullmatch(pattern, fin.read()) is not None
def test_print_log_exception():
    """Any logger argument outside Logger/str/None raises TypeError."""
    bad_logger = 0
    with pytest.raises(TypeError):
        print_log('welcome', logger=bad_logger)
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import sys
import tempfile
import warnings
from mock import MagicMock
sys.modules['wandb'] = MagicMock()
def test_save_checkpoint():
try:
......@@ -27,7 +30,7 @@ def test_save_checkpoint():
assert osp.exists(latest_path)
assert osp.exists(epoch1_path)
assert osp.realpath(latest_path) == epoch1_path
assert osp.realpath(latest_path) == osp.realpath(epoch1_path)
torch.load(latest_path)
......@@ -42,9 +45,7 @@ def test_wandb_hook():
return
import mmcv.runner
wandb_mock = MagicMock()
hook = mmcv.runner.hooks.WandbLoggerHook()
hook.wandb = wandb_mock
loader = DataLoader(torch.ones((5, 5)))
model = nn.Linear(1, 1)
......@@ -58,6 +59,6 @@ def test_wandb_hook():
})
runner.register_hook(hook)
runner.run([loader, loader], [('train', 1), ('val', 1)], 1)
wandb_mock.init.assert_called()
wandb_mock.log.assert_called_with({'accuracy/val': 0.98}, step=5)
wandb_mock.join.assert_called()
hook.wandb.init.assert_called()
hook.wandb.log.assert_called_with({'accuracy/val': 0.98}, step=5)
hook.wandb.join.assert_called()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment