Unverified commit 5947178e, authored by Zaida Zhou and committed by GitHub

Remove many functions in utils and migrate them to mmengine (#2217)

* Remove runner, parallel, engine and device

* fix format

* remove outdated docs

* migrate many functions to mmengine

* remove sync_bn.py
parent 9185eee8
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from pathlib import Path
from .misc import is_str
def is_filepath(x):
return is_str(x) or isinstance(x, Path)
def fopen(filepath, *args, **kwargs):
if is_str(filepath):
return open(filepath, *args, **kwargs)
elif isinstance(filepath, Path):
return filepath.open(*args, **kwargs)
raise ValueError('`filepath` should be a string or a Path')
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
if not osp.isfile(filename):
raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
if os.path.lexists(dst) and overwrite:
os.remove(dst)
os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
"""Scan a directory to find the interested files.
Args:
dir_path (str | :obj:`Path`): Path of the directory.
suffix (str | tuple(str), optional): File suffix that we are
interested in. Default: None.
recursive (bool, optional): If set to True, recursively scan the
directory. Default: False.
case_sensitive (bool, optional): If set to False, ignore the case of
suffix. Default: True.
Returns:
A generator for all files of interest, yielding paths relative to
``dir_path``.
"""
if isinstance(dir_path, (str, Path)):
dir_path = str(dir_path)
else:
raise TypeError('"dir_path" must be a string or Path object')
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
if suffix is not None and not case_sensitive:
suffix = suffix.lower() if isinstance(suffix, str) else tuple(
item.lower() for item in suffix)
root = dir_path
def _scandir(dir_path, suffix, recursive, case_sensitive):
for entry in os.scandir(dir_path):
if not entry.name.startswith('.') and entry.is_file():
rel_path = osp.relpath(entry.path, root)
_rel_path = rel_path if case_sensitive else rel_path.lower()
if suffix is None or _rel_path.endswith(suffix):
yield rel_path
elif recursive and os.path.isdir(entry.path):
# scan recursively if entry.path is a directory
yield from _scandir(entry.path, suffix, recursive,
case_sensitive)
return _scandir(dir_path, suffix, recursive, case_sensitive)
def find_vcs_root(path, markers=('.git', )):
"""Finds the root directory (including itself) of specified markers.
Args:
path (str): Path of directory or file.
markers (list[str], optional): List of file or directory names.
Returns:
The directory containing one of the markers, or None if not found.
"""
if osp.isfile(path):
path = osp.dirname(path)
prev, cur = None, osp.abspath(osp.expanduser(path))
while cur != prev:
if any(osp.exists(osp.join(cur, marker)) for marker in markers):
return cur
prev, cur = cur, osp.split(cur)[0]
return None
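# Illustrative usage sketch (not part of the original module). The paths are
# examples only; run with ``python -m`` since this file uses relative imports.
if __name__ == '__main__':
    mkdir_or_exist('./tmp/demo')  # creates nested directories if missing
    check_file_exist(__file__)    # raises FileNotFoundError otherwise
    # List all Python files below the current directory, ignoring suffix case.
    for rel_path in scandir('.', suffix='.py', recursive=True,
                            case_sensitive=False):
        print(rel_path)
    # Walk upwards from this file until a `.git` marker is found.
    print(find_vcs_root(__file__))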
# Copyright (c) OpenMMLab. All rights reserved.
import sys
from collections.abc import Iterable
from multiprocessing import Pool
from shutil import get_terminal_size
from .timer import Timer
class ProgressBar:
"""A progress bar which can print the progress."""
def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout):
self.task_num = task_num
self.bar_width = bar_width
self.completed = 0
self.file = file
if start:
self.start()
@property
def terminal_width(self):
width, _ = get_terminal_size()
return width
def start(self):
if self.task_num > 0:
self.file.write(f'[{" " * self.bar_width}] 0/{self.task_num}, '
'elapsed: 0s, ETA:')
else:
self.file.write('completed: 0, elapsed: 0s')
self.file.flush()
self.timer = Timer()
def update(self, num_tasks=1):
assert num_tasks > 0
self.completed += num_tasks
elapsed = self.timer.since_start()
if elapsed > 0:
fps = self.completed / elapsed
else:
fps = float('inf')
if self.task_num > 0:
percentage = self.completed / float(self.task_num)
eta = int(elapsed * (1 - percentage) / percentage + 0.5)
msg = f'\r[{{}}] {self.completed}/{self.task_num}, ' \
f'{fps:.1f} task/s, elapsed: {int(elapsed + 0.5)}s, ' \
f'ETA: {eta:5}s'
bar_width = min(self.bar_width,
int(self.terminal_width - len(msg)) + 2,
int(self.terminal_width * 0.6))
bar_width = max(2, bar_width)
mark_width = int(bar_width * percentage)
bar_chars = '>' * mark_width + ' ' * (bar_width - mark_width)
self.file.write(msg.format(bar_chars))
else:
self.file.write(
f'completed: {self.completed}, elapsed: {int(elapsed + 0.5)}s,'
f' {fps:.1f} tasks/s')
self.file.flush()
def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs):
"""Track the progress of tasks execution with a progress bar.
Tasks are done with a simple for-loop.
Args:
func (callable): The function to be applied to each task.
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
bar_width (int): Width of progress bar.
Returns:
list: The task results.
"""
if isinstance(tasks, tuple):
assert len(tasks) == 2
assert isinstance(tasks[0], Iterable)
assert isinstance(tasks[1], int)
task_num = tasks[1]
tasks = tasks[0]
elif isinstance(tasks, Iterable):
task_num = len(tasks)
else:
raise TypeError(
'"tasks" must be an iterable object or a (iterator, int) tuple')
prog_bar = ProgressBar(task_num, bar_width, file=file)
results = []
for task in tasks:
results.append(func(task, **kwargs))
prog_bar.update()
prog_bar.file.write('\n')
return results
def init_pool(process_num, initializer=None, initargs=None):
if initializer is None:
return Pool(process_num)
elif initargs is None:
return Pool(process_num, initializer)
else:
if not isinstance(initargs, tuple):
raise TypeError('"initargs" must be a tuple')
return Pool(process_num, initializer, initargs)
def track_parallel_progress(func,
tasks,
nproc,
initializer=None,
initargs=None,
bar_width=50,
chunksize=1,
skip_first=False,
keep_order=True,
file=sys.stdout):
"""Track the progress of parallel task execution with a progress bar.
The built-in :mod:`multiprocessing` module is used for process pools and
tasks are done with :func:`Pool.imap` or :func:`Pool.imap_unordered`.
Args:
func (callable): The function to be applied to each task.
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
nproc (int): Process (worker) number.
initializer (None or callable): Refer to :class:`multiprocessing.Pool`
for details.
initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for
details.
chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
bar_width (int): Width of progress bar.
skip_first (bool): Whether to skip the first sample for each worker
    when estimating fps, since the initialization step may take
    longer.
keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
:func:`Pool.imap_unordered` is used.
Returns:
list: The task results.
"""
if isinstance(tasks, tuple):
assert len(tasks) == 2
assert isinstance(tasks[0], Iterable)
assert isinstance(tasks[1], int)
task_num = tasks[1]
tasks = tasks[0]
elif isinstance(tasks, Iterable):
task_num = len(tasks)
else:
raise TypeError(
'"tasks" must be an iterable object or a (iterator, int) tuple')
pool = init_pool(nproc, initializer, initargs)
start = not skip_first
task_num -= nproc * chunksize * int(skip_first)
prog_bar = ProgressBar(task_num, bar_width, start, file=file)
results = []
if keep_order:
gen = pool.imap(func, tasks, chunksize)
else:
gen = pool.imap_unordered(func, tasks, chunksize)
for result in gen:
results.append(result)
if skip_first:
if len(results) < nproc * chunksize:
continue
elif len(results) == nproc * chunksize:
prog_bar.start()
continue
prog_bar.update()
prog_bar.file.write('\n')
pool.close()
pool.join()
return results
def track_iter_progress(tasks, bar_width=50, file=sys.stdout):
"""Track the progress of tasks iteration or enumeration with a progress
bar.
Tasks are yielded with a simple for-loop.
Args:
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
bar_width (int): Width of progress bar.
Yields:
    The next task in ``tasks``.
"""
if isinstance(tasks, tuple):
assert len(tasks) == 2
assert isinstance(tasks[0], Iterable)
assert isinstance(tasks[1], int)
task_num = tasks[1]
tasks = tasks[0]
elif isinstance(tasks, Iterable):
task_num = len(tasks)
else:
raise TypeError(
'"tasks" must be an iterable object or a (iterator, int) tuple')
prog_bar = ProgressBar(task_num, bar_width, file=file)
for task in tasks:
yield task
prog_bar.update()
prog_bar.file.write('\n')
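# Illustrative usage sketch (not part of the original module); the worker
# function and task list below are made up for demonstration.
if __name__ == '__main__':
    data = list(range(-50, 50))

    # Sequential processing with a progress bar.
    squares = track_progress(lambda x: x * x, data, bar_width=40)

    # Parallel processing with 4 workers; the built-in ``abs`` is picklable,
    # so this works with any multiprocessing start method. ``keep_order=True``
    # uses ``Pool.imap`` and preserves the input order of the results.
    magnitudes = track_parallel_progress(abs, data, nproc=4, keep_order=True)

    # Wrap an iterable so progress is reported while it is being consumed;
    # the (iterator, total) tuple form is used when len() is unavailable.
    for item in track_iter_progress((iter(data), len(data))):
        pass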
# Copyright (c) OpenMMLab. All rights reserved.
import inspect
import warnings
from functools import partial
from typing import Any, Dict, Optional
from .misc import deprecated_api_warning, is_seq_of
def build_from_cfg(cfg: Dict,
registry: 'Registry',
default_args: Optional[Dict] = None) -> Any:
"""Build a module from config dict when it is a class configuration, or
call a function from config dict when it is a function configuration.
Example:
>>> MODELS = Registry('models')
>>> @MODELS.register_module()
>>> class ResNet:
>>> pass
>>> resnet = build_from_cfg(dict(type='ResNet'), MODELS)
>>> # Returns an instantiated object
>>> @MODELS.register_module()
>>> def resnet50():
>>> pass
>>> resnet = build_from_cfg(dict(type='resnet50'), MODELS)
>>> # Returns the result of calling the function
Args:
cfg (dict): Config dict. It should at least contain the key "type".
registry (:obj:`Registry`): The registry to search the type from.
default_args (dict, optional): Default initialization arguments.
Returns:
object: The constructed object.
"""
if not isinstance(cfg, dict):
raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
if 'type' not in cfg:
if default_args is None or 'type' not in default_args:
raise KeyError(
'`cfg` or `default_args` must contain the key "type", '
f'but got {cfg}\n{default_args}')
if not isinstance(registry, Registry):
raise TypeError('registry must be an mmcv.Registry object, '
f'but got {type(registry)}')
if not (isinstance(default_args, dict) or default_args is None):
raise TypeError('default_args must be a dict or None, '
f'but got {type(default_args)}')
args = cfg.copy()
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
obj_type = args.pop('type')
if isinstance(obj_type, str):
obj_cls = registry.get(obj_type)
if obj_cls is None:
raise KeyError(
f'{obj_type} is not in the {registry.name} registry')
elif inspect.isclass(obj_type) or inspect.isfunction(obj_type):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
try:
return obj_cls(**args)
except Exception as e:
# Normal TypeError does not print class name.
raise type(e)(f'{obj_cls.__name__}: {e}')
class Registry:
"""A registry to map strings to classes or functions.
Registered objects can be built from the registry. Meanwhile, registered
functions can be called from the registry.
Example:
>>> MODELS = Registry('models')
>>> @MODELS.register_module()
>>> class ResNet:
>>> pass
>>> resnet = MODELS.build(dict(type='ResNet'))
>>> @MODELS.register_module()
>>> def resnet50():
>>> pass
>>> resnet = MODELS.build(dict(type='resnet50'))
Please refer to
https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for
advanced usage.
Args:
name (str): Registry name.
build_func (callable, optional): Build function used to construct an
    instance from the registry. :func:`build_from_cfg` is used if
    neither ``parent`` nor ``build_func`` is specified. If ``parent``
    is specified and ``build_func`` is not given, ``build_func`` will
    be inherited from ``parent``. Default: None.
parent (Registry, optional): Parent registry. Classes registered in a
    child registry can be built from the parent. Default: None.
scope (str, optional): The scope of the registry. It is the key used
    to search for child registries. If not specified, the scope will
    be the name of the package where the class is defined, e.g.
    mmdet, mmcls, mmseg. Default: None.
"""
def __init__(self, name, build_func=None, parent=None, scope=None):
self._name = name
self._module_dict = dict()
self._children = dict()
self._scope = self.infer_scope() if scope is None else scope
# self.build_func will be set with the following priority:
# 1. build_func
# 2. parent.build_func
# 3. build_from_cfg
if build_func is None:
if parent is not None:
self.build_func = parent.build_func
else:
self.build_func = build_from_cfg
else:
self.build_func = build_func
if parent is not None:
assert isinstance(parent, Registry)
parent._add_children(self)
self.parent = parent
else:
self.parent = None
def __len__(self):
return len(self._module_dict)
def __contains__(self, key):
return self.get(key) is not None
def __repr__(self):
format_str = self.__class__.__name__ + \
f'(name={self._name}, ' \
f'items={self._module_dict})'
return format_str
@staticmethod
def infer_scope():
"""Infer the scope of registry.
The name of the package where registry is defined will be returned.
Example:
>>> # in mmdet/models/backbone/resnet.py
>>> MODELS = Registry('models')
>>> @MODELS.register_module()
>>> class ResNet:
>>> pass
The scope of ``ResNet`` will be ``mmdet``.
Returns:
str: The inferred scope name.
"""
# We access the caller using inspect.currentframe() instead of
# inspect.stack() for performance reasons. See details in PR #1844
frame = inspect.currentframe()
# get the frame where `infer_scope()` is called
infer_scope_caller = frame.f_back.f_back
filename = inspect.getmodule(infer_scope_caller).__name__
split_filename = filename.split('.')
return split_filename[0]
@staticmethod
def split_scope_key(key):
"""Split scope and key.
The first scope will be split from key.
Examples:
>>> Registry.split_scope_key('mmdet.ResNet')
'mmdet', 'ResNet'
>>> Registry.split_scope_key('ResNet')
None, 'ResNet'
Returns:
tuple[str | None, str]: The former element is the first scope of
the key, which can be ``None``. The latter is the remaining key.
"""
split_index = key.find('.')
if split_index != -1:
return key[:split_index], key[split_index + 1:]
else:
return None, key
@property
def name(self):
return self._name
@property
def scope(self):
return self._scope
@property
def module_dict(self):
return self._module_dict
@property
def children(self):
return self._children
def get(self, key):
"""Get the registry record.
Args:
key (str): The class name in string format.
Returns:
class: The corresponding class.
"""
scope, real_key = self.split_scope_key(key)
if scope is None or scope == self._scope:
# get from self
if real_key in self._module_dict:
return self._module_dict[real_key]
else:
# get from self._children
if scope in self._children:
return self._children[scope].get(real_key)
else:
# goto root
parent = self.parent
while parent.parent is not None:
parent = parent.parent
return parent.get(key)
def build(self, *args, **kwargs):
return self.build_func(*args, **kwargs, registry=self)
def _add_children(self, registry):
"""Add children for a registry.
The ``registry`` will be added as children based on its scope.
The parent registry could build objects from children registry.
Example:
>>> models = Registry('models')
>>> mmdet_models = Registry('models', parent=models)
>>> @mmdet_models.register_module()
>>> class ResNet:
>>> pass
>>> resnet = models.build(dict(type='mmdet.ResNet'))
"""
assert isinstance(registry, Registry)
assert registry.scope is not None
assert registry.scope not in self.children, \
f'scope {registry.scope} exists in {self.name} registry'
self.children[registry.scope] = registry
@deprecated_api_warning(name_dict=dict(module_class='module'))
def _register_module(self, module, module_name=None, force=False):
if not inspect.isclass(module) and not inspect.isfunction(module):
raise TypeError('module must be a class or a function, '
f'but got {type(module)}')
if module_name is None:
module_name = module.__name__
if isinstance(module_name, str):
module_name = [module_name]
for name in module_name:
if not force and name in self._module_dict:
raise KeyError(f'{name} is already registered '
f'in {self.name}')
self._module_dict[name] = module
def deprecated_register_module(self, cls=None, force=False):
warnings.warn(
'The old API of register_module(module, force=False) '
'is deprecated and will be removed, please use the new API '
'register_module(name=None, force=False, module=None) instead.',
DeprecationWarning)
if cls is None:
return partial(self.deprecated_register_module, force=force)
self._register_module(cls, force=force)
return cls
def register_module(self, name=None, force=False, module=None):
"""Register a module.
A record will be added to `self._module_dict`, whose key is the class
name or the specified name, and value is the class itself.
It can be used as a decorator or a normal function.
Example:
>>> backbones = Registry('backbone')
>>> @backbones.register_module()
>>> class ResNet:
>>> pass
>>> backbones = Registry('backbone')
>>> @backbones.register_module(name='mnet')
>>> class MobileNet:
>>> pass
>>> backbones = Registry('backbone')
>>> class ResNet:
>>> pass
>>> backbones.register_module(ResNet)
Args:
name (str | None): The module name to be registered. If not
specified, the class name will be used.
force (bool, optional): Whether to override an existing class with
the same name. Default: False.
module (type): Module class or function to be registered.
"""
if not isinstance(force, bool):
raise TypeError(f'force must be a boolean, but got {type(force)}')
# NOTE: This is a workaround to be compatible with the old API,
# but it may introduce unexpected bugs.
if isinstance(name, type):
return self.deprecated_register_module(name, force=force)
# raise the error ahead of time
if not (name is None or isinstance(name, str) or is_seq_of(name, str)):
raise TypeError(
'name must be either of None, an instance of str or a sequence'
f' of str, but got {type(name)}')
# use it as a normal method: x.register_module(module=SomeClass)
if module is not None:
self._register_module(module=module, module_name=name, force=force)
return module
# use it as a decorator: @x.register_module()
def _register(module):
self._register_module(module=module, module_name=name, force=force)
return module
return _register
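# Illustrative usage sketch (not part of the original module); the
# ``DETECTORS`` registry and ``FasterRCNN`` class are hypothetical names.
if __name__ == '__main__':
    DETECTORS = Registry('detector')

    @DETECTORS.register_module()
    class FasterRCNN:

        def __init__(self, depth=50):
            self.depth = depth

    # ``build`` forwards the config (minus ``type``) to the constructor.
    model = DETECTORS.build(dict(type='FasterRCNN', depth=101))
    assert isinstance(model, FasterRCNN) and model.depth == 101

    # Functions can be registered and called through the registry as well.
    @DETECTORS.register_module()
    def fasterrcnn_r50():
        return FasterRCNN(depth=50)

    assert DETECTORS.build(dict(type='fasterrcnn_r50')).depth == 50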
# Copyright (c) OpenMMLab. All rights reserved.
import random
import numpy as np
import torch
def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int):
"""Function to initialize each worker.
The seed of each worker equals
``num_workers * rank + worker_id + seed``.
Args:
worker_id (int): Id for each worker.
num_workers (int): Number of workers.
rank (int): Rank in distributed training.
seed (int): Random seed.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
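# Illustrative usage sketch (not part of the original module); the dataset and
# DataLoader settings below are made up for demonstration.
if __name__ == '__main__':
    from functools import partial

    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.arange(16, dtype=torch.float32))
    # Worker i of rank 0 gets seed 2 * 0 + i + 42, so workers never collide.
    init_fn = partial(worker_init_fn, num_workers=2, rank=0, seed=42)
    loader = DataLoader(
        dataset, batch_size=4, num_workers=2, worker_init_fn=init_fn)
    for batch in loader:
        pass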
# Copyright (c) Open-MMLab.
import sys
from collections.abc import Iterable
from runpy import run_path
from shlex import split
from typing import Any, Dict, List
from unittest.mock import patch
def check_python_script(cmd):
"""Run the python cmd script with `__main__`. The difference between
`os.system` is that, this function exectues code in the current process, so
that it can be tracked by coverage tools. Currently it supports two forms:
- ./tests/data/scripts/hello.py zz
- python tests/data/scripts/hello.py zz
"""
args = split(cmd)
if args[0] == 'python':
args = args[1:]
with patch.object(sys, 'argv', args):
run_path(args[0], run_name='__main__')
def _any(judge_result):
"""Since built-in ``any`` works only when the element of iterable is not
iterable, implement the function."""
if not isinstance(judge_result, Iterable):
return judge_result
try:
for element in judge_result:
if _any(element):
return True
except TypeError:
# May encounter cases such as torch.tensor(True) | torch.tensor(False)
if judge_result:
return True
return False
def assert_dict_contains_subset(dict_obj: Dict[Any, Any],
expected_subset: Dict[Any, Any]) -> bool:
"""Check if the dict_obj contains the expected_subset.
Args:
dict_obj (Dict[Any, Any]): Dict object to be checked.
expected_subset (Dict[Any, Any]): Subset expected to be contained in
dict_obj.
Returns:
bool: Whether the dict_obj contains the expected_subset.
"""
for key, value in expected_subset.items():
if key not in dict_obj.keys() or _any(dict_obj[key] != value):
return False
return True
def assert_attrs_equal(obj: Any, expected_attrs: Dict[str, Any]) -> bool:
"""Check if attribute of class object is correct.
Args:
obj (object): Class object to be checked.
expected_attrs (Dict[str, Any]): Dict of the expected attrs.
Returns:
bool: Whether the attributes of the class object are correct.
"""
for attr, value in expected_attrs.items():
if not hasattr(obj, attr) or _any(getattr(obj, attr) != value):
return False
return True
def assert_dict_has_keys(obj: Dict[str, Any],
expected_keys: List[str]) -> bool:
"""Check if the obj has all the expected_keys.
Args:
obj (Dict[str, Any]): Object to be checked.
expected_keys (List[str]): Keys expected to be contained in the keys
    of the obj.
Returns:
bool: Whether the obj has the expected keys.
"""
return set(expected_keys).issubset(set(obj.keys()))
def assert_keys_equal(result_keys: List[str], target_keys: List[str]) -> bool:
"""Check if target_keys is equal to result_keys.
Args:
result_keys (List[str]): Result keys to be checked.
target_keys (List[str]): Target keys to be checked.
Returns:
bool: Whether target_keys is equal to result_keys.
"""
return set(result_keys) == set(target_keys)
def assert_is_norm_layer(module) -> bool:
"""Check if the module is a norm layer.
Args:
module (nn.Module): The module to be checked.
Returns:
bool: Whether the module is a norm layer.
"""
from torch.nn import GroupNorm, LayerNorm
from .parrots_wrapper import _BatchNorm, _InstanceNorm
norm_layer_candidates = (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)
return isinstance(module, norm_layer_candidates)
def assert_params_all_zeros(module) -> bool:
"""Check if the parameters of the module is all zeros.
Args:
module (nn.Module): The module to be checked.
Returns:
bool: Whether the parameters of the module are all zeros.
"""
weight_data = module.weight.data
is_weight_zero = weight_data.allclose(
weight_data.new_zeros(weight_data.size()))
if hasattr(module, 'bias') and module.bias is not None:
bias_data = module.bias.data
is_bias_zero = bias_data.allclose(
bias_data.new_zeros(bias_data.size()))
else:
is_bias_zero = True
return is_weight_zero and is_bias_zero
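# Illustrative usage sketch (not part of the original module); the dicts and
# module below are made up for demonstration.
if __name__ == '__main__':
    import torch.nn as nn

    result = dict(loss=0.1, acc=0.9, extra='unused')
    assert assert_dict_contains_subset(result, dict(acc=0.9))
    assert assert_dict_has_keys(result, ['loss', 'acc'])
    assert assert_keys_equal(['a', 'b'], ['b', 'a'])

    # A BatchNorm layer with zero-initialized affine parameters.
    bn = nn.BatchNorm2d(3)
    nn.init.zeros_(bn.weight)
    nn.init.zeros_(bn.bias)
    assert assert_is_norm_layer(bn)
    assert assert_params_all_zeros(bn)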
# Copyright (c) OpenMMLab. All rights reserved.
from time import time
class TimerError(Exception):
def __init__(self, message):
self.message = message
super().__init__(message)
class Timer:
"""A flexible Timer class.
Examples:
>>> import time
>>> import mmcv
>>> with mmcv.Timer():
>>> # simulate a code block that will run for 1s
>>> time.sleep(1)
1.000
>>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
>>> # simulate a code block that will run for 1s
>>> time.sleep(1)
it takes 1.0 seconds
>>> timer = mmcv.Timer()
>>> time.sleep(0.5)
>>> print(timer.since_start())
0.500
>>> time.sleep(0.5)
>>> print(timer.since_last_check())
0.500
>>> print(timer.since_start())
1.000
"""
def __init__(self, start=True, print_tmpl=None):
self._is_running = False
self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}'
if start:
self.start()
@property
def is_running(self):
"""bool: indicate whether the timer is running"""
return self._is_running
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
print(self.print_tmpl.format(self.since_last_check()))
self._is_running = False
def start(self):
"""Start the timer."""
if not self._is_running:
self._t_start = time()
self._is_running = True
self._t_last = time()
def since_start(self):
"""Total time since the timer is started.
Returns:
float: Time in seconds.
"""
if not self._is_running:
raise TimerError('timer is not running')
self._t_last = time()
return self._t_last - self._t_start
def since_last_check(self):
"""Time since the last checking.
Either :func:`since_start` or :func:`since_last_check` is a checking
operation.
Returns:
float: Time in seconds.
"""
if not self._is_running:
raise TimerError('timer is not running')
dur = time() - self._t_last
self._t_last = time()
return dur
_g_timers = {} # global timers
def check_time(timer_id):
"""Add check points in a single line.
This method is suitable for running a task on a list of items. A timer will
be registered when the method is called for the first time.
Examples:
>>> import time
>>> import mmcv
>>> for i in range(1, 6):
>>> # simulate a code block
>>> time.sleep(i)
>>> mmcv.check_time('task1')
2.000
3.000
4.000
5.000
Args:
    timer_id (str): Timer identifier.
"""
if timer_id not in _g_timers:
_g_timers[timer_id] = Timer()
return 0
else:
return _g_timers[timer_id].since_last_check()
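# Illustrative usage sketch (not part of the original module).
if __name__ == '__main__':
    import time

    # As a context manager: the elapsed time is printed on exit.
    with Timer(print_tmpl='block took {:.3f}s'):
        time.sleep(0.2)

    # Manual checkpoints: since_last_check() resets the reference point.
    timer = Timer()
    time.sleep(0.1)
    print(timer.since_start())       # ~0.1
    time.sleep(0.1)
    print(timer.since_last_check())  # ~0.1
    print(timer.since_start())       # ~0.2

    # check_time registers a global timer under the given id on its first
    # call and afterwards reports the time since the previous call.
    for _ in range(3):
        time.sleep(0.05)
        print(check_time('loop'))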
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from .parrots_wrapper import TORCH_VERSION
from .version_utils import digit_version
_torch_version_meshgrid_indexing = (
'parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.10.0a0'))
def torch_meshgrid(*tensors):
"""A wrapper of torch.meshgrid to compat different PyTorch versions.
Since PyTorch 1.10.0a0, torch.meshgrid supports the arguments ``indexing``.
So we implement a wrapper here to avoid warning when using high-version
PyTorch and avoid compatibility issues when using previous versions of
PyTorch.
Args:
tensors (List[Tensor]): List of scalars or 1 dimensional tensors.
Returns:
Sequence[Tensor]: Sequence of meshgrid tensors.
"""
if _torch_version_meshgrid_indexing:
return torch.meshgrid(*tensors, indexing='ij')
else:
return torch.meshgrid(*tensors) # Uses indexing='ij' by default
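# Illustrative usage sketch (not part of the original module).
if __name__ == '__main__':
    xs = torch.arange(3)
    ys = torch.arange(4)
    # 'ij' indexing on every supported PyTorch version: both grids are (3, 4).
    grid_x, grid_y = torch_meshgrid(xs, ys)
    assert grid_x.shape == (3, 4) and grid_y.shape == (3, 4)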
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from mmcv.utils import digit_version
def is_jit_tracing() -> bool:
if (torch.__version__ != 'parrots'
and digit_version(torch.__version__) >= digit_version('1.6.0')):
on_trace = torch.jit.is_tracing()
# In PyTorch 1.6, torch.jit.is_tracing has a bug.
# Refers to https://github.com/pytorch/pytorch/issues/42448
if isinstance(on_trace, bool):
return on_trace
else:
return torch._C._is_tracing()
else:
warnings.warn(
'torch.jit.is_tracing is only supported after v1.6.0. '
'Therefore is_tracing returns False automatically. Please '
'set on_trace manually if you are using trace.', UserWarning)
return False
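# Illustrative usage sketch (not part of the original module); ``ToyModel`` is
# a hypothetical example.
if __name__ == '__main__':
    import torch.nn as nn

    class ToyModel(nn.Module):

        def forward(self, x):
            # Only branch outside of tracing, since traced graphs cannot
            # record data-dependent control flow.
            if not is_jit_tracing() and x.sum() < 0:
                x = -x
            return x * 2

    traced = torch.jit.trace(ToyModel(), torch.randn(1, 3))
    print(is_jit_tracing())  # False outside of a tracing context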
# Copyright (c) OpenMMLab. All rights reserved.
import os
import subprocess
import warnings
from packaging.version import parse
def digit_version(version_str: str, length: int = 4):
"""Convert a version string into a tuple of integers.
This method is usually used for comparing two versions. For pre-release
versions: alpha < beta < rc.
Args:
version_str (str): The version string.
length (int): The maximum number of version levels. Default: 4.
Returns:
tuple[int]: The version info in digits (integers).
"""
assert 'parrots' not in version_str
version = parse(version_str)
assert version.release, f'failed to parse version {version_str}'
release = list(version.release)
release = release[:length]
if len(release) < length:
release = release + [0] * (length - len(release))
if version.is_prerelease:
mapping = {'a': -3, 'b': -2, 'rc': -1}
val = -4
# version.pre can be None
if version.pre:
if version.pre[0] not in mapping:
warnings.warn(f'unknown prerelease version {version.pre[0]}, '
'version checking may go wrong')
else:
val = mapping[version.pre[0]]
release.extend([val, version.pre[-1]])
else:
release.extend([val, 0])
elif version.is_postrelease:
release.extend([1, version.post]) # type: ignore
else:
release.extend([0, 0])
return tuple(release)
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
def get_git_hash(fallback='unknown', digits=None):
"""Get the git hash of the current repo.
Args:
fallback (str, optional): The fallback string when git hash is
unavailable. Defaults to 'unknown'.
digits (int, optional): Number of digits of the hash to keep.
    Defaults to None, meaning all digits are kept.
Returns:
str: Git commit hash.
"""
if digits is not None and not isinstance(digits, int):
raise TypeError('digits must be None or an integer')
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
if digits is not None:
sha = sha[:digits]
except OSError:
sha = fallback
return sha
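# Illustrative usage sketch (not part of the original module).
if __name__ == '__main__':
    # Pre-release versions compare lower than the corresponding release,
    # and alpha < beta < rc.
    assert digit_version('1.10.0rc1') < digit_version('1.10.0')
    assert digit_version('1.6.0a0') < digit_version('1.6.0b0')
    # Post-release versions compare higher than the release itself.
    assert digit_version('1.8.0.post1') > digit_version('1.8.0')
    # The first 7 characters of the current commit hash, or 'unknown' when
    # git is unavailable.
    print(get_git_hash(digits=7))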
@@ -6,9 +6,8 @@ import cv2
 from cv2 import (CAP_PROP_FOURCC, CAP_PROP_FPS, CAP_PROP_FRAME_COUNT,
                  CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH,
                  CAP_PROP_POS_FRAMES, VideoWriter_fourcc)
-from mmcv.utils import (check_file_exist, mkdir_or_exist, scandir,
-                        track_progress)
+from mmengine.utils import (check_file_exist, mkdir_or_exist, scandir,
+                            track_progress)
 class Cache:
@@ -4,10 +4,10 @@ from typing import Tuple, Union
 import cv2
 import numpy as np
+from mmengine.utils import is_str
 from mmcv.arraymisc import dequantize, quantize
 from mmcv.image import imread, imwrite
-from mmcv.utils import is_str
 def flowread(flow_or_path: Union[np.ndarray, str],
@@ -5,7 +5,7 @@ import subprocess
 import tempfile
 from typing import List, Optional, Union
-from mmcv.utils import requires_executable
+from mmengine.utils import requires_executable
 @requires_executable('ffmpeg')
@@ -3,8 +3,7 @@ from enum import Enum
 from typing import Union
 import numpy as np
-from mmcv.utils import is_str
+from mmengine.utils import is_str
 class Color(Enum):
@@ -6,6 +6,7 @@ import pytest
 import torch
 import torch.nn as nn
 from mmengine.registry import MODELS
+from mmengine.utils.parrots_wrapper import _BatchNorm
 from mmcv.cnn.bricks import (build_activation_layer, build_conv_layer,
                              build_norm_layer, build_padding_layer,
@@ -13,7 +14,6 @@ from mmcv.cnn.bricks import (build_activation_layer, build_conv_layer,
 from mmcv.cnn.bricks.norm import infer_abbr as infer_norm_abbr
 from mmcv.cnn.bricks.plugin import infer_abbr as infer_plugin_abbr
 from mmcv.cnn.bricks.upsample import PixelShufflePack
-from mmcv.utils.parrots_wrapper import _BatchNorm
 def test_build_conv_layer():
@@ -6,9 +6,9 @@ import pytest
 import torch
 import torch.nn as nn
 from mmengine.registry import MODELS
+from mmengine.utils import TORCH_VERSION, digit_version
 from mmcv.cnn.bricks import ConvModule, HSigmoid, HSwish
-from mmcv.utils import TORCH_VERSION, digit_version
 @MODELS.register_module()
# Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import numpy as np
import pytest
import torch
import torch.distributed as dist
from mmcv.cnn.bricks import ConvModule
from mmcv.cnn.utils import revert_sync_batchnorm
if platform.system() == 'Windows':
import regex as re
else:
import re
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
def test_revert_mmsyncbn():
if 'SLURM_NTASKS' not in os.environ or int(os.environ['SLURM_NTASKS']) < 2:
print('Must run on slurm with more than 1 process!\n'
'srun -p test --gres=gpu:2 -n2')
return
rank = int(os.environ['SLURM_PROCID'])
world_size = int(os.environ['SLURM_NTASKS'])
local_rank = int(os.environ['SLURM_LOCALID'])
node_list = str(os.environ['SLURM_NODELIST'])
node_parts = re.findall('[0-9]+', node_list)
os.environ['MASTER_ADDR'] = (f'{node_parts[1]}.{node_parts[2]}' +
f'.{node_parts[3]}.{node_parts[4]}')
os.environ['MASTER_PORT'] = '12341'
os.environ['WORLD_SIZE'] = str(world_size)
os.environ['RANK'] = str(rank)
dist.init_process_group('nccl')
torch.cuda.set_device(local_rank)
x = torch.randn(1, 3, 10, 10).cuda()
dist.broadcast(x, src=0)
conv = ConvModule(3, 8, 2, norm_cfg=dict(type='MMSyncBN')).cuda()
conv.eval()
y_mmsyncbn = conv(x).detach().cpu().numpy()
conv = revert_sync_batchnorm(conv)
y_bn = conv(x).detach().cpu().numpy()
assert np.all(np.isclose(y_bn, y_mmsyncbn, 1e-3))
conv, x = conv.to('cpu'), x.to('cpu')
y_bn_cpu = conv(x).detach().numpy()
assert np.all(np.isclose(y_bn, y_bn_cpu, 1e-3))
@@ -2,8 +2,7 @@
 import numpy as np
 import pytest
 import torch
-from mmcv.utils import TORCH_VERSION, digit_version
+from mmengine.utils import TORCH_VERSION, digit_version
 try:
     # If PyTorch version >= 1.6.0 and fp16 is enabled, torch.cuda.amp.autocast
@@ -4,8 +4,7 @@ import os
 import numpy
 import pytest
 import torch
-from mmcv.utils import TORCH_VERSION, digit_version
+from mmengine.utils import TORCH_VERSION, digit_version
 try:
     # If PyTorch version >= 1.6.0 and fp16 is enabled, torch.cuda.amp.autocast