OpenDAS / MMCV / Commits / fdeee889

Commit fdeee889, authored May 25, 2025 by limm

    release v1.6.1 of mmcv

Parent: df465820
Showing 20 changed files with 958 additions and 449 deletions (+958 -449).
mmcv/runner/hooks/ema.py                   +9    -7
mmcv/runner/hooks/evaluation.py            +42   -36
mmcv/runner/hooks/logger/__init__.py       +4    -1
mmcv/runner/hooks/logger/base.py           +30   -24
mmcv/runner/hooks/logger/clearml.py        +63   -0
mmcv/runner/hooks/logger/dvclive.py        +39   -28
mmcv/runner/hooks/logger/mlflow.py         +42   -39
mmcv/runner/hooks/logger/neptune.py        +41   -34
mmcv/runner/hooks/logger/pavi.py           +52   -20
mmcv/runner/hooks/logger/segmind.py        +48   -0
mmcv/runner/hooks/logger/tensorboard.py    +23   -11
mmcv/runner/hooks/logger/text.py           +29   -29
mmcv/runner/hooks/logger/wandb.py          +67   -16
mmcv/runner/hooks/lr_updater.py            +185  -101
mmcv/runner/hooks/memory.py                +4    -1
mmcv/runner/hooks/momentum_updater.py      +154  -53
mmcv/runner/hooks/optimizer.py             +87   -32
mmcv/runner/hooks/profiler.py              +11   -1
mmcv/runner/hooks/sync_buffer.py           +1    -1
mmcv/runner/iter_based_runner.py           +27   -15
Too many changes to show: to preserve performance only 490 of 490+ files are displayed.
mmcv/runner/hooks/ema.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Optional
+
 from ...parallel import is_module_wrapper
 from ..hooks.hook import HOOKS, Hook
...
@@ -13,8 +15,8 @@ class EMAHook(Hook):
     .. math::
-        \text{Xema\_{t+1}} = (1 - \text{momentum}) \times
-        \text{Xema\_{t}} + \text{momentum} \times X_t
+        Xema\_{t+1} = (1 - \text{momentum}) \times
+        Xema\_{t} + \text{momentum} \times X_t
     Args:
         momentum (float): The momentum used for updating ema parameter.
...
@@ -23,14 +25,14 @@ class EMAHook(Hook):
             Defaults to 1.
         warm_up (int): During first warm_up steps, we may use smaller momentum
             to update ema parameters more slowly. Defaults to 100.
-        resume_from (str): The checkpoint path. Defaults to None.
+        resume_from (str, optional): The checkpoint path. Defaults to None.
     """

     def __init__(self,
-                 momentum=0.0002,
-                 interval=1,
-                 warm_up=100,
-                 resume_from=None):
+                 momentum: float = 0.0002,
+                 interval: int = 1,
+                 warm_up: int = 100,
+                 resume_from: Optional[str] = None):
         assert isinstance(interval, int) and interval > 0
         self.warm_up = warm_up
         self.interval = interval
...
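The EMA rule in the docstring above is a one-line recurrence; a minimal standalone sketch (illustrative only, not MMCV code):

    def ema_update(x_ema: float, x: float, momentum: float = 0.0002) -> float:
        # Xema_{t+1} = (1 - momentum) * Xema_t + momentum * X_t
        return (1 - momentum) * x_ema + momentum * x

    # With the default momentum the EMA moves slowly: from 1.0 toward 0.0,
    # one update only reaches 0.9998.
    assert abs(ema_update(1.0, 0.0) - 0.9998) < 1e-12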
mmcv/runner/hooks/evaluation.py  [view file @ fdeee889]

...
@@ -2,6 +2,7 @@
 import os.path as osp
 import warnings
 from math import inf
+from typing import Callable, List, Optional

 import torch.distributed as dist
 from torch.nn.modules.batchnorm import _BatchNorm
...
@@ -65,7 +66,7 @@ class EvalHook(Hook):
         **eval_kwargs: Evaluation arguments fed into the evaluate function of
             the dataset.

-    Notes:
+    Note:
         If new arguments are added for EvalHook, tools/test.py,
         tools/eval_metric.py may be affected.
     """
...
@@ -83,17 +84,17 @@ class EvalHook(Hook):
     _default_less_keys = ['loss']

     def __init__(self,
-                 dataloader,
-                 start=None,
-                 interval=1,
-                 by_epoch=True,
-                 save_best=None,
-                 rule=None,
-                 test_fn=None,
-                 greater_keys=None,
-                 less_keys=None,
-                 out_dir=None,
-                 file_client_args=None,
+                 dataloader: DataLoader,
+                 start: Optional[int] = None,
+                 interval: int = 1,
+                 by_epoch: bool = True,
+                 save_best: Optional[str] = None,
+                 rule: Optional[str] = None,
+                 test_fn: Optional[Callable] = None,
+                 greater_keys: Optional[List[str]] = None,
+                 less_keys: Optional[List[str]] = None,
+                 out_dir: Optional[str] = None,
+                 file_client_args: Optional[dict] = None,
                  **eval_kwargs):
         if not isinstance(dataloader, DataLoader):
             raise TypeError(f'dataloader must be a pytorch DataLoader, '
...
@@ -131,6 +132,7 @@ class EvalHook(Hook):
             self.greater_keys = self._default_greater_keys
         else:
             if not isinstance(greater_keys, (list, tuple)):
+                assert isinstance(greater_keys, str)
                 greater_keys = (greater_keys, )
             assert is_seq_of(greater_keys, str)
             self.greater_keys = greater_keys
...
@@ -139,6 +141,7 @@ class EvalHook(Hook):
             self.less_keys = self._default_less_keys
         else:
             if not isinstance(less_keys, (list, tuple)):
+                assert isinstance(greater_keys, str)
                 less_keys = (less_keys, )
             assert is_seq_of(less_keys, str)
             self.less_keys = less_keys
...
@@ -150,7 +153,7 @@ class EvalHook(Hook):
         self.out_dir = out_dir
         self.file_client_args = file_client_args

-    def _init_rule(self, rule, key_indicator):
+    def _init_rule(self, rule: Optional[str], key_indicator: str):
         """Initialize rule, key_indicator, comparison_func, and best score.

         Here is the rule to determine which rule is used for key indicator
...
@@ -160,10 +163,10 @@ class EvalHook(Hook):
            specified as 'greater'.
         2. Or if the key indicator is in ``self.less_keys``, the rule will be
            specified as 'less'.
-        3. Or if the key indicator is equal to the substring in any one item
-           in ``self.greater_keys``, the rule will be specified as 'greater'.
-        4. Or if the key indicator is equal to the substring in any one item
-           in ``self.less_keys``, the rule will be specified as 'less'.
+        3. Or if any one item in ``self.greater_keys`` is a substring of
+           key_indicator, the rule will be specified as 'greater'.
+        4. Or if any one item in ``self.less_keys`` is a substring of
+           key_indicator, the rule will be specified as 'less'.

         Args:
             rule (str | None): Comparison rule for best score.
...
@@ -178,6 +181,7 @@ class EvalHook(Hook):
             if key_indicator != 'auto':
                 # `_lc` here means we use the lower case of keys for
                 # case-insensitive matching
+                assert isinstance(key_indicator, str)
                 key_indicator_lc = key_indicator.lower()
                 greater_keys = [key.lower() for key in self.greater_keys]
                 less_keys = [key.lower() for key in self.less_keys]
...
@@ -214,8 +218,8 @@ class EvalHook(Hook):
             basename = osp.basename(runner.work_dir.rstrip(osp.sep))
             self.out_dir = self.file_client.join_path(self.out_dir, basename)
             runner.logger.info(
-                (f'The best checkpoint will be saved to {self.out_dir} by '
-                 f'{self.file_client.name}'))
+                f'The best checkpoint will be saved to {self.out_dir} by '
+                f'{self.file_client.name}')

         if self.save_best is not None:
             if runner.meta is None:
...
@@ -335,8 +339,8 @@ class EvalHook(Hook):
                     self.best_ckpt_path):
                 self.file_client.remove(self.best_ckpt_path)
                 runner.logger.info(
-                    (f'The previous best checkpoint {self.best_ckpt_path} was '
-                     'removed'))
+                    f'The previous best checkpoint {self.best_ckpt_path} was '
+                    'removed')

             best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
             self.best_ckpt_path = self.file_client.join_path(
...
@@ -344,7 +348,9 @@ class EvalHook(Hook):
             runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path

             runner.save_checkpoint(
-                self.out_dir, best_ckpt_name, create_symlink=False)
+                self.out_dir,
+                filename_tmpl=best_ckpt_name,
+                create_symlink=False)
             runner.logger.info(
                 f'Now best checkpoint is saved as {best_ckpt_name}.')
             runner.logger.info(
...
@@ -437,20 +443,20 @@ class DistEvalHook(EvalHook):
     """

     def __init__(self,
-                 dataloader,
-                 start=None,
-                 interval=1,
-                 by_epoch=True,
-                 save_best=None,
-                 rule=None,
-                 test_fn=None,
-                 greater_keys=None,
-                 less_keys=None,
-                 broadcast_bn_buffer=True,
-                 tmpdir=None,
-                 gpu_collect=False,
-                 out_dir=None,
-                 file_client_args=None,
+                 dataloader: DataLoader,
+                 start: Optional[int] = None,
+                 interval: int = 1,
+                 by_epoch: bool = True,
+                 save_best: Optional[str] = None,
+                 rule: Optional[str] = None,
+                 test_fn: Optional[Callable] = None,
+                 greater_keys: Optional[List[str]] = None,
+                 less_keys: Optional[List[str]] = None,
+                 broadcast_bn_buffer: bool = True,
+                 tmpdir: Optional[str] = None,
+                 gpu_collect: bool = False,
+                 out_dir: Optional[str] = None,
+                 file_client_args: Optional[dict] = None,
                  **eval_kwargs):

         if test_fn is None:
...
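Rules 3 and 4 in the reworded ``_init_rule`` docstring describe substring matching. A minimal sketch of that inference order, using illustrative names rather than the hook's internals:

    from typing import List, Optional

    def infer_rule(key_indicator: str, greater_keys: List[str],
                   less_keys: List[str]) -> Optional[str]:
        key_lc = key_indicator.lower()
        greater = [k.lower() for k in greater_keys]
        less = [k.lower() for k in less_keys]
        if key_lc in greater:                  # rule 1: exact match
            return 'greater'
        if key_lc in less:                     # rule 2: exact match
            return 'less'
        if any(k in key_lc for k in greater):  # rule 3: item is a substring
            return 'greater'
        if any(k in key_lc for k in less):     # rule 4: item is a substring
            return 'less'
        return None

    assert infer_rule('bbox_mAP_50', ['mAP'], ['loss']) == 'greater'
    assert infer_rule('val_loss', ['mAP'], ['loss']) == 'less'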
mmcv/runner/hooks/logger/__init__.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
 from .base import LoggerHook
+from .clearml import ClearMLLoggerHook
 from .dvclive import DvcliveLoggerHook
 from .mlflow import MlflowLoggerHook
 from .neptune import NeptuneLoggerHook
 from .pavi import PaviLoggerHook
+from .segmind import SegmindLoggerHook
 from .tensorboard import TensorboardLoggerHook
 from .text import TextLoggerHook
 from .wandb import WandbLoggerHook
...
@@ -11,5 +13,6 @@ from .wandb import WandbLoggerHook
 __all__ = [
     'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
     'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
-    'NeptuneLoggerHook', 'DvcliveLoggerHook'
+    'NeptuneLoggerHook', 'DvcliveLoggerHook', 'SegmindLoggerHook',
+    'ClearMLLoggerHook'
 ]
mmcv/runner/hooks/logger/base.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
 import numbers
 from abc import ABCMeta, abstractmethod
+from typing import Dict

 import numpy as np
 import torch
...
@@ -12,20 +13,21 @@ class LoggerHook(Hook):
     """Base class for logger hooks.

     Args:
-        interval (int): Logging interval (every k iterations).
+        interval (int): Logging interval (every k iterations). Default 10.
         ignore_last (bool): Ignore the log of last iterations in each epoch
-            if less than `interval`.
+            if less than `interval`. Default True.
         reset_flag (bool): Whether to clear the output buffer after logging.
-        by_epoch (bool): Whether EpochBasedRunner is used.
+            Default False.
+        by_epoch (bool): Whether EpochBasedRunner is used. Default True.
     """

     __metaclass__ = ABCMeta

     def __init__(self,
-                 interval=10,
-                 ignore_last=True,
-                 reset_flag=False,
-                 by_epoch=True):
+                 interval: int = 10,
+                 ignore_last: bool = True,
+                 reset_flag: bool = False,
+                 by_epoch: bool = True):
         self.interval = interval
         self.ignore_last = ignore_last
         self.reset_flag = reset_flag
...
@@ -36,7 +38,9 @@ class LoggerHook(Hook):
         pass

     @staticmethod
-    def is_scalar(val, include_np=True, include_torch=True):
+    def is_scalar(val,
+                  include_np: bool = True,
+                  include_torch: bool = True) -> bool:
         """Tell the input variable is a scalar or not.

         Args:
...
@@ -56,7 +60,7 @@ class LoggerHook(Hook):
         else:
             return False

-    def get_mode(self, runner):
+    def get_mode(self, runner) -> str:
         if runner.mode == 'train':
             if 'time' in runner.log_buffer.output:
                 mode = 'train'
...
@@ -69,7 +73,7 @@ class LoggerHook(Hook):
                             f'but got {runner.mode}')
         return mode

-    def get_epoch(self, runner):
+    def get_epoch(self, runner) -> int:
         if runner.mode == 'train':
             epoch = runner.epoch + 1
         elif runner.mode == 'val':
...
@@ -81,7 +85,7 @@ class LoggerHook(Hook):
                             f'but got {runner.mode}')
         return epoch

-    def get_iter(self, runner, inner_iter=False):
+    def get_iter(self, runner, inner_iter: bool = False) -> int:
         """Get the current training iteration step."""
         if self.by_epoch and inner_iter:
             current_iter = runner.inner_iter + 1
...
@@ -89,7 +93,7 @@ class LoggerHook(Hook):
             current_iter = runner.iter + 1
         return current_iter

-    def get_lr_tags(self, runner):
+    def get_lr_tags(self, runner) -> Dict[str, float]:
         tags = {}
         lrs = runner.current_lr()
         if isinstance(lrs, dict):
...
@@ -99,7 +103,7 @@ class LoggerHook(Hook):
             tags['learning_rate'] = lrs[0]
         return tags

-    def get_momentum_tags(self, runner):
+    def get_momentum_tags(self, runner) -> Dict[str, float]:
         tags = {}
         momentums = runner.current_momentum()
         if isinstance(momentums, dict):
...
@@ -109,12 +113,14 @@ class LoggerHook(Hook):
             tags['momentum'] = momentums[0]
         return tags

-    def get_loggable_tags(self,
-                          runner,
-                          allow_scalar=True,
-                          allow_text=False,
-                          add_mode=True,
-                          tags_to_skip=('time', 'data_time')):
+    def get_loggable_tags(
+            self,
+            runner,
+            allow_scalar: bool = True,
+            allow_text: bool = False,
+            add_mode: bool = True,
+            tags_to_skip: tuple = ('time', 'data_time')) -> Dict:
         tags = {}
         for var, val in runner.log_buffer.output.items():
             if var in tags_to_skip:
...
@@ -130,16 +136,16 @@ class LoggerHook(Hook):
             tags.update(self.get_momentum_tags(runner))
         return tags

-    def before_run(self, runner):
+    def before_run(self, runner) -> None:
         for hook in runner.hooks[::-1]:
             if isinstance(hook, LoggerHook):
                 hook.reset_flag = True
                 break

-    def before_epoch(self, runner):
+    def before_epoch(self, runner) -> None:
         runner.log_buffer.clear()  # clear logs of last epoch

-    def after_train_iter(self, runner):
+    def after_train_iter(self, runner) -> None:
         if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
             runner.log_buffer.average(self.interval)
         elif not self.by_epoch and self.every_n_iters(runner, self.interval):
...
@@ -153,13 +159,13 @@ class LoggerHook(Hook):
         if self.reset_flag:
             runner.log_buffer.clear_output()

-    def after_train_epoch(self, runner):
+    def after_train_epoch(self, runner) -> None:
         if runner.log_buffer.ready:
             self.log(runner)
             if self.reset_flag:
                 runner.log_buffer.clear_output()

-    def after_val_epoch(self, runner):
+    def after_val_epoch(self, runner) -> None:
         runner.log_buffer.average()
         self.log(runner)
         if self.reset_flag:
...
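The annotated base class is designed to be subclassed; a backend only needs to implement ``log``. A hypothetical minimal subclass, assuming the registry exports shown elsewhere in this commit:

    from mmcv.runner.hooks import HOOKS
    from mmcv.runner.hooks.logger import LoggerHook


    @HOOKS.register_module()
    class PrintLoggerHook(LoggerHook):
        """Toy logger that prints loggable tags to stdout (illustrative)."""

        def log(self, runner) -> None:
            tags = self.get_loggable_tags(runner)
            if tags:
                print(f'iter {self.get_iter(runner)}: {tags}')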
mmcv/runner/hooks/logger/clearml.py  [new file (0 → 100644) @ fdeee889]

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional

from ...dist_utils import master_only
from ..hook import HOOKS
from .base import LoggerHook


@HOOKS.register_module()
class ClearMLLoggerHook(LoggerHook):
    """Class to log metrics with clearml.

    It requires `clearml`_ to be installed.

    Args:
        init_kwargs (dict): A dict contains the `clearml.Task.init`
            initialization keys. See `taskinit`_ for more details.
        interval (int): Logging interval (every k iterations). Default 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.

    .. _clearml:
        https://clear.ml/docs/latest/docs/
    .. _taskinit:
        https://clear.ml/docs/latest/docs/references/sdk/task/#taskinit
    """

    def __init__(self,
                 init_kwargs: Optional[Dict] = None,
                 interval: int = 10,
                 ignore_last: bool = True,
                 reset_flag: bool = False,
                 by_epoch: bool = True):
        super().__init__(interval, ignore_last, reset_flag, by_epoch)
        self.import_clearml()
        self.init_kwargs = init_kwargs

    def import_clearml(self):
        try:
            import clearml
        except ImportError:
            raise ImportError(
                'Please run "pip install clearml" to install clearml')
        self.clearml = clearml

    @master_only
    def before_run(self, runner) -> None:
        super().before_run(runner)
        task_kwargs = self.init_kwargs if self.init_kwargs else {}
        self.task = self.clearml.Task.init(**task_kwargs)
        self.task_logger = self.task.get_logger()

    @master_only
    def log(self, runner) -> None:
        tags = self.get_loggable_tags(runner)
        for tag, val in tags.items():
            self.task_logger.report_scalar(tag, tag, val,
                                           self.get_iter(runner))
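A hedged usage sketch for the new hook: in an MMCV-style config it would typically be enabled through ``log_config``; the ``project_name``/``task_name`` keys below follow ``clearml.Task.init`` and are illustrative:

    log_config = dict(
        interval=10,
        hooks=[
            dict(type='TextLoggerHook'),
            dict(
                type='ClearMLLoggerHook',
                init_kwargs=dict(project_name='mmcv-demo',
                                 task_name='run-1')),
        ])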
mmcv/runner/hooks/logger/dvclive.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
+from pathlib import Path
+from typing import Optional
+
 from ...dist_utils import master_only
 from ..hook import HOOKS
 from .base import LoggerHook
...
@@ -11,48 +14,56 @@ class DvcliveLoggerHook(LoggerHook):
     It requires `dvclive`_ to be installed.

     Args:
-        path (str): Directory where dvclive will write TSV log files.
-        interval (int): Logging interval (every k iterations).
-            Default 10.
+        model_file (str): Default None. If not None, after each epoch the
+            model will be saved to {model_file}.
+        interval (int): Logging interval (every k iterations). Default 10.
         ignore_last (bool): Ignore the log of last iterations in each epoch
-            if less than `interval`.
-            Default: True.
+            if less than `interval`. Default: True.
         reset_flag (bool): Whether to clear the output buffer after logging.
-            Default: True.
-        by_epoch (bool): Whether EpochBasedRunner is used.
-            Default: True.
+            Default: False.
+        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
+        kwargs: Arguments for instantiating `Live`_.

     .. _dvclive:
         https://dvc.org/doc/dvclive
+
+    .. _Live:
+        https://dvc.org/doc/dvclive/api-reference/live#parameters
     """

     def __init__(self,
-                 path,
-                 interval=10,
-                 ignore_last=True,
-                 reset_flag=True,
-                 by_epoch=True):
-        super(DvcliveLoggerHook, self).__init__(interval, ignore_last,
-                                                reset_flag, by_epoch)
-        self.path = path
-        self.import_dvclive()
-
-    def import_dvclive(self):
+                 model_file: Optional[str] = None,
+                 interval: int = 10,
+                 ignore_last: bool = True,
+                 reset_flag: bool = False,
+                 by_epoch: bool = True,
+                 **kwargs):
+        super().__init__(interval, ignore_last, reset_flag, by_epoch)
+        self.model_file = model_file
+        self.import_dvclive(**kwargs)
+
+    def import_dvclive(self, **kwargs) -> None:
         try:
-            import dvclive
+            from dvclive import Live
         except ImportError:
             raise ImportError(
                 'Please run "pip install dvclive" to install dvclive')
-        self.dvclive = dvclive
-
-    @master_only
-    def before_run(self, runner):
-        self.dvclive.init(self.path)
+        self.dvclive = Live(**kwargs)

     @master_only
-    def log(self, runner):
+    def log(self, runner) -> None:
         tags = self.get_loggable_tags(runner)
         if tags:
+            self.dvclive.set_step(self.get_iter(runner))
             for k, v in tags.items():
-                self.dvclive.log(k, v, step=self.get_iter(runner))
+                self.dvclive.log(k, v)
+
+    @master_only
+    def after_train_epoch(self, runner) -> None:
+        super().after_train_epoch(runner)
+        if self.model_file is not None:
+            runner.save_checkpoint(
+                Path(self.model_file).parent,
+                filename_tmpl=Path(self.model_file).name,
+                create_symlink=False,
+            )
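The constructor now takes ``model_file`` instead of ``path`` and forwards extra kwargs to ``dvclive.Live``. A hedged config sketch (the work-dir path below is illustrative):

    log_config = dict(
        interval=10,
        hooks=[
            dict(
                type='DvcliveLoggerHook',
                # checkpoint written after each training epoch
                model_file='work_dirs/demo/model.pth'),
        ])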
mmcv/runner/hooks/logger/mlflow.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Dict, Optional
+
+from mmcv.utils import TORCH_VERSION
 from ...dist_utils import master_only
 from ..hook import HOOKS
 from .base import LoggerHook
...
@@ -6,49 +9,46 @@ from .base import LoggerHook
 @HOOKS.register_module()
 class MlflowLoggerHook(LoggerHook):
-
-    def __init__(self,
-                 exp_name=None,
-                 tags=None,
-                 log_model=True,
-                 interval=10,
-                 ignore_last=True,
-                 reset_flag=False,
-                 by_epoch=True):
-        """Class to log metrics and (optionally) a trained model to MLflow.
-
-        It requires `MLflow`_ to be installed.
-
-        Args:
-            exp_name (str, optional): Name of the experiment to be used.
-                Default None.
-                If not None, set the active experiment.
-                If experiment does not exist, an experiment with provided name
-                will be created.
-            tags (dict of str: str, optional): Tags for the current run.
-                Default None.
-                If not None, set tags for the current run.
-            log_model (bool, optional): Whether to log an MLflow artifact.
-                Default True.
-                If True, log runner.model as an MLflow artifact
-                for the current run.
-            interval (int): Logging interval (every k iterations).
-            ignore_last (bool): Ignore the log of last iterations in each epoch
-                if less than `interval`.
-            reset_flag (bool): Whether to clear the output buffer after logging
-            by_epoch (bool): Whether EpochBasedRunner is used.
-
-        .. _MLflow:
-            https://www.mlflow.org/docs/latest/index.html
-        """
-        super(MlflowLoggerHook, self).__init__(interval, ignore_last,
-                                               reset_flag, by_epoch)
+    """Class to log metrics and (optionally) a trained model to MLflow.
+
+    It requires `MLflow`_ to be installed.
+
+    Args:
+        exp_name (str, optional): Name of the experiment to be used.
+            Default None. If not None, set the active experiment.
+            If experiment does not exist, an experiment with provided name
+            will be created.
+        tags (Dict[str], optional): Tags for the current run.
+            Default None. If not None, set tags for the current run.
+        log_model (bool, optional): Whether to log an MLflow artifact.
+            Default True. If True, log runner.model as an MLflow artifact
+            for the current run.
+        interval (int): Logging interval (every k iterations). Default: 10.
+        ignore_last (bool): Ignore the log of last iterations in each epoch
+            if less than `interval`. Default: True.
+        reset_flag (bool): Whether to clear the output buffer after logging.
+            Default: False.
+        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
+
+    .. _MLflow:
+        https://www.mlflow.org/docs/latest/index.html
+    """
+
+    def __init__(self,
+                 exp_name: Optional[str] = None,
+                 tags: Optional[Dict] = None,
+                 log_model: bool = True,
+                 interval: int = 10,
+                 ignore_last: bool = True,
+                 reset_flag: bool = False,
+                 by_epoch: bool = True):
+        super().__init__(interval, ignore_last, reset_flag, by_epoch)
         self.import_mlflow()
         self.exp_name = exp_name
         self.tags = tags
         self.log_model = log_model

-    def import_mlflow(self):
+    def import_mlflow(self) -> None:
         try:
             import mlflow
             import mlflow.pytorch as mlflow_pytorch
...
@@ -59,20 +59,23 @@ class MlflowLoggerHook(LoggerHook):
         self.mlflow_pytorch = mlflow_pytorch

     @master_only
-    def before_run(self, runner):
-        super(MlflowLoggerHook, self).before_run(runner)
+    def before_run(self, runner) -> None:
+        super().before_run(runner)
         if self.exp_name is not None:
             self.mlflow.set_experiment(self.exp_name)
         if self.tags is not None:
             self.mlflow.set_tags(self.tags)

     @master_only
-    def log(self, runner):
+    def log(self, runner) -> None:
         tags = self.get_loggable_tags(runner)
         if tags:
             self.mlflow.log_metrics(tags, step=self.get_iter(runner))

     @master_only
-    def after_run(self, runner):
+    def after_run(self, runner) -> None:
         if self.log_model:
-            self.mlflow_pytorch.log_model(runner.model, 'models')
+            self.mlflow_pytorch.log_model(
                runner.model,
                'models',
                pip_requirements=[f'torch=={TORCH_VERSION}'])
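The final hunk pins the logged artifact's pip requirement to the running torch version. A standalone sketch of the same ``mlflow.pytorch.log_model`` call, assuming mlflow is installed and using a toy module:

    import mlflow
    import mlflow.pytorch
    import torch

    model = torch.nn.Linear(4, 2)
    with mlflow.start_run():
        # record the environment the artifact was produced with
        mlflow.pytorch.log_model(
            model, 'models', pip_requirements=[f'torch=={torch.__version__}'])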
mmcv/runner/hooks/logger/neptune.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Dict, Optional
+
 from ...dist_utils import master_only
 from ..hook import HOOKS
 from .base import LoggerHook
...
@@ -8,48 +10,53 @@ from .base import LoggerHook
 class NeptuneLoggerHook(LoggerHook):
     """Class to log metrics to NeptuneAI.

-    It requires `neptune-client` to be installed.
+    It requires `Neptune`_ to be installed.

     Args:
         init_kwargs (dict): a dict contains the initialization keys as below:
+
             - project (str): Name of a project in a form of
-                namespace/project_name. If None, the value of
-                NEPTUNE_PROJECT environment variable will be taken.
-            - api_token (str): User's API token.
-                If None, the value of NEPTUNE_API_TOKEN environment
-                variable will be taken. Note: It is strongly recommended
-                to use NEPTUNE_API_TOKEN environment variable rather than
-                placing your API token in plain text in your source code.
-            - name (str, optional, default is 'Untitled'): Editable name of
-                the run. Name is displayed in the run's Details and in
-                Runs table as a column.
-        Check https://docs.neptune.ai/api-reference/neptune#init for
-        more init arguments.
-        interval (int): Logging interval (every k iterations).
+              namespace/project_name. If None, the value of NEPTUNE_PROJECT
+              environment variable will be taken.
+            - api_token (str): User's API token. If None, the value of
+              NEPTUNE_API_TOKEN environment variable will be taken. Note: It is
+              strongly recommended to use NEPTUNE_API_TOKEN environment
+              variable rather than placing your API token in plain text in your
+              source code.
+            - name (str, optional, default is 'Untitled'): Editable name of the
+              run. Name is displayed in the run's Details and in Runs table as
+              a column.
+
+            Check https://docs.neptune.ai/api-reference/neptune#init for more
+            init arguments.
+        interval (int): Logging interval (every k iterations). Default: 10.
         ignore_last (bool): Ignore the log of last iterations in each epoch
-            if less than `interval`.
-        reset_flag (bool): Whether to clear the output buffer after logging
-        by_epoch (bool): Whether EpochBasedRunner is used.
+            if less than ``interval``. Default: True.
+        reset_flag (bool): Whether to clear the output buffer after logging.
+            Default: True.
+        with_step (bool): If True, the step will be logged from
+            ``self.get_iters``. Otherwise, step will not be logged.
+            Default: True.
+        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.

-    .. _NeptuneAI:
-        https://docs.neptune.ai/you-should-know/logging-metadata
+    .. _Neptune:
+        https://docs.neptune.ai
    """

     def __init__(self,
-                 init_kwargs=None,
-                 interval=10,
-                 ignore_last=True,
-                 reset_flag=True,
-                 with_step=True,
-                 by_epoch=True):
-        super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
-                                                reset_flag, by_epoch)
+                 init_kwargs: Optional[Dict] = None,
+                 interval: int = 10,
+                 ignore_last: bool = True,
+                 reset_flag: bool = True,
+                 with_step: bool = True,
+                 by_epoch: bool = True):
+        super().__init__(interval, ignore_last, reset_flag, by_epoch)
         self.import_neptune()
         self.init_kwargs = init_kwargs
         self.with_step = with_step

-    def import_neptune(self):
+    def import_neptune(self) -> None:
         try:
             import neptune.new as neptune
         except ImportError:
...
@@ -59,24 +66,24 @@ class NeptuneLoggerHook(LoggerHook):
         self.run = None

     @master_only
-    def before_run(self, runner):
+    def before_run(self, runner) -> None:
         if self.init_kwargs:
             self.run = self.neptune.init(**self.init_kwargs)
         else:
             self.run = self.neptune.init()

     @master_only
-    def log(self, runner):
+    def log(self, runner) -> None:
         tags = self.get_loggable_tags(runner)
         if tags:
             for tag_name, tag_value in tags.items():
                 if self.with_step:
-                    self.run[tag_name].log(
+                    self.run[tag_name].log(  # type: ignore
                         tag_value, step=self.get_iter(runner))
                 else:
                     tags['global_step'] = self.get_iter(runner)
-                    self.run[tag_name].log(tags)
+                    self.run[tag_name].log(tags)  # type: ignore

     @master_only
-    def after_run(self, runner):
-        self.run.stop()
+    def after_run(self, runner) -> None:
+        self.run.stop()  # type: ignore
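A hedged config sketch; ``project`` follows the ``neptune.new.init`` keyword and the workspace name is illustrative:

    log_config = dict(
        interval=10,
        hooks=[
            dict(
                type='NeptuneLoggerHook',
                init_kwargs=dict(project='my-workspace/my-project'),
                with_step=True),
        ])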
mmcv/runner/hooks/logger/pavi.py  [view file @ fdeee889]

...
@@ -2,6 +2,7 @@
 import json
 import os
 import os.path as osp
+from typing import Dict, Optional

 import torch
 import yaml
...
@@ -15,37 +16,68 @@ from .base import LoggerHook
 @HOOKS.register_module()
 class PaviLoggerHook(LoggerHook):
+    """Class to visual model, log metrics (for internal use).
+
+    Args:
+        init_kwargs (dict): A dict contains the initialization keys as below:
+
+            - name (str, optional): Custom training name. Defaults to None,
+              which means current work_dir.
+            - project (str, optional): Project name. Defaults to "default".
+            - model (str, optional): Training model name. Defaults to current
+              model.
+            - session_text (str, optional): Session string in YAML format.
+              Defaults to current config.
+            - training_id (int, optional): Training ID in PAVI, if you want to
+              use an existing training. Defaults to None.
+            - compare_id (int, optional): Compare ID in PAVI, if you want to
+              add the task to an existing compare. Defaults to None.
+            - overwrite_last_training (bool, optional): Whether to upload data
+              to the training with the same name in the same project, rather
+              than creating a new one. Defaults to False.
+        add_graph (bool): Whether to visual model. Default: False.
+        add_last_ckpt (bool): Whether to save checkpoint after run.
+            Default: False.
+        interval (int): Logging interval (every k iterations). Default: True.
+        ignore_last (bool): Ignore the log of last iterations in each epoch
+            if less than `interval`. Default: True.
+        reset_flag (bool): Whether to clear the output buffer after logging.
+            Default: False.
+        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
+        img_key (string): Get image data from Dataset. Default: 'img_info'.
+    """

     def __init__(self,
-                 init_kwargs=None,
-                 add_graph=False,
-                 add_last_ckpt=False,
-                 interval=10,
-                 ignore_last=True,
-                 reset_flag=False,
-                 by_epoch=True,
-                 img_key='img_info'):
-        super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag,
-                                             by_epoch)
+                 init_kwargs: Optional[Dict] = None,
+                 add_graph: bool = False,
+                 add_last_ckpt: bool = False,
+                 interval: int = 10,
+                 ignore_last: bool = True,
+                 reset_flag: bool = False,
+                 by_epoch: bool = True,
+                 img_key: str = 'img_info'):
+        super().__init__(interval, ignore_last, reset_flag, by_epoch)
         self.init_kwargs = init_kwargs
         self.add_graph = add_graph
         self.add_last_ckpt = add_last_ckpt
         self.img_key = img_key

     @master_only
-    def before_run(self, runner):
-        super(PaviLoggerHook, self).before_run(runner)
+    def before_run(self, runner) -> None:
+        super().before_run(runner)
         try:
             from pavi import SummaryWriter
         except ImportError:
-            raise ImportError('Please run "pip install pavi" to install pavi.')
+            raise ImportError(
+                'No module named pavi, please contact pavi team or visit'
+                'document for pavi installation instructions.')

         self.run_name = runner.work_dir.split('/')[-1]

         if not self.init_kwargs:
             self.init_kwargs = dict()
-        self.init_kwargs['name'] = self.run_name
-        self.init_kwargs['model'] = runner._model_name
+        self.init_kwargs.setdefault('name', self.run_name)
+        self.init_kwargs.setdefault('model', runner._model_name)
         if runner.meta is not None:
             if 'config_dict' in runner.meta:
                 config_dict = runner.meta['config_dict']
...
@@ -68,10 +100,10 @@ class PaviLoggerHook(LoggerHook):
             config_dict = json.loads(
                 mmcv.dump(config_dict, file_format='json'))
             session_text = yaml.dump(config_dict)
-            self.init_kwargs['session_text'] = session_text
+            self.init_kwargs.setdefault('session_text', session_text)
         self.writer = SummaryWriter(**self.init_kwargs)

-    def get_step(self, runner):
+    def get_step(self, runner) -> int:
         """Get the total training step/epoch."""
         if self.get_mode(runner) == 'val' and self.by_epoch:
             return self.get_epoch(runner)
...
@@ -79,14 +111,14 @@ class PaviLoggerHook(LoggerHook):
             return self.get_iter(runner)

     @master_only
-    def log(self, runner):
+    def log(self, runner) -> None:
         tags = self.get_loggable_tags(runner, add_mode=False)
         if tags:
             self.writer.add_scalars(
                 self.get_mode(runner), tags, self.get_step(runner))

     @master_only
-    def after_run(self, runner):
+    def after_run(self, runner) -> None:
         if self.add_last_ckpt:
             ckpt_path = osp.join(runner.work_dir, 'latest.pth')
             if osp.islink(ckpt_path):
...
@@ -104,7 +136,7 @@ class PaviLoggerHook(LoggerHook):
         self.writer.close()

     @master_only
-    def before_epoch(self, runner):
+    def before_epoch(self, runner) -> None:
         if runner.epoch == 0 and self.add_graph:
             if is_module_wrapper(runner.model):
                 _model = runner.model.module
...
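PAVI is an internal SenseTime tool, so only a hedged config sketch is possible here; the keys mirror the docstring added above:

    log_config = dict(
        interval=10,
        hooks=[dict(type='PaviLoggerHook', add_last_ckpt=True)])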
mmcv/runner/hooks/logger/segmind.py  [new file (0 → 100644) @ fdeee889]

# Copyright (c) OpenMMLab. All rights reserved.
from ...dist_utils import master_only
from ..hook import HOOKS
from .base import LoggerHook


@HOOKS.register_module()
class SegmindLoggerHook(LoggerHook):
    """Class to log metrics to Segmind.

    It requires `Segmind`_ to be installed.

    Args:
        interval (int): Logging interval (every k iterations). Default: 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default True.

    .. _Segmind:
        https://docs.segmind.com/python-library
    """

    def __init__(self,
                 interval: int = 10,
                 ignore_last: bool = True,
                 reset_flag: bool = False,
                 by_epoch=True):
        super().__init__(interval, ignore_last, reset_flag, by_epoch)
        self.import_segmind()

    def import_segmind(self) -> None:
        try:
            import segmind
        except ImportError:
            raise ImportError(
                "Please run 'pip install segmind' to install segmind")
        self.log_metrics = segmind.tracking.fluent.log_metrics
        self.mlflow_log = segmind.utils.logging_utils.try_mlflow_log

    @master_only
    def log(self, runner) -> None:
        tags = self.get_loggable_tags(runner)
        if tags:
            # logging metrics to segmind
            self.mlflow_log(
                self.log_metrics, tags, step=runner.epoch, epoch=runner.epoch)
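A hedged config sketch for the new hook; it takes only the common logger arguments:

    log_config = dict(
        interval=10,
        hooks=[dict(type='SegmindLoggerHook')])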
mmcv/runner/hooks/logger/tensorboard.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
 import os.path as osp
+from typing import Optional

 from mmcv.utils import TORCH_VERSION, digit_version
 from ...dist_utils import master_only
...
@@ -9,20 +10,31 @@ from .base import LoggerHook
 @HOOKS.register_module()
 class TensorboardLoggerHook(LoggerHook):
+    """Class to log metrics to Tensorboard.
+
+    Args:
+        log_dir (string): Save directory location. Default: None. If default
+            values are used, directory location is ``runner.work_dir``/tf_logs.
+        interval (int): Logging interval (every k iterations). Default: True.
+        ignore_last (bool): Ignore the log of last iterations in each epoch
+            if less than `interval`. Default: True.
+        reset_flag (bool): Whether to clear the output buffer after logging.
+            Default: False.
+        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
+    """

     def __init__(self,
-                 log_dir=None,
-                 interval=10,
-                 ignore_last=True,
-                 reset_flag=False,
-                 by_epoch=True):
-        super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
-                                                    reset_flag, by_epoch)
+                 log_dir: Optional[str] = None,
+                 interval: int = 10,
+                 ignore_last: bool = True,
+                 reset_flag: bool = False,
+                 by_epoch: bool = True):
+        super().__init__(interval, ignore_last, reset_flag, by_epoch)
         self.log_dir = log_dir

     @master_only
-    def before_run(self, runner):
-        super(TensorboardLoggerHook, self).before_run(runner)
+    def before_run(self, runner) -> None:
+        super().before_run(runner)
         if (TORCH_VERSION == 'parrots'
                 or digit_version(TORCH_VERSION) < digit_version('1.1')):
             try:
...
@@ -44,7 +56,7 @@ class TensorboardLoggerHook(LoggerHook):
         self.writer = SummaryWriter(self.log_dir)

     @master_only
-    def log(self, runner):
+    def log(self, runner) -> None:
         tags = self.get_loggable_tags(runner, allow_text=True)
         for tag, val in tags.items():
             if isinstance(val, str):
...
@@ -53,5 +65,5 @@ class TensorboardLoggerHook(LoggerHook):
                 self.writer.add_scalar(tag, val, self.get_iter(runner))

     @master_only
-    def after_run(self, runner):
+    def after_run(self, runner) -> None:
         self.writer.close()
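A hedged config sketch; with ``log_dir=None`` the docstring above says events go to ``runner.work_dir``/tf_logs:

    log_config = dict(
        interval=10,
        hooks=[dict(type='TensorboardLoggerHook', log_dir=None)])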
mmcv/runner/hooks/logger/text.py  [view file @ fdeee889]

...
@@ -3,6 +3,7 @@ import datetime
 import os
 import os.path as osp
 from collections import OrderedDict
+from typing import Dict, Optional, Union

 import torch
 import torch.distributed as dist
...
@@ -53,17 +54,16 @@ class TextLoggerHook(LoggerHook):
     """

     def __init__(self,
-                 by_epoch=True,
-                 interval=10,
-                 ignore_last=True,
-                 reset_flag=False,
-                 interval_exp_name=1000,
-                 out_dir=None,
-                 out_suffix=('.log.json', '.log', '.py'),
-                 keep_local=True,
-                 file_client_args=None):
-        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
-                                             by_epoch)
+                 by_epoch: bool = True,
+                 interval: int = 10,
+                 ignore_last: bool = True,
+                 reset_flag: bool = False,
+                 interval_exp_name: int = 1000,
+                 out_dir: Optional[str] = None,
+                 out_suffix: Union[str, tuple] = ('.log.json', '.log', '.py'),
+                 keep_local: bool = True,
+                 file_client_args: Optional[Dict] = None):
+        super().__init__(interval, ignore_last, reset_flag, by_epoch)
         self.by_epoch = by_epoch
         self.time_sec_tot = 0
         self.interval_exp_name = interval_exp_name
...
@@ -86,8 +86,8 @@ class TextLoggerHook(LoggerHook):
             self.file_client = FileClient.infer_client(file_client_args,
                                                        self.out_dir)

-    def before_run(self, runner):
-        super(TextLoggerHook, self).before_run(runner)
+    def before_run(self, runner) -> None:
+        super().before_run(runner)

         if self.out_dir is not None:
             self.file_client = FileClient.infer_client(self.file_client_args,
...
@@ -97,8 +97,8 @@ class TextLoggerHook(LoggerHook):
             basename = osp.basename(runner.work_dir.rstrip(osp.sep))
             self.out_dir = self.file_client.join_path(self.out_dir, basename)
             runner.logger.info(
-                (f'Text logs will be saved to {self.out_dir} by '
-                 f'{self.file_client.name} after the training process.'))
+                f'Text logs will be saved to {self.out_dir} by '
+                f'{self.file_client.name} after the training process.')

         self.start_iter = runner.iter
         self.json_log_path = osp.join(runner.work_dir,
...
@@ -106,17 +106,17 @@ class TextLoggerHook(LoggerHook):
         if runner.meta is not None:
             self._dump_log(runner.meta, runner)

-    def _get_max_memory(self, runner):
+    def _get_max_memory(self, runner) -> int:
         device = getattr(runner.model, 'output_device', None)
         mem = torch.cuda.max_memory_allocated(device=device)
-        mem_mb = torch.tensor([mem / (1024 * 1024)],
+        mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
                               dtype=torch.int,
                               device=device)
         if runner.world_size > 1:
             dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
         return mem_mb.item()

-    def _log_info(self, log_dict, runner):
+    def _log_info(self, log_dict: Dict, runner) -> None:
         # print exp name for users to distinguish experiments
         # at every ``interval_exp_name`` iterations and the end of each epoch
         if runner.meta is not None and 'exp_name' in runner.meta:
...
@@ -130,9 +130,9 @@ class TextLoggerHook(LoggerHook):
                 lr_str = []
                 for k, val in log_dict['lr'].items():
                     lr_str.append(f'lr_{k}: {val:.3e}')
-                lr_str = ' '.join(lr_str)
+                lr_str = ' '.join(lr_str)  # type: ignore
             else:
-                lr_str = f'lr: {log_dict["lr"]:.3e}'
+                lr_str = f'lr: {log_dict["lr"]:.3e}'  # type: ignore

             # by epoch: Epoch [4][100/1000]
             # by iter:  Iter [100/100000]
...
@@ -182,7 +182,7 @@ class TextLoggerHook(LoggerHook):

         runner.logger.info(log_str)

-    def _dump_log(self, log_dict, runner):
+    def _dump_log(self, log_dict: Dict, runner) -> None:
         # dump log in json format
         json_log = OrderedDict()
         for k, v in log_dict.items():
...
@@ -201,7 +201,7 @@ class TextLoggerHook(LoggerHook):
         else:
             return items

-    def log(self, runner):
+    def log(self, runner) -> OrderedDict:
         if 'eval_iter_num' in runner.log_buffer.output:
             # this doesn't modify runner.iter and is regardless of by_epoch
             cur_iter = runner.log_buffer.output.pop('eval_iter_num')
...
@@ -229,28 +229,28 @@ class TextLoggerHook(LoggerHook):
         if torch.cuda.is_available():
             log_dict['memory'] = self._get_max_memory(runner)

-        log_dict = dict(log_dict, **runner.log_buffer.output)
+        log_dict = dict(log_dict, **runner.log_buffer.output)  # type: ignore

         self._log_info(log_dict, runner)
         self._dump_log(log_dict, runner)
         return log_dict

-    def after_run(self, runner):
+    def after_run(self, runner) -> None:
         # copy or upload logs to self.out_dir
         if self.out_dir is not None:
             for filename in scandir(runner.work_dir, self.out_suffix, True):
                 local_filepath = osp.join(runner.work_dir, filename)
                 out_filepath = self.file_client.join_path(
                     self.out_dir, filename)
-                with open(local_filepath, 'r') as f:
+                with open(local_filepath) as f:
                     self.file_client.put_text(f.read(), out_filepath)

                 runner.logger.info(
-                    (f'The file {local_filepath} has been uploaded to '
-                     f'{out_filepath}.'))
+                    f'The file {local_filepath} has been uploaded to '
+                    f'{out_filepath}.')

                 if not self.keep_local:
                     os.remove(local_filepath)
                     runner.logger.info(
-                        (f'{local_filepath} was removed due to the '
-                         '`self.keep_local=False`'))
+                        f'{local_filepath} was removed due to the '
+                        '`self.keep_local=False`')
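A hedged config sketch; ``out_dir`` may point at any FileClient-resolvable location, and the bucket below is illustrative:

    log_config = dict(
        interval=10,
        hooks=[
            dict(
                type='TextLoggerHook',
                out_dir='s3://my-bucket/logs',  # uploaded after training
                keep_local=True),
        ])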
mmcv/runner/hooks/logger/wandb.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+from typing import Dict, Optional, Union
+
+from mmcv.utils import scandir
 from ...dist_utils import master_only
 from ..hook import HOOKS
 from .base import LoggerHook
...
@@ -6,23 +10,63 @@ from .base import LoggerHook
 @HOOKS.register_module()
 class WandbLoggerHook(LoggerHook):
+    """Class to log metrics with wandb.
+
+    It requires `wandb`_ to be installed.
+
+    Args:
+        init_kwargs (dict): A dict contains the initialization keys. Check
+            https://docs.wandb.ai/ref/python/init for more init arguments.
+        interval (int): Logging interval (every k iterations).
+            Default 10.
+        ignore_last (bool): Ignore the log of last iterations in each epoch
+            if less than `interval`.
+            Default: True.
+        reset_flag (bool): Whether to clear the output buffer after logging.
+            Default: False.
+        commit (bool): Save the metrics dict to the wandb server and increment
+            the step. If false ``wandb.log`` just updates the current metrics
+            dict with the row argument and metrics won't be saved until
+            ``wandb.log`` is called with ``commit=True``.
+            Default: True.
+        by_epoch (bool): Whether EpochBasedRunner is used.
+            Default: True.
+        with_step (bool): If True, the step will be logged from
+            ``self.get_iters``. Otherwise, step will not be logged.
+            Default: True.
+        log_artifact (bool): If True, artifacts in {work_dir} will be uploaded
+            to wandb after training ends.
+            Default: True
+            `New in version 1.4.3.`
+        out_suffix (str or tuple[str], optional): Those filenames ending with
+            ``out_suffix`` will be uploaded to wandb.
+            Default: ('.log.json', '.log', '.py').
+            `New in version 1.4.3.`
+
+    .. _wandb:
+        https://docs.wandb.ai
+    """

     def __init__(self,
-                 init_kwargs=None,
-                 interval=10,
-                 ignore_last=True,
-                 reset_flag=False,
-                 commit=True,
-                 by_epoch=True,
-                 with_step=True):
-        super(WandbLoggerHook, self).__init__(interval, ignore_last,
-                                              reset_flag, by_epoch)
+                 init_kwargs: Optional[Dict] = None,
+                 interval: int = 10,
+                 ignore_last: bool = True,
+                 reset_flag: bool = False,
+                 commit: bool = True,
+                 by_epoch: bool = True,
+                 with_step: bool = True,
+                 log_artifact: bool = True,
+                 out_suffix: Union[str, tuple] = ('.log.json', '.log',
+                                                  '.py')):
+        super().__init__(interval, ignore_last, reset_flag, by_epoch)
         self.import_wandb()
         self.init_kwargs = init_kwargs
         self.commit = commit
         self.with_step = with_step
+        self.log_artifact = log_artifact
+        self.out_suffix = out_suffix

-    def import_wandb(self):
+    def import_wandb(self) -> None:
         try:
             import wandb
         except ImportError:
...
@@ -31,17 +75,17 @@ class WandbLoggerHook(LoggerHook):
         self.wandb = wandb

     @master_only
-    def before_run(self, runner):
-        super(WandbLoggerHook, self).before_run(runner)
+    def before_run(self, runner) -> None:
+        super().before_run(runner)
         if self.wandb is None:
             self.import_wandb()
         if self.init_kwargs:
-            self.wandb.init(**self.init_kwargs)
+            self.wandb.init(**self.init_kwargs)  # type: ignore
         else:
-            self.wandb.init()
+            self.wandb.init()  # type: ignore

     @master_only
-    def log(self, runner):
+    def log(self, runner) -> None:
         tags = self.get_loggable_tags(runner)
         if tags:
             if self.with_step:
...
@@ -52,5 +96,12 @@ class WandbLoggerHook(LoggerHook):
                 self.wandb.log(tags, commit=self.commit)

     @master_only
-    def after_run(self, runner):
+    def after_run(self, runner) -> None:
+        if self.log_artifact:
+            wandb_artifact = self.wandb.Artifact(
+                name='artifacts', type='model')
+            for filename in scandir(runner.work_dir, self.out_suffix, True):
+                local_filepath = osp.join(runner.work_dir, filename)
+                wandb_artifact.add_file(local_filepath)
+            self.wandb.log_artifact(wandb_artifact)
         self.wandb.join()
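A hedged config sketch exercising the new ``log_artifact``/``out_suffix`` arguments; the project name is illustrative:

    log_config = dict(
        interval=10,
        hooks=[
            dict(
                type='WandbLoggerHook',
                init_kwargs=dict(project='mmcv-demo'),
                log_artifact=True,
                out_suffix=('.log.json', '.log', '.py')),
        ])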
mmcv/runner/hooks/lr_updater.py  [view file @ fdeee889]

 # Copyright (c) OpenMMLab. All rights reserved.
 import numbers
 from math import cos, pi
+from typing import Callable, List, Optional, Union

 import mmcv
+from mmcv import runner
 from .hook import HOOKS, Hook
...
@@ -23,17 +25,17 @@ class LrUpdaterHook(Hook):
     """

     def __init__(self,
-                 by_epoch=True,
-                 warmup=None,
-                 warmup_iters=0,
-                 warmup_ratio=0.1,
-                 warmup_by_epoch=False):
+                 by_epoch: bool = True,
+                 warmup: Optional[str] = None,
+                 warmup_iters: int = 0,
+                 warmup_ratio: float = 0.1,
+                 warmup_by_epoch: bool = False) -> None:
         # validate the "warmup" argument
         if warmup is not None:
             if warmup not in ['constant', 'linear', 'exp']:
                 raise ValueError(
                     f'"{warmup}" is not a supported type for warming up, valid'
-                    ' types are "constant" and "linear"')
+                    ' types are "constant", "linear" and "exp"')
         if warmup is not None:
             assert warmup_iters > 0, \
                 '"warmup_iters" must be a positive integer'
...
@@ -42,18 +44,18 @@ class LrUpdaterHook(Hook):
         self.by_epoch = by_epoch
         self.warmup = warmup
-        self.warmup_iters = warmup_iters
+        self.warmup_iters: Optional[int] = warmup_iters
         self.warmup_ratio = warmup_ratio
         self.warmup_by_epoch = warmup_by_epoch

         if self.warmup_by_epoch:
-            self.warmup_epochs = self.warmup_iters
+            self.warmup_epochs: Optional[int] = self.warmup_iters
             self.warmup_iters = None
         else:
             self.warmup_epochs = None

-        self.base_lr = []  # initial lr for all param groups
-        self.regular_lr = []  # expected lr if no warming up is performed
+        self.base_lr: Union[list, dict] = []  # initial lr for all param groups
+        self.regular_lr: list = []  # expected lr if no warming up is performed

     def _set_lr(self, runner, lr_groups):
         if isinstance(runner.optimizer, dict):
...
@@ -65,10 +67,10 @@ class LrUpdaterHook(Hook):
                                        lr_groups):
                 param_group['lr'] = lr

-    def get_lr(self, runner, base_lr):
+    def get_lr(self, runner: 'runner.BaseRunner', base_lr: float):
         raise NotImplementedError

-    def get_regular_lr(self, runner):
+    def get_regular_lr(self, runner: 'runner.BaseRunner'):
         if isinstance(runner.optimizer, dict):
             lr_groups = {}
             for k in runner.optimizer.keys():
...
@@ -82,7 +84,7 @@ class LrUpdaterHook(Hook):
         else:
             return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]

-    def get_warmup_lr(self, cur_iters):
+    def get_warmup_lr(self, cur_iters: int):

         def _get_warmup_lr(cur_iters, regular_lr):
             if self.warmup == 'constant':
...
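For reference, the three warmup types validated above ('constant', 'linear', 'exp') scale the regular LR per iteration. A standalone sketch of the usual MMCV formulas (the ``_get_warmup_lr`` body is collapsed in this view, so these are quoted from the library's documented behavior rather than the diff):

    def warmup_lr(regular_lr: float, cur_iter: int, warmup_iters: int,
                  warmup_ratio: float, mode: str) -> float:
        if mode == 'constant':
            return regular_lr * warmup_ratio
        if mode == 'linear':
            k = (1 - cur_iter / warmup_iters) * (1 - warmup_ratio)
            return regular_lr * (1 - k)
        if mode == 'exp':
            return regular_lr * warmup_ratio**(1 - cur_iter / warmup_iters)
        raise ValueError(f'unsupported warmup type: {mode}')

    # linear warmup ramps from ratio * lr at iter 0 up to lr at warmup_iters
    assert abs(warmup_lr(0.1, 0, 100, 0.1, 'linear') - 0.01) < 1e-12
    assert abs(warmup_lr(0.1, 100, 100, 0.1, 'linear') - 0.1) < 1e-12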
@@ -104,7 +106,7 @@ class LrUpdaterHook(Hook):
else
:
return
_get_warmup_lr
(
cur_iters
,
self
.
regular_lr
)
def
before_run
(
self
,
runner
):
def
before_run
(
self
,
runner
:
'runner.BaseRunner'
):
# NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
# it will be set according to the optimizer params
if
isinstance
(
runner
.
optimizer
,
dict
):
...
...
@@ -117,16 +119,17 @@ class LrUpdaterHook(Hook):
]
self
.
base_lr
.
update
({
k
:
_base_lr
})
else
:
for
group
in
runner
.
optimizer
.
param_groups
:
for
group
in
runner
.
optimizer
.
param_groups
:
# type: ignore
group
.
setdefault
(
'initial_lr'
,
group
[
'lr'
])
self
.
base_lr
=
[
group
[
'initial_lr'
]
for
group
in
runner
.
optimizer
.
param_groups
group
[
'initial_lr'
]
for
group
in
runner
.
optimizer
.
param_groups
# type: ignore
]
def
before_train_epoch
(
self
,
runner
):
def
before_train_epoch
(
self
,
runner
:
'runner.BaseRunner'
):
if
self
.
warmup_iters
is
None
:
epoch_len
=
len
(
runner
.
data_loader
)
self
.
warmup_iters
=
self
.
warmup_epochs
*
epoch_len
epoch_len
=
len
(
runner
.
data_loader
)
# type: ignore
self
.
warmup_iters
=
self
.
warmup_epochs
*
epoch_len
# type: ignore
if
not
self
.
by_epoch
:
return
...
...
@@ -134,8 +137,9 @@ class LrUpdaterHook(Hook):
self
.
regular_lr
=
self
.
get_regular_lr
(
runner
)
self
.
_set_lr
(
runner
,
self
.
regular_lr
)
def
before_train_iter
(
self
,
runner
):
def
before_train_iter
(
self
,
runner
:
'runner.BaseRunner'
):
cur_iter
=
runner
.
iter
assert
isinstance
(
self
.
warmup_iters
,
int
)
if
not
self
.
by_epoch
:
self
.
regular_lr
=
self
.
get_regular_lr
(
runner
)
if
self
.
warmup
is
None
or
cur_iter
>=
self
.
warmup_iters
:
...
...
@@ -157,7 +161,7 @@ class LrUpdaterHook(Hook):
class
FixedLrUpdaterHook
(
LrUpdaterHook
):
def
__init__
(
self
,
**
kwargs
):
super
(
FixedLrUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
def
get_lr
(
self
,
runner
,
base_lr
):
return
base_lr
...
...
@@ -171,13 +175,17 @@ class StepLrUpdaterHook(LrUpdaterHook):
step (int | list[int]): Step to decay the LR. If an int value is given,
regard it as the decay interval. If a list is given, decay LR at
these steps.
gamma (float
, optional
): Decay LR ratio. Default
:
0.1.
gamma (float): Decay LR ratio. Default
s to
0.1.
min_lr (float, optional): Minimum LR value to keep. If LR after decay
is lower than `min_lr`, it will be clipped to this value. If None
is given, we don't perform lr clipping. Default: None.
"""
def
__init__
(
self
,
step
,
gamma
=
0.1
,
min_lr
=
None
,
**
kwargs
):
def
__init__
(
self
,
step
:
Union
[
int
,
List
[
int
]],
gamma
:
float
=
0.1
,
min_lr
:
Optional
[
float
]
=
None
,
**
kwargs
)
->
None
:
if
isinstance
(
step
,
list
):
assert
mmcv
.
is_list_of
(
step
,
int
)
assert
all
([
s
>
0
for
s
in
step
])
...
...
@@ -188,9 +196,9 @@ class StepLrUpdaterHook(LrUpdaterHook):
self
.
step
=
step
self
.
gamma
=
gamma
self
.
min_lr
=
min_lr
super
(
StepLrUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
def
get_lr
(
self
,
runner
,
base_lr
):
def
get_lr
(
self
,
runner
:
'runner.BaseRunner'
,
base_lr
:
float
):
progress
=
runner
.
epoch
if
self
.
by_epoch
else
runner
.
iter
# calculate exponential term
...
...
@@ -213,11 +221,11 @@ class StepLrUpdaterHook(LrUpdaterHook):
@
HOOKS
.
register_module
()
class
ExpLrUpdaterHook
(
LrUpdaterHook
):
def
__init__
(
self
,
gamma
,
**
kwargs
):
def
__init__
(
self
,
gamma
:
float
,
**
kwargs
)
->
None
:
self
.
gamma
=
gamma
super
(
ExpLrUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
def
get_lr
(
self
,
runner
,
base_lr
):
def
get_lr
(
self
,
runner
:
'runner.BaseRunner'
,
base_lr
:
float
):
progress
=
runner
.
epoch
if
self
.
by_epoch
else
runner
.
iter
return
base_lr
*
self
.
gamma
**
progress
...
...
@@ -225,12 +233,15 @@ class ExpLrUpdaterHook(LrUpdaterHook):
@
HOOKS
.
register_module
()
class
PolyLrUpdaterHook
(
LrUpdaterHook
):
def
__init__
(
self
,
power
=
1.
,
min_lr
=
0.
,
**
kwargs
):
def
__init__
(
self
,
power
:
float
=
1.
,
min_lr
:
float
=
0.
,
**
kwargs
)
->
None
:
self
.
power
=
power
self
.
min_lr
=
min_lr
super
(
PolyLrUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
def
get_lr
(
self
,
runner
,
base_lr
):
def
get_lr
(
self
,
runner
:
'runner.BaseRunner'
,
base_lr
:
float
):
if
self
.
by_epoch
:
progress
=
runner
.
epoch
max_progress
=
runner
.
max_epochs
...
...
@@ -244,26 +255,37 @@ class PolyLrUpdaterHook(LrUpdaterHook):
@
HOOKS
.
register_module
()
class
InvLrUpdaterHook
(
LrUpdaterHook
):
def
__init__
(
self
,
gamma
,
power
=
1.
,
**
kwargs
):
def
__init__
(
self
,
gamma
:
float
,
power
:
float
=
1.
,
**
kwargs
)
->
None
:
self
.
gamma
=
gamma
self
.
power
=
power
super
(
InvLrUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
def
get_lr
(
self
,
runner
,
base_lr
):
def
get_lr
(
self
,
runner
:
'runner.BaseRunner'
,
base_lr
:
float
):
progress
=
runner
.
epoch
if
self
.
by_epoch
else
runner
.
iter
return
base_lr
*
(
1
+
self
.
gamma
*
progress
)
**
(
-
self
.
power
)
@
HOOKS
.
register_module
()
class
CosineAnnealingLrUpdaterHook
(
LrUpdaterHook
):
"""CosineAnnealing LR scheduler.
Args:
min_lr (float, optional): The minimum lr. Default: None.
min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
Either `min_lr` or `min_lr_ratio` should be specified.
Default: None.
"""
def
__init__
(
self
,
min_lr
=
None
,
min_lr_ratio
=
None
,
**
kwargs
):
def
__init__
(
self
,
min_lr
:
Optional
[
float
]
=
None
,
min_lr_ratio
:
Optional
[
float
]
=
None
,
**
kwargs
)
->
None
:
assert
(
min_lr
is
None
)
^
(
min_lr_ratio
is
None
)
self
.
min_lr
=
min_lr
self
.
min_lr_ratio
=
min_lr_ratio
super
(
CosineAnnealingLrUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
def
get_lr
(
self
,
runner
,
base_lr
):
def
get_lr
(
self
,
runner
:
'runner.BaseRunner'
,
base_lr
:
float
):
if
self
.
by_epoch
:
progress
=
runner
.
epoch
max_progress
=
runner
.
max_epochs
...
...
@@ -274,7 +296,7 @@ class CosineAnnealingLrUpdaterHook(LrUpdaterHook):
if
self
.
min_lr_ratio
is
not
None
:
target_lr
=
base_lr
*
self
.
min_lr_ratio
else
:
target_lr
=
self
.
min_lr
target_lr
=
self
.
min_lr
# type:ignore
return
annealing_cos
(
base_lr
,
target_lr
,
progress
/
max_progress
)
...
...
@@ -296,10 +318,10 @@ class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
"""
def
__init__
(
self
,
start_percent
=
0.75
,
min_lr
=
None
,
min_lr_ratio
=
None
,
**
kwargs
):
start_percent
:
float
=
0.75
,
min_lr
:
Optional
[
float
]
=
None
,
min_lr_ratio
:
Optional
[
float
]
=
None
,
**
kwargs
)
->
None
:
assert
(
min_lr
is
None
)
^
(
min_lr_ratio
is
None
)
if
start_percent
<
0
or
start_percent
>
1
or
not
isinstance
(
start_percent
,
float
):
...
...
@@ -309,9 +331,9 @@ class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
self
.
start_percent
=
start_percent
self
.
min_lr
=
min_lr
self
.
min_lr_ratio
=
min_lr_ratio
super
(
FlatCosineAnnealingLrUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
def
get_lr
(
self
,
runner
,
base_lr
):
def
get_lr
(
self
,
runner
:
'runner.BaseRunner'
,
base_lr
:
float
):
if
self
.
by_epoch
:
start
=
round
(
runner
.
max_epochs
*
self
.
start_percent
)
progress
=
runner
.
epoch
-
start
...
...
@@ -324,7 +346,7 @@ class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
if
self
.
min_lr_ratio
is
not
None
:
target_lr
=
base_lr
*
self
.
min_lr_ratio
else
:
target_lr
=
self
.
min_lr
target_lr
=
self
.
min_lr
# type:ignore
if
progress
<
0
:
return
base_lr
...
...
@@ -338,8 +360,8 @@ class CosineRestartLrUpdaterHook(LrUpdaterHook):
Args:
periods (list[int]): Periods for each cosine anneling cycle.
restart_weights (list[float]
, optional
): Restart weights at each
restart iteration. Default
:
[1].
restart_weights (list[float]): Restart weights at each
restart iteration. Default
s to
[1].
min_lr (float, optional): The minimum lr. Default: None.
min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
Either `min_lr` or `min_lr_ratio` should be specified.
...
...
@@ -347,11 +369,11 @@ class CosineRestartLrUpdaterHook(LrUpdaterHook):
"""
def
__init__
(
self
,
periods
,
restart_weights
=
[
1
],
min_lr
=
None
,
min_lr_ratio
=
None
,
**
kwargs
):
periods
:
List
[
int
]
,
restart_weights
:
List
[
float
]
=
[
1
],
min_lr
:
Optional
[
float
]
=
None
,
min_lr_ratio
:
Optional
[
float
]
=
None
,
**
kwargs
)
->
None
:
assert
(
min_lr
is
None
)
^
(
min_lr_ratio
is
None
)
self
.
periods
=
periods
self
.
min_lr
=
min_lr
...
...
@@ -359,13 +381,13 @@ class CosineRestartLrUpdaterHook(LrUpdaterHook):
self
.
restart_weights
=
restart_weights
assert
(
len
(
self
.
periods
)
==
len
(
self
.
restart_weights
)
),
'periods and restart_weights should have the same length.'
super
(
CosineRestartLrUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
self
.
cumulative_periods
=
[
sum
(
self
.
periods
[
0
:
i
+
1
])
for
i
in
range
(
0
,
len
(
self
.
periods
))
]
def
get_lr
(
self
,
runner
,
base_lr
):
def
get_lr
(
self
,
runner
:
'runner.BaseRunner'
,
base_lr
:
float
):
if
self
.
by_epoch
:
progress
=
runner
.
epoch
else
:
...
...
@@ -374,7 +396,7 @@ class CosineRestartLrUpdaterHook(LrUpdaterHook):
if
self
.
min_lr_ratio
is
not
None
:
target_lr
=
base_lr
*
self
.
min_lr_ratio
else
:
target_lr
=
self
.
min_lr
target_lr
=
self
.
min_lr
# type:ignore
idx
=
get_position_from_periods
(
progress
,
self
.
cumulative_periods
)
current_weight
=
self
.
restart_weights
[
idx
]
...
...
@@ -385,7 +407,7 @@ class CosineRestartLrUpdaterHook(LrUpdaterHook):
return
annealing_cos
(
base_lr
,
target_lr
,
alpha
,
current_weight
)
def
get_position_from_periods
(
iteration
,
cumulative_periods
):
def
get_position_from_periods
(
iteration
:
int
,
cumulative_periods
:
List
[
int
]
):
"""Get the position from a period list.
It will return the index of the right-closest number in the period list.
...
...
@@ -420,24 +442,29 @@ class CyclicLrUpdaterHook(LrUpdaterHook):
     3D detection area.

     Args:
-        by_epoch (bool): Whether to update LR by epoch.
-        target_ratio (tuple[float]): Relative ratio of the highest LR and the
-            lowest LR to the initial LR.
-        cyclic_times (int): Number of cycles during training
-        step_ratio_up (float): The ratio of the increasing process of LR in
-            the total cycle.
-        anneal_strategy (str): {'cos', 'linear'}
+        by_epoch (bool, optional): Whether to update LR by epoch.
+        target_ratio (tuple[float], optional): Relative ratio of the highest
+            LR and the lowest LR to the initial LR.
+        cyclic_times (int, optional): Number of cycles during training
+        step_ratio_up (float, optional): The ratio of the increasing process
+            of LR in the total cycle.
+        anneal_strategy (str, optional): {'cos', 'linear'}
             Specifies the annealing strategy: 'cos' for cosine annealing,
             'linear' for linear annealing. Default: 'cos'.
+        gamma (float, optional): Cycle decay ratio. Default: 1.
+            It takes values in the range (0, 1]. The difference between the
+            maximum learning rate and the minimum learning rate decreases
+            periodically when it is less than 1. `New in version 1.4.4.`
     """
     def __init__(self,
-                 by_epoch=False,
-                 target_ratio=(10, 1e-4),
-                 cyclic_times=1,
-                 step_ratio_up=0.4,
-                 anneal_strategy='cos',
-                 **kwargs):
+                 by_epoch: bool = False,
+                 target_ratio: Union[float, tuple] = (10, 1e-4),
+                 cyclic_times: int = 1,
+                 step_ratio_up: float = 0.4,
+                 anneal_strategy: str = 'cos',
+                 gamma: float = 1,
+                 **kwargs) -> None:
         if isinstance(target_ratio, float):
             target_ratio = (target_ratio, target_ratio / 1e5)
         elif isinstance(target_ratio, tuple):
...
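As a usage sketch, the hook is typically driven from an mmcv-style config; the values here are illustrative and `policy='cyclic'` is resolved to this hook by the LR-updater registry:

lr_config = dict(
    policy='cyclic',
    by_epoch=False,           # the hook currently supports iter-based updates only
    target_ratio=(10, 1e-4),  # highest and lowest LR relative to the initial LR
    cyclic_times=1,           # number of cycles over the whole run
    step_ratio_up=0.4,        # 40% of each cycle ramps up, the rest anneals down
    gamma=1.0)                # <1 shrinks the max/min gap every cycle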
@@ -451,43 +478,60 @@ class CyclicLrUpdaterHook(LrUpdaterHook):
             '"target_ratio" must be list or tuple of two floats'
         assert 0 <= step_ratio_up < 1.0, \
             '"step_ratio_up" must be in range [0,1)'
+        assert 0 < gamma <= 1, \
+            '"gamma" must be in range (0, 1]'

         self.target_ratio = target_ratio
         self.cyclic_times = cyclic_times
         self.step_ratio_up = step_ratio_up
-        self.lr_phases = []  # init lr_phases
+        self.gamma = gamma
+        self.max_iter_per_phase = None
+        self.lr_phases: list = []  # init lr_phases
         # validate anneal_strategy
         if anneal_strategy not in ['cos', 'linear']:
             raise ValueError('anneal_strategy must be one of "cos" or '
                              f'"linear", instead got {anneal_strategy}')
         elif anneal_strategy == 'cos':
-            self.anneal_func = annealing_cos
+            self.anneal_func: Callable[[float, float, float],
+                                       float] = annealing_cos
         elif anneal_strategy == 'linear':
             self.anneal_func = annealing_linear

         assert not by_epoch, \
             'currently only support "by_epoch" = False'
-        super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)
+        super().__init__(by_epoch, **kwargs)

-    def before_run(self, runner):
-        super(CyclicLrUpdaterHook, self).before_run(runner)
+    def before_run(self, runner: 'runner.BaseRunner'):
+        super().before_run(runner)
         # initiate lr_phases
         # total lr_phases are separated as up and down
-        max_iter_per_phase = runner.max_iters // self.cyclic_times
-        iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
-        self.lr_phases.append(
-            [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
+        self.max_iter_per_phase = runner.max_iters // self.cyclic_times
+        iter_up_phase = int(self.step_ratio_up *
+                            self.max_iter_per_phase)  # type: ignore
+        self.lr_phases.append([0, iter_up_phase, 1, self.target_ratio[0]])
         self.lr_phases.append([
-            iter_up_phase, max_iter_per_phase, max_iter_per_phase,
-            self.target_ratio[0], self.target_ratio[1]
+            iter_up_phase, self.max_iter_per_phase, self.target_ratio[0],
+            self.target_ratio[1]
         ])

-    def get_lr(self, runner, base_lr):
-        curr_iter = runner.iter
-        for (start_iter, end_iter, max_iter_per_phase, start_ratio,
-             end_ratio) in self.lr_phases:
-            curr_iter %= max_iter_per_phase
+    def get_lr(self, runner: 'runner.BaseRunner', base_lr: float):
+        curr_iter = runner.iter % self.max_iter_per_phase  # type: ignore
+        curr_cycle = runner.iter // self.max_iter_per_phase  # type: ignore
+        # Update weight decay
+        scale = self.gamma**curr_cycle
+        for (start_iter, end_iter, start_ratio, end_ratio) in self.lr_phases:
             if start_iter <= curr_iter < end_iter:
+                # Apply cycle scaling to gradually reduce the difference
+                # between max_lr and base lr. The target end_ratio can be
+                # expressed as:
+                # end_ratio = (base_lr + scale * (max_lr - base_lr)) / base_lr
+                # iteration: 0-iter_up_phase:
+                if start_iter == 0:
+                    end_ratio = 1 - scale + end_ratio * scale
+                # iteration: iter_up_phase-self.max_iter_per_phase
+                else:
+                    start_ratio = 1 - scale + start_ratio * scale
                 progress = curr_iter - start_iter
                 return self.anneal_func(base_lr * start_ratio,
                                         base_lr * end_ratio,
...
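To see what the new cycle scaling does, a worked example under assumed values (gamma=0.5, target_ratio[0]=10):

gamma = 0.5
max_ratio = 10  # target_ratio[0]
for curr_cycle in range(3):
    scale = gamma**curr_cycle
    # up-phase end_ratio after scaling, as computed in the hunk above
    print(curr_cycle, 1 - scale + max_ratio * scale)
# cycle 0 -> 10.0, cycle 1 -> 5.5, cycle 2 -> 3.25: the peak LR ratio
# decays toward 1 (i.e. the base LR) on every new cycle.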
@@ -530,14 +574,14 @@ class OneCycleLrUpdaterHook(LrUpdaterHook):
"""
def
__init__
(
self
,
max_lr
,
total_steps
=
None
,
pct_start
=
0.3
,
anneal_strategy
=
'cos'
,
div_factor
=
25
,
final_div_factor
=
1e4
,
three_phase
=
False
,
**
kwargs
):
max_lr
:
Union
[
float
,
List
]
,
total_steps
:
Optional
[
int
]
=
None
,
pct_start
:
float
=
0.3
,
anneal_strategy
:
str
=
'cos'
,
div_factor
:
float
=
25
,
final_div_factor
:
float
=
1e4
,
three_phase
:
bool
=
False
,
**
kwargs
)
->
None
:
# validate by_epoch, currently only support by_epoch = False
if
'by_epoch'
not
in
kwargs
:
kwargs
[
'by_epoch'
]
=
False
...
...
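A configuration sketch for the one-cycle policy (illustrative values; the derived quantities follow the hook's documented semantics):

lr_config = dict(
    policy='OneCycle',
    max_lr=0.01,            # peak LR reached after the ramp-up phase
    pct_start=0.3,          # 30% of total steps spent increasing the LR
    anneal_strategy='cos',
    div_factor=25,          # initial_lr = max_lr / div_factor
    final_div_factor=1e4,   # min_lr = initial_lr / final_div_factor
    three_phase=False)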
@@ -563,16 +607,17 @@ class OneCycleLrUpdaterHook(LrUpdaterHook):
             raise ValueError('anneal_strategy must be one of "cos" or '
                              f'"linear", instead got {anneal_strategy}')
         elif anneal_strategy == 'cos':
-            self.anneal_func = annealing_cos
+            self.anneal_func: Callable[[float, float, float],
+                                       float] = annealing_cos
         elif anneal_strategy == 'linear':
             self.anneal_func = annealing_linear
         self.div_factor = div_factor
         self.final_div_factor = final_div_factor
         self.three_phase = three_phase
-        self.lr_phases = []  # init lr_phases
-        super(OneCycleLrUpdaterHook, self).__init__(**kwargs)
+        self.lr_phases: list = []  # init lr_phases
+        super().__init__(**kwargs)

-    def before_run(self, runner):
+    def before_run(self, runner: 'runner.BaseRunner'):
         if hasattr(self, 'total_steps'):
             total_steps = self.total_steps
         else:
...
@@ -594,7 +639,8 @@ class OneCycleLrUpdaterHook(LrUpdaterHook):
         k = type(runner.optimizer).__name__
         _max_lr = format_param(k, runner.optimizer, self._max_lr)
         self.base_lr = [lr / self.div_factor for lr in _max_lr]
-        for group, lr in zip(runner.optimizer.param_groups, self.base_lr):
+        optim_param_groups = runner.optimizer.param_groups  # type: ignore
+        for group, lr in zip(optim_param_groups, self.base_lr):
             group.setdefault('initial_lr', lr)

         if self.three_phase:
...
self
.
lr_phases
.
append
(
[
total_steps
-
1
,
self
.
div_factor
,
1
/
self
.
final_div_factor
])
def
get_lr
(
self
,
runner
,
base_lr
):
def
get_lr
(
self
,
runner
:
'runner.BaseRunner'
,
base_lr
:
float
):
curr_iter
=
runner
.
iter
start_iter
=
0
for
i
,
(
end_iter
,
start_lr
,
end_lr
)
in
enumerate
(
self
.
lr_phases
):
...
...
@@ -624,7 +670,45 @@ class OneCycleLrUpdaterHook(LrUpdaterHook):
         return lr


-def annealing_cos(start, end, factor, weight=1):
+@HOOKS.register_module()
+class LinearAnnealingLrUpdaterHook(LrUpdaterHook):
+    """Linear annealing LR Scheduler decays the learning rate of each
+    parameter group linearly.
+
+    Args:
+        min_lr (float, optional): The minimum lr. Default: None.
+        min_lr_ratio (float, optional): The ratio of minimum lr to the base
+            lr. Either `min_lr` or `min_lr_ratio` should be specified.
+            Default: None.
+    """
+
+    def __init__(self,
+                 min_lr: Optional[float] = None,
+                 min_lr_ratio: Optional[float] = None,
+                 **kwargs):
+        assert (min_lr is None) ^ (min_lr_ratio is None)
+        self.min_lr = min_lr
+        self.min_lr_ratio = min_lr_ratio
+        super().__init__(**kwargs)
+
+    def get_lr(self, runner: 'runner.BaseRunner', base_lr: float):
+        if self.by_epoch:
+            progress = runner.epoch
+            max_progress = runner.max_epochs
+        else:
+            progress = runner.iter
+            max_progress = runner.max_iters
+        if self.min_lr_ratio is not None:
+            target_lr = base_lr * self.min_lr_ratio
+        else:
+            target_lr = self.min_lr  # type:ignore
+        return annealing_linear(base_lr, target_lr, progress / max_progress)
+
+
+def annealing_cos(start: float,
+                  end: float,
+                  factor: float,
+                  weight: float = 1.) -> float:
     """Calculate annealing cos learning rate.

     Cosine anneal from `weight * start + (1 - weight) * end` to `end` as
...
@@ -642,7 +726,7 @@ def annealing_cos(start, end, factor, weight=1):
     return end + 0.5 * weight * (start - end) * cos_out


-def annealing_linear(start, end, factor):
+def annealing_linear(start: float, end: float, factor: float) -> float:
     """Calculate annealing linear learning rate.

     Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.
...
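For reference, a self-contained sketch of the two annealing helpers (the cosine body assumes the standard `cos_out = cos(pi * factor) + 1` term that pairs with the `return` line shown above):

import math

def annealing_cos_sketch(start: float, end: float, factor: float,
                         weight: float = 1.) -> float:
    # `factor` runs from 0.0 to 1.0 over the schedule; cos_out runs 2 -> 0
    cos_out = math.cos(math.pi * factor) + 1
    return end + 0.5 * weight * (start - end) * cos_out

def annealing_linear_sketch(start: float, end: float, factor: float) -> float:
    return start + (end - start) * factor

assert annealing_cos_sketch(0.1, 0.0, 0.0) == 0.1        # schedule start
assert abs(annealing_cos_sketch(0.1, 0.0, 1.0)) < 1e-12  # schedule end
assert annealing_linear_sketch(0.1, 0.0, 0.5) == 0.05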
mmcv/runner/hooks/memory.py  View file @ fdeee889
...
@@ -7,7 +7,10 @@ from .hook import HOOKS, Hook
 @HOOKS.register_module()
 class EmptyCacheHook(Hook):

-    def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
+    def __init__(self,
+                 before_epoch: bool = False,
+                 after_epoch: bool = True,
+                 after_iter: bool = False):
         self._before_epoch = before_epoch
         self._after_epoch = after_epoch
         self._after_iter = after_iter
...
mmcv/runner/hooks/momentum_updater.py  View file @ fdeee889
 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Callable, Dict, List, Optional, Tuple, Union
+
 import mmcv
 from .hook import HOOKS, Hook
 from .lr_updater import annealing_cos, annealing_linear, format_param
...
@@ -7,10 +9,10 @@ from .lr_updater import annealing_cos, annealing_linear, format_param
 class MomentumUpdaterHook(Hook):

     def __init__(self,
-                 by_epoch=True,
-                 warmup=None,
-                 warmup_iters=0,
-                 warmup_ratio=0.9):
+                 by_epoch: bool = True,
+                 warmup: Optional[str] = None,
+                 warmup_iters: int = 0,
+                 warmup_ratio: float = 0.9):
         # validate the "warmup" argument
         if warmup is not None:
             if warmup not in ['constant', 'linear', 'exp']:
...
@@ -28,9 +30,10 @@ class MomentumUpdaterHook(Hook):
         self.warmup_iters = warmup_iters
         self.warmup_ratio = warmup_ratio

-        self.base_momentum = []  # initial momentum for all param groups
-        self.regular_momentum = [
-        ]  # expected momentum if no warming up is performed
+        # initial momentum for all param groups
+        self.base_momentum: Union[list, dict] = []
+        # expected momentum if no warming up is performed
+        self.regular_momentum: Union[list, dict] = []

     def _set_momentum(self, runner, momentum_groups):
         if isinstance(runner.optimizer, dict):
...
@@ -49,44 +52,52 @@ class MomentumUpdaterHook(Hook):
             elif 'betas' in param_group.keys():
                 param_group['betas'] = (mom, param_group['betas'][1])

-    def get_momentum(self, runner, base_momentum):
+    def get_momentum(self, runner, base_momentum) -> float:
         raise NotImplementedError

-    def get_regular_momentum(self, runner):
+    def get_regular_momentum(self, runner) -> Union[list, Dict[str, list]]:
         if isinstance(runner.optimizer, dict):
-            momentum_groups = {}
+            assert isinstance(self.base_momentum, dict)
+            momentum_groups: Dict[str, List[float]] = {}
             for k in runner.optimizer.keys():
-                _momentum_group = [
+                _momentum_group: List[float] = [
                     self.get_momentum(runner, _base_momentum)
                     for _base_momentum in self.base_momentum[k]
                 ]
                 momentum_groups.update({k: _momentum_group})
             return momentum_groups
         else:
+            assert isinstance(self.base_momentum, list)
             return [
                 self.get_momentum(runner, _base_momentum)
                 for _base_momentum in self.base_momentum
             ]

-    def get_warmup_momentum(self, cur_iters):
+    def get_warmup_momentum(
+            self,
+            cur_iters: int) -> Union[List[float], Dict[str, List[float]]]:

         def _get_warmup_momentum(cur_iters, regular_momentum):
             if self.warmup == 'constant':
                 warmup_momentum = [
                     _momentum / self.warmup_ratio
-                    for _momentum in self.regular_momentum
+                    for _momentum in regular_momentum
                 ]
             elif self.warmup == 'linear':
                 k = (1 - cur_iters / self.warmup_iters) * (
                     1 - self.warmup_ratio)
                 warmup_momentum = [
-                    _momentum / (1 - k) for _momentum in self.regular_momentum
+                    _momentum / (1 - k) for _momentum in regular_momentum
                 ]
             elif self.warmup == 'exp':
                 k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                 warmup_momentum = [
-                    _momentum / k for _momentum in self.regular_momentum
+                    _momentum / k for _momentum in regular_momentum
                 ]
             else:
                 raise ValueError(
                     'Expected values of `self.warmup` to be "constant", '
                     f'"linear", or "exp", got {self.warmup}')
             return warmup_momentum

         if isinstance(self.regular_momentum, dict):
...
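For intuition, the linear-warmup branch above divides each regular momentum by (1 - k); a worked example with assumed values:

warmup_iters = 500
warmup_ratio = 0.9
regular_momentum = [0.9]

for cur_iters in (0, 250, 500):
    k = (1 - cur_iters / warmup_iters) * (1 - warmup_ratio)
    print(cur_iters, [_momentum / (1 - k) for _momentum in regular_momentum])
# iter 0   -> [1.0]     (0.9 / 0.9)
# iter 250 -> [~0.947]
# iter 500 -> [0.9]     (warmup converges to the regular momentum)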
@@ -128,15 +139,15 @@ class MomentumUpdaterHook(Hook):
     def before_train_epoch(self, runner):
         if not self.by_epoch:
             return
-        self.regular_mom = self.get_regular_momentum(runner)
-        self._set_momentum(runner, self.regular_mom)
+        self.regular_momentum = self.get_regular_momentum(runner)
+        self._set_momentum(runner, self.regular_momentum)

     def before_train_iter(self, runner):
         cur_iter = runner.iter
         if not self.by_epoch:
-            self.regular_mom = self.get_regular_momentum(runner)
+            self.regular_momentum = self.get_regular_momentum(runner)
             if self.warmup is None or cur_iter >= self.warmup_iters:
-                self._set_momentum(runner, self.regular_mom)
+                self._set_momentum(runner, self.regular_momentum)
             else:
                 warmup_momentum = self.get_warmup_momentum(cur_iter)
                 self._set_momentum(runner, warmup_momentum)
...
@@ -144,7 +155,7 @@ class MomentumUpdaterHook(Hook):
         if self.warmup is None or cur_iter > self.warmup_iters:
             return
         elif cur_iter == self.warmup_iters:
-            self._set_momentum(runner, self.regular_mom)
+            self._set_momentum(runner, self.regular_momentum)
         else:
             warmup_momentum = self.get_warmup_momentum(cur_iter)
             self._set_momentum(runner, warmup_momentum)
...
@@ -165,7 +176,11 @@ class StepMomentumUpdaterHook(MomentumUpdaterHook):
             Default: None.
     """

-    def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs):
+    def __init__(self,
+                 step: Union[int, List[int]],
+                 gamma: float = 0.5,
+                 min_momentum: Optional[float] = None,
+                 **kwargs):
         if isinstance(step, list):
             assert mmcv.is_list_of(step, int)
             assert all([s > 0 for s in step])
...
@@ -176,9 +191,9 @@ class StepMomentumUpdaterHook(MomentumUpdaterHook):
         self.step = step
         self.gamma = gamma
         self.min_momentum = min_momentum
-        super(StepMomentumUpdaterHook, self).__init__(**kwargs)
+        super().__init__(**kwargs)

-    def get_momentum(self, runner, base_momentum):
+    def get_momentum(self, runner, base_momentum: float) -> float:
         progress = runner.epoch if self.by_epoch else runner.iter

         # calculate exponential term
...
@
HOOKS
.
register_module
()
class
CosineAnnealingMomentumUpdaterHook
(
MomentumUpdaterHook
):
"""Cosine annealing LR Momentum decays the Momentum of each parameter group
linearly.
Args:
min_momentum (float, optional): The minimum momentum. Default: None.
min_momentum_ratio (float, optional): The ratio of minimum momentum to
the base momentum. Either `min_momentum` or `min_momentum_ratio`
should be specified. Default: None.
"""
def
__init__
(
self
,
min_momentum
=
None
,
min_momentum_ratio
=
None
,
**
kwargs
):
def
__init__
(
self
,
min_momentum
:
Optional
[
float
]
=
None
,
min_momentum_ratio
:
Optional
[
float
]
=
None
,
**
kwargs
):
assert
(
min_momentum
is
None
)
^
(
min_momentum_ratio
is
None
)
self
.
min_momentum
=
min_momentum
self
.
min_momentum_ratio
=
min_momentum_ratio
super
(
CosineAnnealingMomentumUpdaterHook
,
self
).
__init__
(
**
kwargs
)
super
().
__init__
(
**
kwargs
)
def
get_momentum
(
self
,
runner
,
base_momentum
)
:
def
get_momentum
(
self
,
runner
,
base_momentum
:
float
)
->
float
:
if
self
.
by_epoch
:
progress
=
runner
.
epoch
max_progress
=
runner
.
max_epochs
...
...
@@ -217,11 +244,49 @@ class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook):
if
self
.
min_momentum_ratio
is
not
None
:
target_momentum
=
base_momentum
*
self
.
min_momentum_ratio
else
:
assert
self
.
min_momentum
is
not
None
target_momentum
=
self
.
min_momentum
return
annealing_cos
(
base_momentum
,
target_momentum
,
progress
/
max_progress
)
@
HOOKS
.
register_module
()
class
LinearAnnealingMomentumUpdaterHook
(
MomentumUpdaterHook
):
"""Linear annealing LR Momentum decays the Momentum of each parameter group
linearly.
Args:
min_momentum (float, optional): The minimum momentum. Default: None.
min_momentum_ratio (float, optional): The ratio of minimum momentum to
the base momentum. Either `min_momentum` or `min_momentum_ratio`
should be specified. Default: None.
"""
def
__init__
(
self
,
min_momentum
:
Optional
[
float
]
=
None
,
min_momentum_ratio
:
Optional
[
float
]
=
None
,
**
kwargs
):
assert
(
min_momentum
is
None
)
^
(
min_momentum_ratio
is
None
)
self
.
min_momentum
=
min_momentum
self
.
min_momentum_ratio
=
min_momentum_ratio
super
().
__init__
(
**
kwargs
)
def
get_momentum
(
self
,
runner
,
base_momentum
:
float
)
->
float
:
if
self
.
by_epoch
:
progress
=
runner
.
epoch
max_progress
=
runner
.
max_epochs
else
:
progress
=
runner
.
iter
max_progress
=
runner
.
max_iters
if
self
.
min_momentum_ratio
is
not
None
:
target_momentum
=
base_momentum
*
self
.
min_momentum_ratio
else
:
assert
self
.
min_momentum
is
not
None
target_momentum
=
self
.
min_momentum
return
annealing_linear
(
base_momentum
,
target_momentum
,
progress
/
max_progress
)
@
HOOKS
.
register_module
()
class
CyclicMomentumUpdaterHook
(
MomentumUpdaterHook
):
"""Cyclic momentum Scheduler.
...
...
@@ -232,20 +297,29 @@ class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
This momentum scheduler usually used together with the CyclicLRUpdater
to improve the performance in the 3D detection area.
A
ttribute
s:
A
rg
s:
target_ratio (tuple[float]): Relative ratio of the lowest momentum and
the highest momentum to the initial momentum.
cyclic_times (int): Number of cycles during training
step_ratio_up (float): The ratio of the increasing process of momentum
in the total cycle.
by_epoch (bool): Whether to update momentum by epoch.
anneal_strategy (str, optional): {'cos', 'linear'}
Specifies the annealing strategy: 'cos' for cosine annealing,
'linear' for linear annealing. Default: 'cos'.
gamma (float, optional): Cycle decay ratio. Default: 1.
It takes values in the range (0, 1]. The difference between the
maximum learning rate and the minimum learning rate decreases
periodically when it is less than 1. `New in version 1.4.4.`
"""
def
__init__
(
self
,
by_epoch
=
False
,
target_ratio
=
(
0.85
/
0.95
,
1
),
cyclic_times
=
1
,
step_ratio_up
=
0.4
,
by_epoch
:
bool
=
False
,
target_ratio
:
Tuple
[
float
,
float
]
=
(
0.85
/
0.95
,
1.
),
cyclic_times
:
int
=
1
,
step_ratio_up
:
float
=
0.4
,
anneal_strategy
:
str
=
'cos'
,
gamma
:
float
=
1.
,
**
kwargs
):
if
isinstance
(
target_ratio
,
float
):
target_ratio
=
(
target_ratio
,
target_ratio
/
1e5
)
...
...
@@ -264,35 +338,60 @@ class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
         self.target_ratio = target_ratio
         self.cyclic_times = cyclic_times
         self.step_ratio_up = step_ratio_up
-        self.momentum_phases = []  # init momentum_phases
+        self.gamma = gamma
+        self.momentum_phases: List[list] = []  # init momentum_phases
+        self.anneal_func: Callable[[float, float, float], float]
+        if anneal_strategy not in ['cos', 'linear']:
+            raise ValueError('anneal_strategy must be one of "cos" or '
+                             f'"linear", instead got {anneal_strategy}')
+        elif anneal_strategy == 'cos':
+            self.anneal_func = annealing_cos
+        elif anneal_strategy == 'linear':
+            self.anneal_func = annealing_linear
         # currently only support by_epoch=False
         assert not by_epoch, \
             'currently only support "by_epoch" = False'
-        super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)
+        super().__init__(by_epoch, **kwargs)

     def before_run(self, runner):
-        super(CyclicMomentumUpdaterHook, self).before_run(runner)
+        super().before_run(runner)
         # initiate momentum_phases
         # total momentum_phases are separated as up and down
         max_iter_per_phase = runner.max_iters // self.cyclic_times
         iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
+        self.max_iter_per_phase = max_iter_per_phase
         self.momentum_phases.append(
-            [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
+            [0, iter_up_phase, 1, self.target_ratio[0]])
         self.momentum_phases.append([
-            iter_up_phase, max_iter_per_phase, max_iter_per_phase,
-            self.target_ratio[0], self.target_ratio[1]
+            iter_up_phase, max_iter_per_phase, self.target_ratio[0],
+            self.target_ratio[1]
         ])

-    def get_momentum(self, runner, base_momentum):
-        curr_iter = runner.iter
-        for (start_iter, end_iter, max_iter_per_phase, start_ratio,
-             end_ratio) in self.momentum_phases:
-            curr_iter %= max_iter_per_phase
+    def get_momentum(self, runner, base_momentum: float) -> float:
+        curr_iter = runner.iter % self.max_iter_per_phase
+        curr_cycle = runner.iter // self.max_iter_per_phase
+        scale = self.gamma**curr_cycle
+        for (start_iter, end_iter, start_ratio, end_ratio) \
+                in self.momentum_phases:
             if start_iter <= curr_iter < end_iter:
+                # Apply cycle scaling to gradually reduce the difference
+                # between max_momentum and base momentum. The target end_ratio
+                # can be expressed as:
+                # end_ratio = (base_momentum + scale * \
+                # (max_momentum - base_momentum)) / base_momentum
+                # iteration: 0-iter_up_phase:
+                if start_iter == 0:
+                    end_ratio = 1 - scale + end_ratio * scale
+                # iteration: iter_up_phase-self.max_iter_per_phase
+                else:
+                    start_ratio = 1 - scale + start_ratio * scale
                 progress = curr_iter - start_iter
-                return annealing_cos(base_momentum * start_ratio,
-                                     base_momentum * end_ratio,
-                                     progress / (end_iter - start_iter))
+                return self.anneal_func(base_momentum * start_ratio,
+                                        base_momentum * end_ratio,
+                                        progress / (end_iter - start_iter))
+        raise RuntimeError('The method should return in the for-loop and '
+                           'should not be executed until this')


 @HOOKS.register_module()
...
@@ -331,11 +430,11 @@ class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
"""
def
__init__
(
self
,
base_momentum
=
0.85
,
max_momentum
=
0.95
,
pct_start
=
0.3
,
anneal_strategy
=
'cos'
,
three_phase
=
False
,
base_momentum
:
Union
[
float
,
list
,
dict
]
=
0.85
,
max_momentum
:
Union
[
float
,
list
,
dict
]
=
0.95
,
pct_start
:
float
=
0.3
,
anneal_strategy
:
str
=
'cos'
,
three_phase
:
bool
=
False
,
**
kwargs
):
# validate by_epoch, currently only support by_epoch=False
if
'by_epoch'
not
in
kwargs
:
...
...
@@ -357,6 +456,7 @@ class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
                              f'got {pct_start}')
         self.pct_start = pct_start
         # validate anneal_strategy
+        self.anneal_func: Callable[[float, float, float], float]
         if anneal_strategy not in ['cos', 'linear']:
             raise ValueError('anneal_strategy must by one of "cos" or '
                              f'"linear", instead got {anneal_strategy}')
...
@@ -365,8 +465,8 @@ class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
         elif anneal_strategy == 'linear':
             self.anneal_func = annealing_linear
         self.three_phase = three_phase
-        self.momentum_phases = []  # init momentum_phases
-        super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)
+        self.momentum_phases: List[dict] = []  # init momentum_phases
+        super().__init__(**kwargs)

     def before_run(self, runner):
         if isinstance(runner.optimizer, dict):
...
@@ -462,9 +562,10 @@ class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
             elif 'betas' in param_group.keys():
                 param_group['betas'] = (mom, param_group['betas'][1])

-    def get_momentum(self, runner, param_group):
+    def get_momentum(self, runner, param_group: Dict[str, float]) -> float:
         curr_iter = runner.iter
         start_iter = 0
         momentum = 0.
         for i, phase in enumerate(self.momentum_phases):
             end_iter = phase['end_iter']
             if curr_iter <= end_iter or i == len(self.momentum_phases) - 1:
...
mmcv/runner/hooks/optimizer.py  View file @ fdeee889
 # Copyright (c) OpenMMLab. All rights reserved.
 import copy
+import logging
 from collections import defaultdict
 from itertools import chain
+from typing import Optional, Union

 import torch.nn as nn
+from torch import Tensor
 from torch.nn.utils import clip_grad

 from mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version
...
@@ -20,9 +24,29 @@ except ImportError:
 @HOOKS.register_module()
 class OptimizerHook(Hook):
+    """A hook contains custom operations for the optimizer.

-    def __init__(self, grad_clip=None):
+    Args:
+        grad_clip (dict, optional): A config dict to control the clip_grad.
+            Default: None.
+        detect_anomalous_params (bool): This option is only used for
+            debugging which will slow down the training speed.
+            Detect anomalous parameters that are not included in
+            the computational graph with `loss` as the root.
+            There are two cases
+
+                - Parameters were not used during
+                  forward pass.
+                - Parameters were not used to produce
+                  loss.
+
+            Default: False.
+    """
+
+    def __init__(self,
+                 grad_clip: Optional[dict] = None,
+                 detect_anomalous_params: bool = False):
         self.grad_clip = grad_clip
+        self.detect_anomalous_params = detect_anomalous_params

     def clip_grads(self, params):
         params = list(
...
@@ -32,7 +56,10 @@ class OptimizerHook(Hook):
     def after_train_iter(self, runner):
         runner.optimizer.zero_grad()
+        if self.detect_anomalous_params:
+            self.detect_anomalous_parameters(runner.outputs['loss'], runner)
         runner.outputs['loss'].backward()
+
         if self.grad_clip is not None:
             grad_norm = self.clip_grads(runner.model.parameters())
             if grad_norm is not None:
...
@@ -41,6 +68,32 @@ class OptimizerHook(Hook):
                                     runner.outputs['num_samples'])
         runner.optimizer.step()

+    def detect_anomalous_parameters(self, loss: Tensor, runner) -> None:
+        logger = runner.logger
+        parameters_in_graph = set()
+        visited = set()
+
+        def traverse(grad_fn):
+            if grad_fn is None:
+                return
+            if grad_fn not in visited:
+                visited.add(grad_fn)
+                if hasattr(grad_fn, 'variable'):
+                    parameters_in_graph.add(grad_fn.variable)
+                parents = grad_fn.next_functions
+                if parents is not None:
+                    for parent in parents:
+                        grad_fn = parent[0]
+                        traverse(grad_fn)
+
+        traverse(loss.grad_fn)
+        for n, p in runner.model.named_parameters():
+            if p not in parameters_in_graph and p.requires_grad:
+                logger.log(
+                    level=logging.ERROR,
+                    msg=f'{n} with shape {p.size()} is not '
+                    f'in the computational graph \n')


 @HOOKS.register_module()
 class GradientCumulativeOptimizerHook(OptimizerHook):
...
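A usage sketch for the new debugging switch (config fragment; grad_clip values are illustrative):

optimizer_config = dict(
    grad_clip=dict(max_norm=35, norm_type=2),
    # logs every parameter that requires grad but is missing from the
    # computational graph rooted at `loss`; debug only, slows training
    detect_anomalous_params=True)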
@@ -61,8 +114,8 @@ class GradientCumulativeOptimizerHook(OptimizerHook):
     >>> optim_hook = OptimizerHook()
     """

-    def __init__(self, cumulative_iters=1, **kwargs):
-        super(GradientCumulativeOptimizerHook, self).__init__(**kwargs)
+    def __init__(self, cumulative_iters: int = 1, **kwargs):
+        super().__init__(**kwargs)

         assert isinstance(cumulative_iters, int) and cumulative_iters > 0, \
             f'cumulative_iters only accepts positive int, but got ' \
...
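The class docstring's example boils down to keeping the effective batch size constant; a sketch of that equivalence:

# 64 images per GPU in a single optimizer step:
#   optimizer_config = dict(type='OptimizerHook')
# 16 images per GPU with gradients accumulated over 4 iterations is
# roughly equivalent, since 16 * 4 == 64:
optimizer_config = dict(
    type='GradientCumulativeOptimizerHook', cumulative_iters=4)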
@@ -73,7 +126,7 @@ class GradientCumulativeOptimizerHook(OptimizerHook):
         self.remainder_iters = 0
         self.initialized = False

-    def has_batch_norm(self, module):
+    def has_batch_norm(self, module: nn.Module) -> bool:
         if isinstance(module, _BatchNorm):
             return True
         for m in module.children():
...
@@ -160,11 +213,11 @@ if (TORCH_VERSION != 'parrots'
"""
def
__init__
(
self
,
grad_clip
=
None
,
coalesce
=
True
,
bucket_size_mb
=
-
1
,
loss_scale
=
512.
,
distributed
=
True
):
grad_clip
:
Optional
[
dict
]
=
None
,
coalesce
:
bool
=
True
,
bucket_size_mb
:
int
=
-
1
,
loss_scale
:
Union
[
float
,
str
,
dict
]
=
512.
,
distributed
:
bool
=
True
):
self
.
grad_clip
=
grad_clip
self
.
coalesce
=
coalesce
self
.
bucket_size_mb
=
bucket_size_mb
...
...
@@ -181,7 +234,7 @@ if (TORCH_VERSION != 'parrots'
                 raise ValueError('loss_scale must be of type float, dict, or '
                                  f'"dynamic", got {loss_scale}')

-        def before_run(self, runner):
+        def before_run(self, runner) -> None:
             """Preparing steps before Mixed Precision Training."""
             # wrap model mode to fp16
             wrap_fp16_model(runner.model)
...
@@ -190,7 +243,8 @@ if (TORCH_VERSION != 'parrots'
                 scaler_state_dict = runner.meta['fp16']['loss_scaler']
                 self.loss_scaler.load_state_dict(scaler_state_dict)

-        def copy_grads_to_fp32(self, fp16_net, fp32_weights):
+        def copy_grads_to_fp32(self, fp16_net: nn.Module,
+                               fp32_weights: Tensor) -> None:
             """Copy gradients from fp16 model to fp32 weight copy."""
             for fp32_param, fp16_param in zip(fp32_weights,
                                               fp16_net.parameters()):
...
@@ -200,13 +254,14 @@ if (TORCH_VERSION != 'parrots'
                         fp32_param.size())
                     fp32_param.grad.copy_(fp16_param.grad)

-        def copy_params_to_fp16(self, fp16_net, fp32_weights):
+        def copy_params_to_fp16(self, fp16_net: nn.Module,
+                                fp32_weights: Tensor) -> None:
             """Copy updated params from fp32 weight copy to fp16 model."""
             for fp16_param, fp32_param in zip(fp16_net.parameters(),
                                               fp32_weights):
                 fp16_param.data.copy_(fp32_param.data)

-        def after_train_iter(self, runner):
+        def after_train_iter(self, runner) -> None:
             """Backward optimization steps for Mixed Precision Training. For
             dynamic loss scaling, please refer to
             https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler.
...
@@ -249,10 +304,9 @@ if (TORCH_VERSION != 'parrots'
"""
def
__init__
(
self
,
*
args
,
**
kwargs
):
super
(
GradientCumulativeFp16OptimizerHook
,
self
).
__init__
(
*
args
,
**
kwargs
)
super
().
__init__
(
*
args
,
**
kwargs
)
def
after_train_iter
(
self
,
runner
):
def
after_train_iter
(
self
,
runner
)
->
None
:
if
not
self
.
initialized
:
self
.
_init
(
runner
)
...
...
@@ -294,7 +348,7 @@ if (TORCH_VERSION != 'parrots'
 else:

     @HOOKS.register_module()
-    class Fp16OptimizerHook(OptimizerHook):
+    class Fp16OptimizerHook(OptimizerHook):  # type: ignore
         """FP16 optimizer hook (mmcv's implementation).

         The steps of fp16 optimizer is as follows.
...
@@ -316,11 +370,11 @@ else:
"""
def
__init__
(
self
,
grad_clip
=
None
,
coalesce
=
True
,
bucket_size_mb
=
-
1
,
loss_scale
=
512.
,
distributed
=
True
):
grad_clip
:
Optional
[
dict
]
=
None
,
coalesce
:
bool
=
True
,
bucket_size_mb
:
int
=
-
1
,
loss_scale
:
Union
[
float
,
str
,
dict
]
=
512.
,
distributed
:
bool
=
True
):
self
.
grad_clip
=
grad_clip
self
.
coalesce
=
coalesce
self
.
bucket_size_mb
=
bucket_size_mb
...
...
@@ -336,7 +390,7 @@ else:
                 raise ValueError('loss_scale must be of type float, dict, or '
                                  f'"dynamic", got {loss_scale}')

-        def before_run(self, runner):
+        def before_run(self, runner) -> None:
             """Preparing steps before Mixed Precision Training.

             1. Make a master copy of fp32 weights for optimization.
...
@@ -346,7 +400,7 @@ else:
             old_groups = runner.optimizer.param_groups
             runner.optimizer.param_groups = copy.deepcopy(
                 runner.optimizer.param_groups)
-            state = defaultdict(dict)
+            state: defaultdict = defaultdict(dict)
             p_map = {
                 old_p: p
                 for old_p, p in zip(
...
@@ -364,7 +418,8 @@ else:
                 scaler_state_dict = runner.meta['fp16']['loss_scaler']
                 self.loss_scaler.load_state_dict(scaler_state_dict)

-        def copy_grads_to_fp32(self, fp16_net, fp32_weights):
+        def copy_grads_to_fp32(self, fp16_net: nn.Module,
+                               fp32_weights: Tensor) -> None:
             """Copy gradients from fp16 model to fp32 weight copy."""
             for fp32_param, fp16_param in zip(fp32_weights,
                                               fp16_net.parameters()):
...
@@ -374,13 +429,14 @@ else:
                         fp32_param.size())
                     fp32_param.grad.copy_(fp16_param.grad)

-        def copy_params_to_fp16(self, fp16_net, fp32_weights):
+        def copy_params_to_fp16(self, fp16_net: nn.Module,
+                                fp32_weights: Tensor) -> None:
             """Copy updated params from fp32 weight copy to fp16 model."""
             for fp16_param, fp32_param in zip(fp16_net.parameters(),
                                               fp32_weights):
                 fp16_param.data.copy_(fp32_param.data)

-        def after_train_iter(self, runner):
+        def after_train_iter(self, runner) -> None:
             """Backward optimization steps for Mixed Precision Training. For
             dynamic loss scaling, please refer `loss_scalar.py`
...
@@ -436,16 +492,15 @@ else:
                 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()

     @HOOKS.register_module()
-    class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook,
-                                              Fp16OptimizerHook):
+    class GradientCumulativeFp16OptimizerHook(  # type: ignore
+            GradientCumulativeOptimizerHook, Fp16OptimizerHook):
         """Fp16 optimizer Hook (using mmcv implementation) implements multi-
         iters gradient cumulating."""

         def __init__(self, *args, **kwargs):
-            super(GradientCumulativeFp16OptimizerHook,
-                  self).__init__(*args, **kwargs)
+            super().__init__(*args, **kwargs)

-        def after_train_iter(self, runner):
+        def after_train_iter(self, runner) -> None:
             if not self.initialized:
                 self._init(runner)
...
mmcv/runner/hooks/profiler.py  View file @ fdeee889
 # Copyright (c) OpenMMLab. All rights reserved.
 import os.path as osp
 import warnings
+from typing import Callable, List, Optional, Union
...
@@ -131,6 +132,15 @@ class ProfilerHook(Hook):
                     raise ImportError('please run "pip install '
                                       'torch-tb-profiler" to install '
                                       'torch_tb_profiler')
+                if 'dir_name' not in trace_cfg:
+                    trace_cfg['dir_name'] = osp.join(runner.work_dir,
+                                                     'tf_tracing_logs')
+                elif not osp.isabs(trace_cfg['dir_name']):
+                    trace_cfg['dir_name'] = osp.join(runner.work_dir,
+                                                     trace_cfg['dir_name'])
+                runner.logger.info(
+                    'tracing files of ProfilerHook will be saved to '
+                    f"{trace_cfg['dir_name']}.")
                 _on_trace_ready = torch.profiler.tensorboard_trace_handler(
                     **trace_cfg)
             else:
...
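A configuration sketch for the trace-directory handling added here; the `trace_type='tb_trace'` dict form is an assumption based on how this hook dispatches `on_trace_ready` dicts:

profiler_config = dict(
    on_trace_ready=dict(
        trace_type='tb_trace',        # TensorBoard trace handler
        dir_name='tf_tracing_logs'))  # relative paths join runner.work_dir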
@@ -142,7 +152,7 @@ class ProfilerHook(Hook):
             raise ValueError('on_trace_ready should be handler, dict or None, '
                              f'but got {type(self.on_trace_ready)}')

-        if runner.max_epochs > 1:
+        if self.by_epoch and runner.max_epochs > 1:
             warnings.warn(f'profiler will profile {runner.max_epochs} epochs '
                           'instead of 1 epoch. Since profiler will slow down '
                           'the training, it is recommended to train 1 epoch '
...
mmcv/runner/hooks/sync_buffer.py  View file @ fdeee889
...
@@ -13,7 +13,7 @@ class SyncBuffersHook(Hook):
         effective only for distributed training. Defaults to True.
     """

-    def __init__(self, distributed=True):
+    def __init__(self, distributed: bool = True):
         self.distributed = distributed

     def after_epoch(self, runner):
...
mmcv/runner/iter_based_runner.py  View file @ fdeee889
...
@@ -4,9 +4,11 @@ import platform
 import shutil
 import time
 import warnings
+from typing import (Callable, Dict, List, Optional, Tuple, Union,
+                    no_type_check)

 import torch
+from torch.optim import Optimizer
+from torch.utils.data import DataLoader

 import mmcv
 from .base_runner import BaseRunner
...
@@ -18,13 +20,13 @@ from .utils import get_host_info
 class IterLoader:

-    def __init__(self, dataloader):
+    def __init__(self, dataloader: DataLoader):
         self._dataloader = dataloader
         self.iter_loader = iter(self._dataloader)
         self._epoch = 0

     @property
-    def epoch(self):
+    def epoch(self) -> int:
         return self._epoch

     def __next__(self):
...
@@ -57,6 +59,7 @@ class IterBasedRunner(BaseRunner):
         self.data_loader = data_loader
         self._epoch = data_loader.epoch
         data_batch = next(data_loader)
+        self.data_batch = data_batch
         self.call_hook('before_train_iter')
         outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
         if not isinstance(outputs, dict):
...
@@ -65,6 +68,7 @@ class IterBasedRunner(BaseRunner):
             self.log_buffer.update(outputs['log_vars'],
                                    outputs['num_samples'])
         self.outputs = outputs
         self.call_hook('after_train_iter')
+        del self.data_batch
         self._inner_iter += 1
         self._iter += 1
...
@@ -74,6 +78,7 @@ class IterBasedRunner(BaseRunner):
         self.mode = 'val'
         self.data_loader = data_loader
         data_batch = next(data_loader)
+        self.data_batch = data_batch
         self.call_hook('before_val_iter')
         outputs = self.model.val_step(data_batch, **kwargs)
         if not isinstance(outputs, dict):
...
@@ -82,9 +87,14 @@ class IterBasedRunner(BaseRunner):
             self.log_buffer.update(outputs['log_vars'],
                                    outputs['num_samples'])
         self.outputs = outputs
         self.call_hook('after_val_iter')
+        del self.data_batch
         self._inner_iter += 1

-    def run(self, data_loaders, workflow, max_iters=None, **kwargs):
+    def run(self,
+            data_loaders: List[DataLoader],
+            workflow: List[Tuple[str, int]],
+            max_iters: Optional[int] = None,
+            **kwargs) -> None:
         """Start running.

         Args:
...
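A usage sketch of the newly typed run() signature (loader names are illustrative):

# alternate 1000 training iterations with 100 validation iterations
# until 80000 iterations have run in total
runner.run([train_loader, val_loader],
           workflow=[('train', 1000), ('val', 100)],
           max_iters=80000)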
@@ -137,10 +147,11 @@ class IterBasedRunner(BaseRunner):
         self.call_hook('after_epoch')
         self.call_hook('after_run')

+    @no_type_check
     def resume(self,
-               checkpoint,
-               resume_optimizer=True,
-               map_location='default'):
+               checkpoint: str,
+               resume_optimizer: bool = True,
+               map_location: Union[str, Callable] = 'default') -> None:
         """Resume model from checkpoint.

         Args:
...
@@ -176,12 +187,13 @@ class IterBasedRunner(BaseRunner):
         self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')

-    def save_checkpoint(self,
-                        out_dir,
-                        filename_tmpl='iter_{}.pth',
-                        meta=None,
-                        save_optimizer=True,
-                        create_symlink=True):
+    def save_checkpoint(  # type: ignore
+            self,
+            out_dir: str,
+            filename_tmpl: str = 'iter_{}.pth',
+            meta: Optional[Dict] = None,
+            save_optimizer: bool = True,
+            create_symlink: bool = True) -> None:
         """Save checkpoint to file.

         Args:
...
@@ -257,13 +269,13 @@ class IterBasedRunner(BaseRunner):
             will be triggered after default hooks.
         """
         if checkpoint_config is not None:
-            checkpoint_config.setdefault('by_epoch', False)
+            checkpoint_config.setdefault('by_epoch', False)  # type: ignore
         if lr_config is not None:
-            lr_config.setdefault('by_epoch', False)
+            lr_config.setdefault('by_epoch', False)  # type: ignore
         if log_config is not None:
             for info in log_config['hooks']:
                 info.setdefault('by_epoch', False)
-        super(IterBasedRunner, self).register_training_hooks(
+        super().register_training_hooks(
             lr_config=lr_config,
             momentum_config=momentum_config,
             optimizer_config=optimizer_config,
...