"""Tests the hooks with runners.

CommandLine:
    pytest tests/test_hooks.py
    xdoctest tests/test_hooks.py zero
"""
import logging
import os.path as osp
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, call

import pytest
import torch
import torch.nn as nn
from torch.nn.init import constant_
from torch.utils.data import DataLoader

from mmcv.runner import (CheckpointHook, EMAHook, EpochBasedRunner,
                         IterTimerHook, MlflowLoggerHook, PaviLoggerHook,
                         WandbLoggerHook)
from mmcv.runner.hooks.lr_updater import CosineRestartLrUpdaterHook


def test_ema_hook():
    """xdoctest -m tests/test_hooks.py test_ema_hook."""

    class DemoModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(
                in_channels=1,
                out_channels=2,
                kernel_size=1,
                padding=1,
                bias=True)
            self._init_weight()

        def _init_weight(self):
            constant_(self.conv.weight, 0)
            constant_(self.conv.bias, 0)

        def forward(self, x):
            return self.conv(x).sum()

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    loader = DataLoader(torch.ones((1, 1, 1, 1)))
    runner = _build_demo_runner()
    demo_model = DemoModel()
    runner.model = demo_model
    emahook = EMAHook(momentum=0.1, interval=2, warm_up=100, resume_from=None)
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(emahook, priority='HIGHEST')
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)
    checkpoint = torch.load(f'{runner.work_dir}/epoch_1.pth')
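    # Both the raw weights and the EMA buffers should still sum to zero:
    # _build_demo_runner registers no OptimizerHook, so nothing is ever
    # trained.  The EMA buffers are then seeded with ones so that the
    # resumed run below can tell them apart from the raw weights.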
    contain_ema_buffer = False
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            contain_ema_buffer = True
            assert value.sum() == 0
            value.fill_(1)
        else:
            assert value.sum() == 0
    assert contain_ema_buffer
    torch.save(checkpoint, f'{runner.work_dir}/epoch_1.pth')
    work_dir = runner.work_dir
    resume_ema_hook = EMAHook(
        momentum=0.5, warm_up=0, resume_from=f'{work_dir}/epoch_1.pth')
    runner = _build_demo_runner()
    runner.model = demo_model
    runner.register_hook(resume_ema_hook, priority='HIGHEST')
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 2)
    checkpoint = torch.load(f'{runner.work_dir}/epoch_2.pth')
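    # EMAHook swaps the raw parameters and the EMA buffers around
    # checkpointing, so the saved buffers should hold the seeded ones
    # (sum 2 over each two-element tensor) while the raw weights hold a
    # momentum-0.5 EMA update toward them (sum 2 * 0.5 = 1).  See EMAHook
    # for the exact swap/update bookkeeping.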
    contain_ema_buffer = False
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            contain_ema_buffer = True
            assert value.sum() == 2
        else:
            assert value.sum() == 1
    assert contain_ema_buffer
    shutil.rmtree(runner.work_dir)
    shutil.rmtree(work_dir)


def test_pavi_hook():
    sys.modules['pavi'] = MagicMock()
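    # pavi is an optional (SenseTime-internal) logging service; mocking the
    # module lets the hook run without it installed.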

    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.meta = dict(config_dict=dict(lr=0.02, gpu_ids=range(1)))
    hook = PaviLoggerHook(add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)
    shutil.rmtree(runner.work_dir)

    assert hasattr(hook, 'writer')
    hook.writer.add_scalars.assert_called_with('val', {
        'learning_rate': 0.02,
        'momentum': 0.95
    }, 1)
    hook.writer.add_snapshot_file.assert_called_with(
        tag=runner.work_dir.split('/')[-1],
        snapshot_file_path=osp.join(runner.work_dir, 'epoch_1.pth'),
        iteration=1)


def test_sync_buffers_hook():
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.register_hook_from_cfg(dict(type='SyncBuffersHook'))
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)
    shutil.rmtree(runner.work_dir)


def test_momentum_runner_hook():
    """xdoctest -m tests/test_hooks.py test_momentum_runner_hook."""
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    # add momentum scheduler
    hook_cfg = dict(
        type='CyclicMomentumUpdaterHook',
        by_epoch=False,
        target_ratio=(0.85 / 0.95, 1),
        cyclic_times=1,
        step_ratio_up=0.4)
    runner.register_hook_from_cfg(hook_cfg)

    # add momentum LR scheduler
    hook_cfg = dict(
        type='CyclicLrUpdaterHook',
        by_epoch=False,
        target_ratio=(10, 1),
        cyclic_times=1,
        step_ratio_up=0.4)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))

    # add pavi hook
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)], 1)
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
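    # With step_ratio_up=0.4 of one 10-iter cycle, the cyclic LR climbs from
    # the base 0.02 to 0.02 * 10 = 0.2 at iter 4, then anneals back down;
    # momentum mirrors it, dipping from 0.95 to 0.85 at the same iter.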
    calls = [
        call('train', {
            'learning_rate': 0.01999999999999999,
            'momentum': 0.95
        }, 0),
        call('train', {
            'learning_rate': 0.2,
            'momentum': 0.85
        }, 4),
        call('train', {
            'learning_rate': 0.155,
            'momentum': 0.875
        }, 6),
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)


def test_cosine_runner_hook():
    """xdoctest -m tests/test_hooks.py test_cosine_runner_hook."""
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    # add momentum scheduler
    hook_cfg = dict(
        type='CosineAnnealingMomentumUpdaterHook',
        min_momentum_ratio=0.99 / 0.95,
        by_epoch=False,
        warmup_iters=2,
        warmup_ratio=0.9 / 0.95)
    runner.register_hook_from_cfg(hook_cfg)

    # add momentum LR scheduler
    hook_cfg = dict(
        type='CosineAnnealingLrUpdaterHook',
        by_epoch=False,
        min_lr_ratio=0,
        warmup_iters=2,
        warmup_ratio=0.9)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    # add pavi hook
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)], 1)
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
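    # Note that the configs above set warmup_iters/warmup_ratio but never
    # `warmup`, so no warmup is applied: iter 0 starts at the base values
    # (lr 0.02, momentum 0.95).  The LR then anneals cosine-style toward
    # min_lr_ratio * 0.02 = 0 (half-way value 0.01 at iter 5) while momentum
    # anneals toward 0.95 * (0.99 / 0.95) = 0.99.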
    calls = [
        call('train', {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, 0),
        call('train', {
            'learning_rate': 0.01,
            'momentum': 0.97
        }, 5),
        call('train', {
            'learning_rate': 0.0004894348370484647,
            'momentum': 0.9890211303259032
        }, 9)
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)


def test_cosine_restart_lr_update_hook():
    """Test CosineRestartLrUpdaterHook."""
    with pytest.raises(AssertionError):
        # only one of `min_lr` and `min_lr_ratio` may be specified
        CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[2, 10],
            restart_weights=[0.5, 0.5],
            min_lr=0.1,
            min_lr_ratio=0)

    with pytest.raises(AssertionError):
        # periods and restart_weights should have the same length
        CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[2, 10],
            restart_weights=[0.5],
            min_lr_ratio=0)

    with pytest.raises(ValueError):
        # the last element of cumulative_periods ([5, 7] here) must be >=
        # the total number of iterations (10)
        sys.modules['pavi'] = MagicMock()
        loader = DataLoader(torch.ones((10, 2)))
        runner = _build_demo_runner()

        # add cosine restart LR scheduler
        hook = CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[5, 2],  # cumulative_periods [5, 7 (5 + 2)]
            restart_weights=[0.5, 0.5],
            min_lr=0.0001)
        runner.register_hook(hook)
        runner.register_hook(IterTimerHook())

        # add pavi hook
        hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
        runner.register_hook(hook)
        runner.run([loader], [('train', 1)], 1)
        shutil.rmtree(runner.work_dir)

    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    # add cosine restart LR scheduler
    hook = CosineRestartLrUpdaterHook(
        by_epoch=False,
        periods=[5, 5],
        restart_weights=[0.5, 0.5],
        min_lr_ratio=0)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())

    # add pavi hook
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)], 1)
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
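    # With restart_weights (0.5, 0.5) the base LR 0.02 starts each period at
    # 0.02 * 0.5 = 0.01, decaying to min_lr_ratio * 0.02 = 0 by the end of
    # the first 5-iter period (iter 5), then restarting for the second.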
    calls = [
        call('train', {
            'learning_rate': 0.01,
            'momentum': 0.95
        }, 0),
        call('train', {
            'learning_rate': 0.0,
            'momentum': 0.95
        }, 5),
        call('train', {
            'learning_rate': 0.0009549150281252633,
            'momentum': 0.95
        }, 9)
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)


@pytest.mark.parametrize('log_model', (True, False))
def test_mlflow_hook(log_model):
    sys.modules['mlflow'] = MagicMock()
    sys.modules['mlflow.pytorch'] = MagicMock()
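    # Mock mlflow and its pytorch flavor so the hook can be exercised
    # without mlflow installed.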

    runner = _build_demo_runner()
    loader = DataLoader(torch.ones((5, 2)))

    hook = MlflowLoggerHook(exp_name='test', log_model=log_model)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)
    shutil.rmtree(runner.work_dir)

    hook.mlflow.set_experiment.assert_called_with('test')
    hook.mlflow.log_metrics.assert_called_with(
        {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, step=5)
    if log_model:
        hook.mlflow_pytorch.log_model.assert_called_with(
            runner.model, 'models')
    else:
        assert not hook.mlflow_pytorch.log_model.called


def test_wandb_hook():
    sys.modules['wandb'] = MagicMock()
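    # Same pattern as the mlflow test above: mock wandb so that no real
    # service is needed.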
    runner = _build_demo_runner()
    hook = WandbLoggerHook()
    loader = DataLoader(torch.ones((5, 2)))

    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)], 1)
    shutil.rmtree(runner.work_dir)

    hook.wandb.init.assert_called_with()
    hook.wandb.log.assert_called_with(
        {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, step=5)
    hook.wandb.join.assert_called_with()


def _build_demo_runner():
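    """Build an EpochBasedRunner around a trivial linear model.

    Note that no OptimizerHook is registered, so ``train_step`` computes a
    loss but the weights are never actually updated by training.
    """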

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    model = Model()

    optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)

    log_config = dict(
        interval=1, hooks=[
            dict(type='TextLoggerHook'),
        ])

    tmp_dir = tempfile.mkdtemp()
    runner = EpochBasedRunner(
        model=model,
        work_dir=tmp_dir,
        optimizer=optimizer,
        logger=logging.getLogger())
    runner.register_checkpoint_hook(dict(interval=1))
    runner.register_logger_hooks(log_config)
    return runner