"""Tests the hooks with runners.

CommandLine:
    pytest tests/test_hooks.py
    xdoctest tests/test_hooks.py zero
"""
import logging
import os.path as osp
import re
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, call

import pytest
import torch
import torch.nn as nn
from torch.nn.init import constant_
from torch.utils.data import DataLoader

from mmcv.runner import (CheckpointHook, EMAHook, IterTimerHook,
                         MlflowLoggerHook, PaviLoggerHook, WandbLoggerHook,
                         build_runner)
from mmcv.runner.hooks.lr_updater import CosineRestartLrUpdaterHook


def test_checkpoint_hook():
    """xdoctest -m tests/test_runner/test_hooks.py test_checkpoint_hook."""

    # test epoch based runner
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=1)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert runner.meta['hook_msgs']['last_ckpt'] == osp.join(
        runner.work_dir, 'epoch_1.pth')
    shutil.rmtree(runner.work_dir)

    # test iter based runner
    runner = _build_demo_runner(
        'IterBasedRunner', max_iters=1, max_epochs=None)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=False)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert runner.meta['hook_msgs']['last_ckpt'] == osp.join(
        runner.work_dir, 'iter_1.pth')
    shutil.rmtree(runner.work_dir)


def test_ema_hook():
    """xdoctest -m tests/test_hooks.py test_ema_hook."""

    class DemoModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(
                in_channels=1,
                out_channels=2,
                kernel_size=1,
                padding=1,
                bias=True)
            self._init_weight()

        def _init_weight(self):
            constant_(self.conv.weight, 0)
            constant_(self.conv.bias, 0)

        def forward(self, x):
            return self.conv(x).sum()

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    loader = DataLoader(torch.ones((1, 1, 1, 1)))
    runner = _build_demo_runner()
    demo_model = DemoModel()
    runner.model = demo_model
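    # EMA buffers update every `interval` iters; `warm_up` controls how
    # quickly the effective momentum approaches `momentum` early in training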
    emahook = EMAHook(momentum=0.1, interval=2, warm_up=100, resume_from=None)
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(emahook, priority='HIGHEST')
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
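    # The conv weights started at zero and the optimizer built by
    # _build_demo_runner holds a different model's parameters, so neither the
    # raw weights nor the EMA buffers should have moved. Fill the EMA buffers
    # with ones so resuming from this checkpoint can be verified below.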
    checkpoint = torch.load(f'{runner.work_dir}/epoch_1.pth')
    contain_ema_buffer = False
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            contain_ema_buffer = True
            assert value.sum() == 0
            value.fill_(1)
        else:
            assert value.sum() == 0
    assert contain_ema_buffer
    torch.save(checkpoint, f'{runner.work_dir}/epoch_1.pth')
    work_dir = runner.work_dir
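    # resume from the doctored checkpoint: its EMA buffers (now ones) should
    # be restored into the new runner's model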
    resume_ema_hook = EMAHook(
        momentum=0.5, warm_up=0, resume_from=f'{work_dir}/epoch_1.pth')
    runner = _build_demo_runner(max_epochs=2)
    runner.model = demo_model
    runner.register_hook(resume_ema_hook, priority='HIGHEST')
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_2.pth')
    contain_ema_buffer = False
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            contain_ema_buffer = True
            assert value.sum() == 2
        else:
            assert value.sum() == 1
    assert contain_ema_buffer
    shutil.rmtree(runner.work_dir)
    shutil.rmtree(work_dir)


def test_pavi_hook():
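    # pavi is not a hard dependency; mock it so the test only checks the
    # hook's calls, not a real logging backend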
    sys.modules['pavi'] = MagicMock()

    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.meta = dict(config_dict=dict(lr=0.02, gpu_ids=range(1)))
    hook = PaviLoggerHook(add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    assert hasattr(hook, 'writer')
    hook.writer.add_scalars.assert_called_with('val', {
        'learning_rate': 0.02,
        'momentum': 0.95
    }, 1)
    hook.writer.add_snapshot_file.assert_called_with(
        tag=osp.basename(runner.work_dir),
        snapshot_file_path=osp.join(runner.work_dir, 'epoch_1.pth'),
        iteration=1)


def test_sync_buffers_hook():
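    # SyncBuffersHook broadcasts model buffers (e.g. BN running stats) across
    # processes in distributed training; here we only check that it registers
    # and runs in a single-process setting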
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.register_hook_from_cfg(dict(type='SyncBuffersHook'))
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)


def test_momentum_runner_hook():
    """xdoctest -m tests/test_hooks.py test_momentum_runner_hook."""
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    # add momentum scheduler
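    # with 10 iters and step_ratio_up=0.4, momentum anneals 0.95 -> 0.85 over
    # the first 4 iters, then back towards 0.95; the LR cycle below mirrors
    # this with 0.02 -> 0.2 -> 0.02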
    hook_cfg = dict(
        type='CyclicMomentumUpdaterHook',
        by_epoch=False,
        target_ratio=(0.85 / 0.95, 1),
        cyclic_times=1,
        step_ratio_up=0.4)
    runner.register_hook_from_cfg(hook_cfg)

    # add momentum LR scheduler
    hook_cfg = dict(
        type='CyclicLrUpdaterHook',
        by_epoch=False,
        target_ratio=(10, 1),
        cyclic_times=1,
        step_ratio_up=0.4)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))

    # add pavi hook
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
    calls = [
        call('train', {
            'learning_rate': 0.01999999999999999,
            'momentum': 0.95
        }, 1),
        call('train', {
            'learning_rate': 0.2,
            'momentum': 0.85
        }, 5),
        call('train', {
            'learning_rate': 0.155,
            'momentum': 0.875
        }, 7),
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)


def test_cosine_runner_hook():
    """xdoctest -m tests/test_hooks.py test_cosine_runner_hook."""
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    # add momentum scheduler
    hook_cfg = dict(
        type='CosineAnnealingMomentumUpdaterHook',
        min_momentum_ratio=0.99 / 0.95,
        by_epoch=False,
        warmup_iters=2,
        warmup_ratio=0.9 / 0.95)
    runner.register_hook_from_cfg(hook_cfg)

    # add momentum LR scheduler
    hook_cfg = dict(
        type='CosineAnnealingLrUpdaterHook',
        by_epoch=False,
        min_lr_ratio=0,
        warmup_iters=2,
        warmup_ratio=0.9)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))

    # add pavi hook
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
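    # e.g. the last logged LR follows the cosine law:
    # 0.5 * 0.02 * (1 + cos(pi * 9 / 10)) ~= 4.894e-4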
    calls = [
        call('train', {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, 1),
        call('train', {
            'learning_rate': 0.01,
            'momentum': 0.97
        }, 6),
        call('train', {
            'learning_rate': 0.0004894348370484647,
            'momentum': 0.9890211303259032
        }, 10)
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)


def test_cosine_restart_lr_update_hook():
    """Test CosineRestartLrUpdaterHook."""
    with pytest.raises(AssertionError):
        # exactly one of `min_lr` and `min_lr_ratio` must be specified
        CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[2, 10],
            restart_weights=[0.5, 0.5],
            min_lr=0.1,
            min_lr_ratio=0)

    with pytest.raises(AssertionError):
        # periods and restart_weights should have the same length
        CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[2, 10],
            restart_weights=[0.5],
            min_lr_ratio=0)

    with pytest.raises(ValueError):
        # the last cumulative period (5 + 2 = 7) must cover the 10 train iters
        sys.modules['pavi'] = MagicMock()
        loader = DataLoader(torch.ones((10, 2)))
        runner = _build_demo_runner()

        # add cosine restart LR scheduler
        hook = CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[5, 2],  # cumulative_periods [5, 7 (5 + 2)]
            restart_weights=[0.5, 0.5],
            min_lr=0.0001)
        runner.register_hook(hook)
        runner.register_hook(IterTimerHook())

        # add pavi hook
        hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
        runner.register_hook(hook)
        runner.run([loader], [('train', 1)])
        shutil.rmtree(runner.work_dir)

    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    # add cosine restart LR scheduler
    hook = CosineRestartLrUpdaterHook(
        by_epoch=False,
        periods=[5, 5],
        restart_weights=[0.5, 0.5],
        min_lr_ratio=0)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())

    # add pavi hook
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # TODO: use a more elegant way to check values
    assert hasattr(hook, 'writer')
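    # both cycles restart at weight 0.5, so each begins at 0.5 * 0.02 = 0.01;
    # the last logged LR is 0.01 * 0.5 * (1 + cos(pi * 4 / 5)) ~= 9.549e-4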
    calls = [
        call('train', {
            'learning_rate': 0.01,
            'momentum': 0.95
        }, 1),
        call('train', {
            'learning_rate': 0.01,
            'momentum': 0.95
        }, 6),
        call('train', {
            'learning_rate': 0.0009549150281252633,
            'momentum': 0.95
        }, 10)
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)


@pytest.mark.parametrize('log_model', (True, False))
def test_mlflow_hook(log_model):
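    # mock mlflow (and its pytorch flavor) so no tracking server is needed;
    # the assertions below check only the hook's calls into the mock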
    sys.modules['mlflow'] = MagicMock()
    sys.modules['mlflow.pytorch'] = MagicMock()

    runner = _build_demo_runner()
    loader = DataLoader(torch.ones((5, 2)))

    hook = MlflowLoggerHook(exp_name='test', log_model=log_model)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    hook.mlflow.set_experiment.assert_called_with('test')
    hook.mlflow.log_metrics.assert_called_with(
        {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, step=6)
    if log_model:
        hook.mlflow_pytorch.log_model.assert_called_with(
            runner.model, 'models')
    else:
        assert not hook.mlflow_pytorch.log_model.called


def test_wandb_hook():
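    # mock wandb so the test runs without a W&B account or network access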
    sys.modules['wandb'] = MagicMock()
    runner = _build_demo_runner()
    hook = WandbLoggerHook()
    loader = DataLoader(torch.ones((5, 2)))

    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    hook.wandb.init.assert_called_with()
    hook.wandb.log.assert_called_with({
        'learning_rate': 0.02,
        'momentum': 0.95
    },
                                      step=6,
                                      commit=True)
    hook.wandb.join.assert_called_with()


def _build_demo_runner(runner_type='EpochBasedRunner',
                       max_epochs=1,
                       max_iters=None):
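    """Build a minimal runner (linear model, SGD, text logger) shared by the
    tests above."""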

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    model = Model()

    optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)

    log_config = dict(
        interval=1, hooks=[
            dict(type='TextLoggerHook'),
        ])

    tmp_dir = tempfile.mkdtemp()
    runner = build_runner(
        dict(type=runner_type),
        default_args=dict(
            model=model,
            work_dir=tmp_dir,
            optimizer=optimizer,
            logger=logging.getLogger(),
            max_epochs=max_epochs,
            max_iters=max_iters))
    runner.register_checkpoint_hook(dict(interval=1))
    runner.register_logger_hooks(log_config)
    return runner


def test_runner_with_revise_keys():
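    """Test load_checkpoint's `revise_keys`: add a key prefix, then strip
    it."""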

    import os

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 3, 1)

    class PrefixModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.backbone = Model()

    pmodel = PrefixModel()
    model = Model()
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')

    # add prefix
    torch.save(model.state_dict(), checkpoint_path)
    runner = _build_demo_runner(runner_type='EpochBasedRunner')
    runner.model = pmodel
    state_dict = runner.load_checkpoint(
        checkpoint_path, revise_keys=[(r'^', 'backbone.')])
    for key in pmodel.backbone.state_dict().keys():
        assert torch.equal(pmodel.backbone.state_dict()[key], state_dict[key])
    # strip prefix
    torch.save(pmodel.state_dict(), checkpoint_path)
    runner.model = model
    state_dict = runner.load_checkpoint(
        checkpoint_path, revise_keys=[(r'^backbone\.', '')])
    for key in state_dict.keys():
        key_stripped = re.sub(r'^backbone\.', '', key)
        assert torch.equal(model.state_dict()[key_stripped], state_dict[key])
    os.remove(checkpoint_path)