"test/check_v2_dataset_warnings.py" did not exist on "56b0497646653bf1f027ce9c2ba668331252f67c"
train.py 4.85 KB
Newer Older
pangjm's avatar
pangjm committed
1
from __future__ import division
Kai Chen's avatar
Kai Chen committed
2

pangjm's avatar
pangjm committed
3
import argparse
4
import logging
Kai Chen's avatar
Kai Chen committed
5
from collections import OrderedDict
pangjm's avatar
pangjm committed
6

Kai Chen's avatar
Kai Chen committed
7
import numpy as np
pangjm's avatar
pangjm committed
8
9
import torch
from mmcv import Config
Kai Chen's avatar
Kai Chen committed
10
11
12
from mmcv.torchpack import Runner, obj_from_dict

from mmdet import datasets
13
from mmdet.core import (init_dist, DistOptimizerHook, DistSamplerSeedHook,
Kai Chen's avatar
Kai Chen committed
14
                        MMDataParallel, MMDistributedDataParallel,
15
                        CocoDistEvalRecallHook, CocoDistEvalmAPHook)
Kai Chen's avatar
Kai Chen committed
16
from mmdet.datasets.loader import build_dataloader
Kai Chen's avatar
Kai Chen committed
17
from mmdet.models import build_detector, RPN
Kai Chen's avatar
Kai Chen committed
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39


def parse_losses(losses):
    """Reduce raw model outputs to a total loss plus scalar log values.

    Args:
        losses (dict): maps a loss name to either a tensor or a list of
            tensors (one per scale/branch).

    Returns:
        tuple: ``(loss, log_vars)`` where ``loss`` is the summed tensor of
        every entry whose name contains ``'loss'`` (used for backprop) and
        ``log_vars`` is an OrderedDict of plain floats for logging.

    Raises:
        TypeError: if a value is neither a tensor nor a list of tensors.
    """
    log_vars = OrderedDict()
    for name, value in losses.items():
        if isinstance(value, torch.Tensor):
            log_vars[name] = value.mean()
        elif isinstance(value, list):
            # multi-scale outputs: average each, then accumulate
            log_vars[name] = sum(item.mean() for item in value)
        else:
            raise TypeError(
                '{} is not a tensor or list of tensors'.format(name))

    # only entries whose key mentions 'loss' contribute to the total;
    # other entries (e.g. accuracies) are logged but not optimized
    total = sum(value for key, value in log_vars.items() if 'loss' in key)
    log_vars['loss'] = total

    # convert every logged tensor to a plain Python float
    for key in log_vars:
        log_vars[key] = log_vars[key].item()

    return total, log_vars


def batch_processor(model, data, train_mode):
    """Run one forward pass and package the result for the Runner.

    Args:
        model: the (possibly wrapped) detector; called as ``model(**data)``.
        data (dict): one batch from the data loader; must contain ``'img'``.
        train_mode (bool): unused here, kept for the Runner's interface.

    Returns:
        dict: ``loss`` tensor, ``log_vars`` floats, and ``num_samples``
        (batch size taken from the image container).
    """
    raw_losses = model(**data)
    total_loss, log_vars = parse_losses(raw_losses)
    batch_size = len(data['img'].data)
    return dict(loss=total_loss, log_vars=log_vars, num_samples=batch_size)


def get_logger(log_level):
    """Configure the root logger once and return it.

    Args:
        log_level: a ``logging`` level (e.g. ``logging.INFO``).

    Returns:
        logging.Logger: the root logger.
    """
    fmt = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=fmt, level=log_level)
    return logging.getLogger()


def set_random_seed(seed):
    """Seed NumPy and PyTorch (CPU and every CUDA device) for
    reproducibility.

    Args:
        seed (int): the seed applied to all RNGs.
    """
    for seeder in (np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)


def parse_args():
    """Build the command-line parser and return the parsed options."""
    ap = argparse.ArgumentParser(description='Train a detector')
    ap.add_argument('config', help='train config file path')
    ap.add_argument('--work_dir', help='the dir to save logs and models')
    ap.add_argument(
        '--validate',
        action='store_true',
        help='whether to add a validate phase')
    ap.add_argument(
        '--gpus', type=int, default=1, help='number of gpus to use')
    ap.add_argument('--seed', type=int, help='random seed')
    # --launcher/--local_rank are consumed by the distributed bootstrap
    ap.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    ap.add_argument('--local_rank', type=int, default=0)
    return ap.parse_args()


def main():
    """Entry point: load config, build data loaders, model and runner,
    then launch training."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # CLI options take precedence over the config file
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    cfg.gpus = args.gpus

    logger = get_logger(cfg.log_level)

    # set random seed if specified
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    # init distributed environment if necessary
    if args.launcher == 'none':
        dist = False
        logger.info('Disabled distributed training.')
    else:
        dist = True
        init_dist(args.launcher, **cfg.dist_params)
        # only rank 0 keeps verbose logging; other workers log errors only
        if torch.distributed.get_rank() != 0:
            logger.setLevel('ERROR')
        logger.info('Enabled distributed training.')

    # prepare data loaders (train always; val only with --validate)
    train_dataset = obj_from_dict(cfg.data.train, datasets)
    data_loaders = [
        build_dataloader(train_dataset, cfg.data.imgs_per_gpu,
                         cfg.data.workers_per_gpu, cfg.gpus, dist)
    ]
    if args.validate:
        val_dataset = obj_from_dict(cfg.data.val, datasets)
        data_loaders.append(
            build_dataloader(val_dataset, cfg.data.imgs_per_gpu,
                             cfg.data.workers_per_gpu, cfg.gpus, dist))

    # build model and wrap it for single- or multi-process execution
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    if dist:
        model = MMDistributedDataParallel(model.cuda())
    else:
        model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()

    # build runner
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir,
                    cfg.log_level)
    # register hooks
    # NOTE(review): DistOptimizerHook presumably synchronizes gradients
    # across workers before stepping — confirm against mmdet.core
    optimizer_config = DistOptimizerHook(
        **cfg.optimizer_config) if dist else cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config)
    if dist:
        runner.register_hook(DistSamplerSeedHook())
        # register eval hooks
        # NOTE(review): evaluation hooks are registered only in distributed
        # mode, so a non-distributed run gets no COCO evaluation — confirm
        # this is intended
        if isinstance(model.module, RPN):
            runner.register_hook(CocoDistEvalRecallHook(cfg.data.val))
        elif cfg.data.val.type == 'CocoDataset':
            runner.register_hook(CocoDistEvalmAPHook(cfg.data.val))

    # resume restores full training state; load_from only loads weights
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
# script entry point
if __name__ == '__main__':
    main()