r"""PyTorch Detection Training.

To run in a multi-gpu environment, use the distributed launcher::

    python -m torch.distributed.launch --nproc_per_node=$NGPU --use_env \
        train.py ... --world-size $NGPU

The default hyperparameters are tuned for training on 8 gpus and 2 images per gpu.
    --lr 0.02 --batch-size 2 --world-size 8
If you use a different number of gpus, the learning rate should be changed to 0.02/8*$NGPU.
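For example, with 2 gpus the learning rate should be 0.02/8*2 = 0.005.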

On top of that, for training Faster/Mask R-CNN, the default hyperparameters are
    --epochs 26 --lr-steps 16 22 --aspect-ratio-group-factor 3

Also, if you train Keypoint R-CNN, the default hyperparameters are
    --epochs 46 --lr-steps 36 43 --aspect-ratio-group-factor 3
Because the number of images is smaller in the person keypoint subset of COCO,
the number of epochs should be adapted so that we have the same number of iterations.
"""
import datetime
import os
import time

import presets
import torch
import torch.utils.data
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
import utils
from coco_utils import get_coco, get_coco_kp
from engine import train_one_epoch, evaluate
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups


def get_dataset(name, image_set, transform, data_path):
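    # Map each dataset name to (root path, dataset builder fn, number of classes).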
    paths = {"coco": (data_path, get_coco, 91), "coco_kp": (data_path, get_coco_kp, 2)}
    p, ds_fn, num_classes = paths[name]

    ds = ds_fn(p, image_set=image_set, transforms=transform)
    return ds, num_classes


def get_transform(train, args):
    if train:
        return presets.DetectionPresetTrain(data_augmentation=args.data_augmentation)
    elif args.weights and args.test_only:
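        # When only evaluating pretrained weights, reuse the preprocessing transforms bundled with the weights enum.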
        weights = torchvision.models.get_weight(args.weights)
        trans = weights.transforms()
        return lambda img, target: (trans(img), target)
    else:
        return presets.DetectionPresetEval()


def get_args_parser(add_help=True):
    import argparse

    parser = argparse.ArgumentParser(description="PyTorch Detection Training", add_help=add_help)

    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
    parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
    parser.add_argument("--model", default="maskrcnn_resnet50_fpn", type=str, help="model name")
    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
    parser.add_argument(
        "-b", "--batch-size", default=2, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
    )
    parser.add_argument("--epochs", default=26, type=int, metavar="N", help="number of total epochs to run")
    parser.add_argument(
        "-j", "--workers", default=4, type=int, metavar="N", help="number of data loading workers (default: 4)"
    )
    parser.add_argument("--opt", default="sgd", type=str, help="optimizer")
    parser.add_argument(
        "--lr",
        default=0.02,
        type=float,
        help="initial learning rate, 0.02 is the default value for training on 8 gpus and 2 images_per_gpu",
    )
    parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
    parser.add_argument(
        "--wd",
        "--weight-decay",
        default=1e-4,
        type=float,
        metavar="W",
        help="weight decay (default: 1e-4)",
        dest="weight_decay",
    )
    parser.add_argument(
        "--norm-weight-decay",
        default=None,
        type=float,
        help="weight decay for Normalization layers (default: None, same value as --wd)",
    )
    parser.add_argument(
        "--lr-scheduler", default="multisteplr", type=str, help="name of lr scheduler (default: multisteplr)"
    )
    parser.add_argument(
        "--lr-step-size", default=8, type=int, help="decrease lr every step-size epochs (multisteplr scheduler only)"
    )
    parser.add_argument(
        "--lr-steps",
        default=[16, 22],
        nargs="+",
        type=int,
        help="decrease lr every step-size epochs (multisteplr scheduler only)",
    )
    parser.add_argument(
        "--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma (multisteplr scheduler only)"
    )
    parser.add_argument("--print-freq", default=20, type=int, help="print frequency")
    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
    parser.add_argument("--start_epoch", default=0, type=int, help="start epoch")
    parser.add_argument("--aspect-ratio-group-factor", default=3, type=int)
    parser.add_argument("--rpn-score-thresh", default=None, type=float, help="rpn score threshold for faster-rcnn")
    parser.add_argument(
        "--trainable-backbone-layers", default=None, type=int, help="number of trainable layers of backbone"
    )
    parser.add_argument(
        "--data-augmentation", default="hflip", type=str, help="data augmentation policy (default: hflip)"
    )
    parser.add_argument(
        "--sync-bn",
        dest="sync_bn",
        help="Use sync batch norm",
        action="store_true",
    )
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )

    # distributed training parameters
    parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
    parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
    parser.add_argument("--weights-backbone", default=None, type=str, help="the backbone weights enum name to load")

    # Mixed precision training parameters
    parser.add_argument("--amp", action="store_true", help="Use torch.cuda.amp for mixed precision training")

    return parser


def main(args):
    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    dataset, num_classes = get_dataset(args.dataset, "train", get_transform(True, args), args.data_path)
    dataset_test, _ = get_dataset(args.dataset, "val", get_transform(False, args), args.data_path)

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    if args.aspect_ratio_group_factor >= 0:
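        # Batch images with similar aspect ratios together to reduce padding overhead.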
        group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, args.batch_size, drop_last=True)

    data_loader = torch.utils.data.DataLoader(
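        # Detection samples vary in size, so collate_fn batches them as tuples instead of stacking tensors.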
        dataset, batch_sampler=train_batch_sampler, num_workers=args.workers, collate_fn=utils.collate_fn
    )

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers, collate_fn=utils.collate_fn
    )

    print("Creating model")
    kwargs = {"trainable_backbone_layers": args.trainable_backbone_layers}
    if args.data_augmentation in ["multiscale", "lsj"]:
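        # These policies resize inside the augmentation pipeline, so skip the model's internal resize.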
        kwargs["_skip_resize"] = True
    if "rcnn" in args.model:
        if args.rpn_score_thresh is not None:
            kwargs["rpn_score_thresh"] = args.rpn_score_thresh
    model = torchvision.models.detection.__dict__[args.model](
        weights=args.weights, weights_backbone=args.weights_backbone, num_classes=num_classes, **kwargs
    )
    model.to(device)
    if args.distributed and args.sync_bn:
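        # Convert BatchNorm layers to SyncBatchNorm so statistics are computed across all processes.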
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    if args.norm_weight_decay is None:
        parameters = [p for p in model.parameters() if p.requires_grad]
    else:
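        # Apply a separate weight decay to normalization-layer parameters than to the rest of the model.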
        param_groups = torchvision.ops._utils.split_normalization_params(model)
        wd_groups = [args.norm_weight_decay, args.weight_decay]
        parameters = [{"params": p, "weight_decay": w} for p, w in zip(param_groups, wd_groups) if p]

    opt_name = args.opt.lower()
    if opt_name.startswith("sgd"):
        optimizer = torch.optim.SGD(
            parameters,
            lr=args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov="nesterov" in opt_name,
        )
    elif opt_name == "adamw":
        optimizer = torch.optim.AdamW(parameters, lr=args.lr, weight_decay=args.weight_decay)
    else:
        raise RuntimeError(f"Invalid optimizer {args.opt}. Only SGD and AdamW are supported.")

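    # GradScaler applies dynamic loss scaling so float16 gradients do not underflow under AMP.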
    scaler = torch.cuda.amp.GradScaler() if args.amp else None

    args.lr_scheduler = args.lr_scheduler.lower()
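    # Two schedules are supported: step decay at fixed milestones, or cosine annealing over all epochs.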
    if args.lr_scheduler == "multisteplr":
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)
    elif args.lr_scheduler == "cosineannealinglr":
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
    else:
        raise RuntimeError(
            f"Invalid lr scheduler '{args.lr_scheduler}'. Only MultiStepLR and CosineAnnealingLR are supported."
        )

    if args.resume:
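        # Restore the full training state: model, optimizer, lr scheduler, epoch counter, and AMP scaler.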
        checkpoint = torch.load(args.resume, map_location="cpu")
        model_without_ddp.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
        args.start_epoch = checkpoint["epoch"] + 1
        if args.amp:
            scaler.load_state_dict(checkpoint["scaler"])

    if args.test_only:
        evaluate(model, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
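            # Make the DistributedSampler reshuffle the data differently every epoch.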
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq, scaler)
        lr_scheduler.step()
        if args.output_dir:
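            # save_on_master writes checkpoints from the rank-0 process only.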
            checkpoint = {
                "model": model_without_ddp.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "args": args,
                "epoch": epoch,
            }
            if args.amp:
                checkpoint["scaler"] = scaler.state_dict()
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))

        # evaluate after every epoch
        evaluate(model, data_loader_test, device=device)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print(f"Training time {total_time_str}")


if __name__ == "__main__":
    args = get_args_parser().parse_args()
    main(args)