main.py 17.2 KB
Newer Older
1
2
3
4
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
Yanghan Wang's avatar
Yanghan Wang committed
5
6
import logging
import os
7
8
9
10
11
import random
import time
from datetime import timedelta
from pathlib import Path

Yanghan Wang's avatar
Yanghan Wang committed
12
import detr.util.misc as utils
13
14
15
16
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
Yanghan Wang's avatar
Yanghan Wang committed
17
18
from detectron2.engine.launch import _find_free_port
from detectron2.utils.file_io import PathManager
19
20
21
22
from detr import datasets
from detr.datasets import build_dataset, get_coco_api_from_dataset
from detr.engine import evaluate, train_one_epoch
from detr.models import build_model
Yanghan Wang's avatar
Yanghan Wang committed
23
from torch.utils.data import DataLoader, DistributedSampler
24
25
26

# Default timeout passed through to torch.distributed process-group setup.
DEFAULT_TIMEOUT = timedelta(minutes=30)

Yanghan Wang's avatar
Yanghan Wang committed
27

28
def get_args_parser():
    """Build the argument parser for DETR training / evaluation.

    Returns:
        argparse.ArgumentParser: parser with ``add_help=False`` so it can be
        used as a parent parser (see ``invoke_main``).
    """
    p = argparse.ArgumentParser("Set transformer detector", add_help=False)

    # -- Optimization schedule --------------------------------------------
    p.add_argument("--lr", default=1e-4, type=float)
    p.add_argument("--lr_backbone", default=1e-5, type=float)
    p.add_argument("--batch_size", default=2, type=int)
    p.add_argument("--weight_decay", default=1e-4, type=float)
    p.add_argument("--epochs", default=300, type=int)
    p.add_argument("--lr_drop", default=200, type=int)
    p.add_argument(
        "--clip_max_norm", default=0.1, type=float, help="gradient clipping max norm"
    )

    # -- Model ------------------------------------------------------------
    p.add_argument(
        "--frozen_weights",
        type=str,
        default=None,
        help="Path to the pretrained model. If set, only the mask head will be trained",
    )

    # Backbone
    p.add_argument(
        "--backbone",
        default="resnet50",
        type=str,
        help="Name of the convolutional backbone to use",
    )
    p.add_argument(
        "--dilation",
        action="store_true",
        help="If true, we replace stride with dilation in the last convolutional block (DC5)",
    )
    p.add_argument(
        "--position_embedding",
        default="sine",
        type=str,
        choices=("sine", "learned"),
        help="Type of positional embedding to use on top of the image features",
    )

    # Transformer
    p.add_argument(
        "--enc_layers",
        default=6,
        type=int,
        help="Number of encoding layers in the transformer",
    )
    p.add_argument(
        "--dec_layers",
        default=6,
        type=int,
        help="Number of decoding layers in the transformer",
    )
    p.add_argument(
        "--dim_feedforward",
        default=2048,
        type=int,
        help="Intermediate size of the feedforward layers in the transformer blocks",
    )
    p.add_argument(
        "--hidden_dim",
        default=256,
        type=int,
        help="Size of the embeddings (dimension of the transformer)",
    )
    p.add_argument(
        "--dropout", default=0.1, type=float, help="Dropout applied in the transformer"
    )
    p.add_argument(
        "--nheads",
        default=8,
        type=int,
        help="Number of attention heads inside the transformer's attentions",
    )
    p.add_argument(
        "--num_queries", default=100, type=int, help="Number of query slots"
    )
    p.add_argument("--pre_norm", action="store_true")

    # Segmentation
    p.add_argument(
        "--masks",
        action="store_true",
        help="Train segmentation head if the flag is provided",
    )

    # -- Losses -----------------------------------------------------------
    p.add_argument(
        "--no_aux_loss",
        dest="aux_loss",
        action="store_false",
        help="Disables auxiliary decoding losses (loss at each layer)",
    )

    # Hungarian matcher costs
    p.add_argument(
        "--set_cost_class",
        default=1,
        type=float,
        help="Class coefficient in the matching cost",
    )
    p.add_argument(
        "--set_cost_bbox",
        default=5,
        type=float,
        help="L1 box coefficient in the matching cost",
    )
    p.add_argument(
        "--set_cost_giou",
        default=2,
        type=float,
        help="giou box coefficient in the matching cost",
    )

    # Loss coefficients
    p.add_argument("--mask_loss_coef", default=1, type=float)
    p.add_argument("--dice_loss_coef", default=1, type=float)
    p.add_argument("--bbox_loss_coef", default=5, type=float)
    p.add_argument("--giou_loss_coef", default=2, type=float)
    p.add_argument(
        "--eos_coef",
        default=0.1,
        type=float,
        help="Relative classification weight of the no-object class",
    )

    # -- Dataset ----------------------------------------------------------
    p.add_argument("--dataset_file", default="coco")
    p.add_argument(
        "--ade_path",
        type=str,
        default="manifold://winvision/tree/detectron2/ADEChallengeData2016/",
    )
    p.add_argument(
        "--coco_path", type=str, default="manifold://fair_vision_data/tree/"
    )
    p.add_argument(
        "--coco_panoptic_path", type=str, default="manifold://fair_vision_data/tree/"
    )
    p.add_argument("--remove_difficult", action="store_true")

    # -- Runtime / checkpointing ------------------------------------------
    p.add_argument(
        "--output-dir", default="", help="path where to save, empty for no saving"
    )
    p.add_argument(
        "--device", default="cuda", help="device to use for training / testing"
    )
    p.add_argument("--seed", default=42, type=int)
    p.add_argument("--resume", default="", help="resume from checkpoint")
    p.add_argument(
        "--start_epoch", default=0, type=int, metavar="N", help="start epoch"
    )
    p.add_argument("--eval", action="store_true")
    p.add_argument("--num_workers", default=2, type=int)

    # -- Distributed training ---------------------------------------------
    p.add_argument(
        "--num-gpus", type=int, default=8, help="number of gpus *per machine*"
    )
    p.add_argument(
        "--num-machines", type=int, default=1, help="total number of machines"
    )
    p.add_argument(
        "--machine-rank",
        type=int,
        default=0,
        help="the rank of this machine (unique per machine)",
    )
    p.add_argument(
        "--dist-url", default="env://", help="url used to set up distributed training"
    )
    return p


def main(args):
    """Train (or evaluate, with ``--eval``) a DETR model.

    Expects ``args`` from ``get_args_parser``, with ``args.distributed`` set
    by ``launch`` and, in distributed runs, ``args.gpu`` set per worker by
    ``_distributed_worker``.
    """
    # utils.init_distributed_mode(args)
    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    # (rank-dependent offset so workers do not draw identical random streams)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    model, criterion, postprocessors = build_model(args)
    model.to(device)

    # Keep a handle on the raw module: DDP wraps it, but checkpointing and
    # parameter grouping below address the unwrapped model.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("number of params:", n_parameters)

    # Two parameter groups: the backbone trains at a (typically smaller)
    # dedicated learning rate, everything else at args.lr.
    param_dicts = [
        {
            "params": [
                p
                for n, p in model_without_ddp.named_parameters()
                if "backbone" not in n and p.requires_grad
            ]
        },
        {
            "params": [
                p
                for n, p in model_without_ddp.named_parameters()
                if "backbone" in n and p.requires_grad
            ],
            "lr": args.lr_backbone,
        },
    ]
    optimizer = torch.optim.AdamW(
        param_dicts, lr=args.lr, weight_decay=args.weight_decay
    )
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)

    dataset_train = build_dataset(image_set="train", args=args)
    dataset_val = build_dataset(image_set="val", args=args)

    if args.distributed:
        sampler_train = DistributedSampler(dataset_train)
        sampler_val = DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    batch_sampler_train = torch.utils.data.BatchSampler(
        sampler_train, args.batch_size, drop_last=True
    )

    data_loader_train = DataLoader(
        dataset_train,
        batch_sampler=batch_sampler_train,
        collate_fn=utils.collate_fn,
        num_workers=args.num_workers,
    )
    data_loader_val = DataLoader(
        dataset_val,
        args.batch_size,
        sampler=sampler_val,
        drop_last=False,
        collate_fn=utils.collate_fn,
        num_workers=args.num_workers,
    )

    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)

    if args.frozen_weights is not None:
        # Load pretrained detector weights; only the mask head will train.
        checkpoint = torch.load(args.frozen_weights, map_location="cpu")
        model_without_ddp.detr.load_state_dict(checkpoint["model"])

    if args.resume:
        if args.resume.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location="cpu", check_hash=True
            )
        else:
            checkpoint = torch.load(args.resume, map_location="cpu")
        model_without_ddp.load_state_dict(checkpoint["model"])
        # Restore optimizer/scheduler state only for full training resumes;
        # eval-only runs just need the model weights.
        if (
            not args.eval
            and "optimizer" in checkpoint
            and "lr_scheduler" in checkpoint
            and "epoch" in checkpoint
        ):
            optimizer.load_state_dict(checkpoint["optimizer"])
            lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
            args.start_epoch = checkpoint["epoch"] + 1

    if args.eval:
        # Evaluation-only path: run once over the val loader and exit.
        test_stats, coco_evaluator = evaluate(
            model,
            criterion,
            postprocessors,
            data_loader_val,
            base_ds,
            device,
            args.output_dir,
        )
        if args.output_dir:
            with PathManager.open(os.path.join(args.output_dir, "eval.pth"), "wb") as f:
                utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, f)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle per epoch so each worker sees a new partition.
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(
            model,
            criterion,
            data_loader_train,
            optimizer,
            device,
            epoch,
            args.clip_max_norm,
        )
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = []  # os.path.join(args.output_dir, 'checkpoint.pth')]
            # extra checkpoint before LR drop and every 10 epochs
            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 10 == 0:
                checkpoint_paths.append(
                    os.path.join(args.output_dir, f"checkpoint{epoch:04}.pth")
                )
            for checkpoint_path in checkpoint_paths:
                with PathManager.open(checkpoint_path, "wb") as f:
                    # Only the globally first worker writes the checkpoint.
                    if args.gpu == 0 and args.machine_rank == 0:
                        utils.save_on_master(
                            {
                                "model": model_without_ddp.state_dict(),
                                "optimizer": optimizer.state_dict(),
                                "lr_scheduler": lr_scheduler.state_dict(),
                                "epoch": epoch,
                                "args": args,
                            },
                            f,
                        )

        # Evaluate after every training epoch.
        test_stats, coco_evaluator = evaluate(
            model,
            criterion,
            postprocessors,
            data_loader_val,
            base_ds,
            device,
            args.output_dir,
        )

        log_stats = {
            **{f"train_{k}": v for k, v in train_stats.items()},
            **{f"test_{k}": v for k, v in test_stats.items()},
            "epoch": epoch,
            "n_parameters": n_parameters,
        }

        if args.output_dir and utils.is_main_process():
            # NOTE(review): mode "w" rewrites log.txt every epoch, so only the
            # latest epoch's stats survive; upstream DETR appends ("a").
            # Confirm whether overwrite is intended for this storage backend.
            with PathManager.open(os.path.join(args.output_dir, "log.txt"), "w") as f:
                f.write(json.dumps(log_stats) + "\n")

            # for evaluation logs
            if coco_evaluator is not None:
                PathManager.mkdirs(os.path.join(args.output_dir, "eval"))
                if "bbox" in coco_evaluator.coco_eval:
                    filenames = ["latest.pth"]
                    if epoch % 50 == 0:
                        filenames.append(f"{epoch:03}.pth")
                    for name in filenames:
                        with PathManager.open(
                            os.path.join(args.output_dir, "eval", name), "wb"
                        ) as f:
                            torch.save(coco_evaluator.coco_eval["bbox"].eval, f)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print("Training time {}".format(total_time_str))

393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425

def launch(
    main_func,
    num_gpus_per_machine,
    num_machines=1,
    machine_rank=0,
    dist_url=None,
    args=(),
    timeout=DEFAULT_TIMEOUT,
):
    """
    Run ``main_func`` either in-process or across spawned distributed workers.

    Every machine taking part in the job must call this function; each one
    spawns ``num_gpus_per_machine`` child processes. With a single process
    overall, ``main_func(*args)`` is invoked directly.

    Args:
        main_func: callable executed as ``main_func(*args)`` in each worker.
        num_gpus_per_machine (int): number of GPUs (workers) per machine.
        num_machines (int): total machine count for the job.
        machine_rank (int): rank of this machine (unique per machine).
        dist_url (str): init-method URL for distributed setup, including
            protocol (e.g. "tcp://127.0.0.1:8686"); "auto" selects a free
            localhost port (single-machine only).
        args (tuple): positional arguments forwarded to ``main_func``;
            ``args[0]`` receives a ``distributed`` attribute set here.
        timeout (timedelta): timeout for the distributed workers.
    """
    world_size = num_machines * num_gpus_per_machine
    args[0].distributed = world_size > 1
    if not args[0].distributed:
        # Single-process job: no process group, just call straight through.
        main_func(*args)
        return

    # https://github.com/pytorch/pytorch/pull/14391
    # TODO prctl in spawned processes
    if dist_url == "auto":
        assert (
            num_machines == 1
        ), "dist_url=auto not supported in multi-machine jobs."
        port = _find_free_port()
        dist_url = f"tcp://127.0.0.1:{port}"
    if num_machines > 1 and dist_url.startswith("file://"):
        logging.getLogger(__name__).warning(
            "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
        )

    mp.spawn(
        _distributed_worker,
        nprocs=num_gpus_per_machine,
        args=(
            main_func,
            world_size,
            num_gpus_per_machine,
            machine_rank,
            dist_url,
            args,
            timeout,
        ),
        daemon=False,
    )

Yanghan Wang's avatar
Yanghan Wang committed
454

455
456
457
458
459
460
461
462
463
464
465
466
467
468
def synchronize():
    """
    Barrier across all processes of the default distributed group.

    Silently does nothing when torch.distributed is unavailable or not yet
    initialized, or when the job consists of a single process.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()

Yanghan Wang's avatar
Yanghan Wang committed
469

470
471
472
473
474
475
476
477
478
479
def _distributed_worker(
    local_rank,
    main_func,
    world_size,
    num_gpus_per_machine,
    machine_rank,
    dist_url,
    args,
    timeout=DEFAULT_TIMEOUT,
):
    """Per-process entry point spawned by ``launch`` via ``mp.spawn``.

    Initializes the NCCL process group, pins this process to GPU
    ``local_rank``, records the GPU index on ``args[0]``, then runs
    ``main_func(*args)``.
    """
    assert (
        torch.cuda.is_available()
    ), "cuda is not available. Please check your installation."
    # Flatten (machine_rank, local_rank) into a single global rank.
    global_rank = machine_rank * num_gpus_per_machine + local_rank
    try:
        dist.init_process_group(
            backend="NCCL",
            init_method=dist_url,
            world_size=world_size,
            rank=global_rank,
            timeout=timeout,
        )
    except Exception as e:
        # Surface the rendezvous URL before re-raising to ease debugging
        # of mis-configured multi-machine jobs.
        logger = logging.getLogger(__name__)
        logger.error("Process group URL: {}".format(dist_url))
        raise e
    # synchronize is needed here to prevent a possible timeout after calling init_process_group
    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
    synchronize()

    assert num_gpus_per_machine <= torch.cuda.device_count()
    torch.cuda.set_device(local_rank)
    # Workers read their device index from args[0].gpu (see main()).
    args[0].gpu = local_rank

    # Setup the local process group (which contains ranks within the same machine)
    # assert comm._LOCAL_PROCESS_GROUP is None
    # num_machines = world_size // num_gpus_per_machine
    # for i in range(num_machines):
    #    ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
    #    pg = dist.new_group(ranks_on_i)
    #    if i == machine_rank:
    #        comm._LOCAL_PROCESS_GROUP = pg

    main_func(*args)


516
def invoke_main() -> None:
    """
    Script entry point: parse command-line arguments, create the output
    directory if requested, and delegate to ``launch`` (which either calls
    ``main`` directly or spawns distributed workers).
    """
    cli = argparse.ArgumentParser(
        "DETR training and evaluation script", parents=[get_args_parser()]
    )
    parsed_args = cli.parse_args()
    if parsed_args.output_dir:
        PathManager.mkdirs(parsed_args.output_dir)
    print("Command Line Args:", parsed_args)
    launch(
        main,
        parsed_args.num_gpus,
        num_machines=parsed_args.num_machines,
        machine_rank=parsed_args.machine_rank,
        dist_url=parsed_args.dist_url,
        args=(parsed_args,),
    )
532
533
534
535


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    invoke_main()  # pragma: no cover