engine.py
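"""Training and evaluation loops for torchvision object detection models.

``train_one_epoch`` runs a single training epoch with optional linear learning
rate warmup and optional mixed-precision (AMP) support; ``evaluate`` runs
COCO-style evaluation via ``CocoEvaluator``.
"""
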
import math
import sys
import time

import torch
import torchvision.models.detection.mask_rcnn
import utils
from coco_eval import CocoEvaluator
from coco_utils import get_coco_api_from_dataset


def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):
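    """Train ``model`` for a single epoch.

    ``data_loader`` must yield ``(images, targets)`` batches in the torchvision
    detection format: a sequence of image tensors and a sequence of target dicts.
    Pass a ``torch.cuda.amp.GradScaler`` as ``scaler`` to train with mixed
    precision; with ``scaler=None`` the step runs in full precision.
    """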
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
    header = f"Epoch: [{epoch}]"

    lr_scheduler = None
    if epoch == 0:
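        # Linearly warm the learning rate up from lr / 1000 to the base lr over
        # at most 1000 iterations during the first epoch.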
        warmup_factor = 1.0 / 1000
        warmup_iters = min(1000, len(data_loader) - 1)

        lr_scheduler = torch.optim.lr_scheduler.LinearLR(
            optimizer, start_factor=warmup_factor, total_iters=warmup_iters
        )

    for images, targets in metric_logger.log_every(data_loader, print_freq, header):
        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
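        # Forward pass; in train mode, torchvision detection models return a dict
        # of losses. Autocast is active only when a GradScaler was supplied (AMP).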
        with torch.cuda.amp.autocast(enabled=scaler is not None):
            loss_dict = model(images, targets)
            losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())

        loss_value = losses_reduced.item()

        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
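        # Backward pass: with AMP, scale the loss so small gradients do not
        # underflow in float16; otherwise run a standard backward/step.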
        if scaler is not None:
            scaler.scale(losses).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            losses.backward()
            optimizer.step()

        if lr_scheduler is not None:
            lr_scheduler.step()

        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    return metric_logger


def _get_iou_types(model):
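    """Return the IoU types CocoEvaluator should compute for this model:
    "bbox" always, plus "segm" for Mask R-CNN and "keypoints" for Keypoint R-CNN."""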
    model_without_ddp = model
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model_without_ddp = model.module
    iou_types = ["bbox"]
    if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
        iou_types.append("segm")
    if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
        iou_types.append("keypoints")
    return iou_types


@torch.inference_mode()
def evaluate(model, data_loader, device):
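    """Run COCO-style evaluation over ``data_loader`` on ``device`` and return
    the populated ``CocoEvaluator`` (inference runs under ``torch.inference_mode``)."""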
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = "Test:"

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
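

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the reference training script):
# shows how `train_one_epoch` is typically wired to a torchvision detection
# model. The toy in-memory dataset, model choice, and hyperparameters below
# are assumptions for demonstration only; a real run would use a COCO-style
# dataset (so that `evaluate` can build a COCO API object from it), usually
# with distributed data loading.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torchvision
    from torch.utils.data import DataLoader, Dataset

    class _ToyDetectionDataset(Dataset):
        """Random images with one dummy box each, in the target-dict format
        (boxes, labels, image_id) that `train_one_epoch` expects."""

        def __len__(self):
            return 4

        def __getitem__(self, idx):
            image = torch.rand(3, 128, 128)
            target = {
                "boxes": torch.tensor([[10.0, 10.0, 60.0, 60.0]]),
                "labels": torch.tensor([1], dtype=torch.int64),
                "image_id": torch.tensor([idx]),
            }
            return image, target

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # weights=None / weights_backbone=None avoids any checkpoint download
    # (requires torchvision >= 0.13); num_classes=2 means background + 1 class.
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        weights=None, weights_backbone=None, num_classes=2
    ).to(device)

    # utils.collate_fn (from the accompanying reference utils.py) batches the
    # variable-sized images and target dicts as tuples instead of stacking them.
    data_loader = DataLoader(
        _ToyDetectionDataset(), batch_size=2, shuffle=True, collate_fn=utils.collate_fn
    )
    optimizer = torch.optim.SGD(
        model.parameters(), lr=0.005, momentum=0.9, weight_decay=0.0005
    )

    # Epoch 0 triggers the linear warmup above; pass a torch.cuda.amp.GradScaler()
    # as `scaler` to train with mixed precision on CUDA.
    train_one_epoch(model, optimizer, data_loader, device, epoch=0, print_freq=1, scaler=None)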