lightning_train_net.py 5.51 KB
Newer Older
facebook-github-bot's avatar
facebook-github-bot committed
1
2
3
4
5
6
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved


import logging
import os
7
from typing import Any, Dict, List, Type, Union
facebook-github-bot's avatar
facebook-github-bot committed
8

9
import mobile_cv.torch.utils_pytorch.comm as comm
facebook-github-bot's avatar
facebook-github-bot committed
10
import pytorch_lightning as pl  # type: ignore
11
from d2go.config import CfgNode
12
from d2go.runner.callbacks.quantization import QuantizationAwareTraining
13
14
from d2go.runner.lightning_task import DefaultTask
from d2go.setup import basic_argument_parser, prepare_for_launch, setup_after_launch
15
from d2go.trainer.api import TrainNetOutput
16
from d2go.trainer.lightning.training_loop import _do_test, _do_train
17
from detectron2.utils.file_io import PathManager
18
from pytorch_lightning.callbacks import Callback, LearningRateMonitor, TQDMProgressBar
facebook-github-bot's avatar
facebook-github-bot committed
19
20
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
21
from pytorch_lightning.strategies.ddp import DDPStrategy
facebook-github-bot's avatar
facebook-github-bot committed
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
from torch.distributed import get_rank


# Configure root logging at import time so all loggers emit INFO and above.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("detectron2go.lightning.train_net")

# Filename used for the final exported checkpoint; extension comes from
# Lightning's ModelCheckpoint (".ckpt").
FINAL_MODEL_CKPT = f"model_final{ModelCheckpoint.FILE_EXTENSION}"


def _get_trainer_callbacks(cfg: CfgNode) -> List[Callback]:
    """Assemble the Lightning callbacks for this D2Go config.

    Args:
        cfg: The normalized ConfigNode for this D2Go Task.

    Returns:
        The list of configured Callbacks to hand to the Lightning Trainer.
    """
    checkpointer = ModelCheckpoint(
        dirpath=cfg.OUTPUT_DIR,
        save_last=True,
    )
    callbacks: List[Callback] = [
        # refresh_rate of 10 is arbitrary; it only throttles the progress bar.
        TQDMProgressBar(refresh_rate=10),
        LearningRateMonitor(logging_interval="step"),
        checkpointer,
    ]
    # Attach QAT support when quantization-aware training is enabled.
    if cfg.QUANTIZATION.QAT.ENABLED:
        callbacks.append(QuantizationAwareTraining.from_config(cfg))
    return callbacks

Yanghan Wang's avatar
Yanghan Wang committed
52

53
54
55
56
def _get_strategy(cfg: CfgNode) -> DDPStrategy:
    """Create the Lightning DDP strategy configured from the D2Go config."""
    find_unused = cfg.MODEL.DDP_FIND_UNUSED_PARAMETERS
    return DDPStrategy(find_unused_parameters=find_unused)


Kai Zhang's avatar
Kai Zhang committed
57
def _get_accelerator(use_cpu: bool) -> str:
58
    return "cpu" if use_cpu else "gpu"
facebook-github-bot's avatar
facebook-github-bot committed
59

Kai Zhang's avatar
Kai Zhang committed
60

61
62
63
64
65
66
67
68
69
70
71
72
def _get_lightning_precision(precision: str) -> Union[str, int]:
    """
    Convert our string format for precision to what lightning Trainer expects
    """
    if precision == "float16":
        return 16
    elif precision == "bfloat16":
        return "bf16"
    else:
        return precision


73
def get_trainer_params(cfg: CfgNode) -> Dict[str, Any]:
    """Build the keyword arguments used to construct a ``pl.Trainer``.

    Args:
        cfg: The normalized D2Go ConfigNode.

    Returns:
        A dict of Trainer constructor keyword arguments derived from the config.
    """
    on_cpu = cfg.MODEL.DEVICE.lower() == "cpu"

    # Validate every EVAL_PERIOD steps; if disabled (<= 0), validate only at
    # the end of training.
    eval_interval = (
        cfg.TEST.EVAL_PERIOD if cfg.TEST.EVAL_PERIOD > 0 else cfg.SOLVER.MAX_ITER
    )

    trainer_kwargs: Dict[str, Any] = {
        "max_epochs": -1,  # step-based training: epochs are unbounded
        "max_steps": cfg.SOLVER.MAX_ITER,
        "val_check_interval": eval_interval,
        "num_nodes": comm.get_num_nodes(),
        "devices": comm.get_local_size(),
        "strategy": _get_strategy(cfg),
        "accelerator": _get_accelerator(on_cpu),
        "callbacks": _get_trainer_callbacks(cfg),
        "logger": TensorBoardLogger(save_dir=cfg.OUTPUT_DIR),
        "num_sanity_val_steps": 0,
        "replace_sampler_ddp": False,
        "precision": (
            _get_lightning_precision(cfg.SOLVER.AMP.PRECISION)
            if cfg.SOLVER.AMP.ENABLED
            else 32
        ),
    }

    if cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
        clip_type = cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE
        # Lightning's norm-based clipping only implements the L2 norm.
        if clip_type.lower() == "norm" and cfg.SOLVER.CLIP_GRADIENTS.NORM_TYPE != 2.0:
            raise ValueError(
                "D2Go Lightning backend supports only L2-norm for norm-based gradient clipping!"
            )
        trainer_kwargs["gradient_clip_val"] = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
        trainer_kwargs["gradient_clip_algorithm"] = clip_type

    return trainer_kwargs
108

Yanghan Wang's avatar
Yanghan Wang committed
109

facebook-github-bot's avatar
facebook-github-bot committed
110
111
def main(
    cfg: CfgNode,
    output_dir: str,
    runner_class: Union[str, Type[DefaultTask]],
    eval_only: bool = False,
) -> TrainNetOutput:
    """Main function for launching a training with lightning trainer.

    Args:
        cfg: D2Go config node.
        output_dir: Directory where logs, checkpoints and other artifacts are
            written.
        runner_class: The DefaultTask subclass (or its dotted-path name) that
            defines the task to train or evaluate.
        eval_only: True if run evaluation only.

    Returns:
        A TrainNetOutput holding the tensorboard log dir, the evaluation
        results, and (for training runs) the produced model configs.
    """
    # NOTE(review): previous docstring documented non-existent ``num_machines``
    # and ``num_processes`` parameters; corrected to the actual signature.
    task_cls: Type[DefaultTask] = setup_after_launch(cfg, output_dir, runner_class)

    task = task_cls.from_config(cfg, eval_only)
    trainer_params = get_trainer_params(cfg)

    last_checkpoint = os.path.join(cfg.OUTPUT_DIR, "last.ckpt")
    if PathManager.exists(last_checkpoint):
        # resume training from checkpoint
        trainer_params["resume_from_checkpoint"] = last_checkpoint
        logger.info(f"Resuming training from checkpoint: {last_checkpoint}.")

    trainer = pl.Trainer(**trainer_params)
    model_configs = None
    if eval_only:
        _do_test(trainer, task)
    else:
        model_configs = _do_train(cfg, trainer, task)

    return TrainNetOutput(
        tensorboard_log_dir=trainer_params["logger"].log_dir,
        accuracy=task.eval_res,
        metrics=task.eval_res,
        model_configs=model_configs,
    )


def argument_parser():
    """Build the command-line parser for the Lightning train_net entry point."""
    parser = basic_argument_parser(distributed=True, requires_output_dir=False)
    parser.add_argument(
        "--eval-only", action="store_true", help="perform evaluation only"
    )
    # The Lightning backend defaults to the lightning-task runner.
    parser.set_defaults(runner="d2go.runner.lightning_task.GeneralizedRCNNTask")
    return parser


if __name__ == "__main__":
    # Parse CLI arguments and materialize the config before launching.
    cli_args = argument_parser().parse_args()
    config, out_dir, runner = prepare_for_launch(cli_args)
    result = main(config, out_dir, runner, eval_only=cli_args.eval_only)
    # Only the rank-0 process prints the final training output.
    if get_rank() == 0:
        print(result)