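"""Train ResNet-18 on synthetic data with a large-batch optimizer (LARS or LAMB)
using Colossal-AI's legacy engine API."""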
import torch
import torch.nn as nn
from torchvision.models import resnet18
from tqdm import tqdm

import colossalai
from colossalai.legacy.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.nn.optimizer import Lamb, Lars


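# Dataloader stand-in that yields random ImageNet-sized images with integer labels,
# so the script can run end to end without downloading a real dataset.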
class DummyDataloader:
    def __init__(self, length, batch_size):
        self.length = length
        self.batch_size = batch_size

    def generate(self):
        data = torch.rand(self.batch_size, 3, 224, 224)
        label = torch.randint(low=0, high=10, size=(self.batch_size,))
        return data, label

    def __iter__(self):
        self.step = 0
        return self

    def __next__(self):
        if self.step < self.length:
            self.step += 1
            return self.generate()
        else:
            raise StopIteration

    def __len__(self):
        return self.length


def main():
    # initialize distributed setting
    parser = colossalai.legacy.get_default_parser()
    parser.add_argument(
        "--optimizer", choices=["lars", "lamb"], help="Choose your large-batch optimizer", required=True
    )
    args = parser.parse_args()

    # launch from torch
    colossalai.legacy.launch_from_torch(config=args.config)
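    # The file passed via --config must define the hyperparameters read below through
    # gpc.config. A minimal sketch of such a config (attribute names come from the
    # accesses in this script; the values are illustrative assumptions):
    #
    #   # config.py
    #   BATCH_SIZE = 128
    #   NUM_CLASSES = 10
    #   NUM_EPOCHS = 2
    #   WARMUP_EPOCHS = 1
    #   LEARNING_RATE = 0.1
    #   WEIGHT_DECAY = 1e-4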

    # get logger
    logger = get_dist_logger()
    logger.info("initialized distributed environment", ranks=[0])

    # create synthetic dataloaders
    train_dataloader = DummyDataloader(length=10, batch_size=gpc.config.BATCH_SIZE)
    test_dataloader = DummyDataloader(length=5, batch_size=gpc.config.BATCH_SIZE)

    # build model
    model = resnet18(num_classes=gpc.config.NUM_CLASSES)

    # create loss function
    criterion = nn.CrossEntropyLoss()

    # create optimizer
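    # Both choices are layer-wise adaptive optimizers designed for large-batch training:
    # LARS scales each layer's learning rate by a trust ratio, while LAMB combines the
    # same idea with Adam-style moment estimates.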
    if args.optimizer == "lars":
        optim_cls = Lars
    elif args.optimizer == "lamb":
        optim_cls = Lamb
    optimizer = optim_cls(model.parameters(), lr=gpc.config.LEARNING_RATE, weight_decay=gpc.config.WEIGHT_DECAY)

    # create lr scheduler
    lr_scheduler = CosineAnnealingWarmupLR(
        optimizer=optimizer, total_steps=gpc.config.NUM_EPOCHS, warmup_steps=gpc.config.WARMUP_EPOCHS
    )

    # initialize
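    # colossalai.legacy.initialize wraps the model, optimizer and criterion into an
    # Engine, applies whatever parallel/AMP settings the config provides, and returns
    # the (possibly wrapped) dataloaders.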
    engine, train_dataloader, test_dataloader, _ = colossalai.legacy.initialize(
        model=model,
        optimizer=optimizer,
        criterion=criterion,
        train_dataloader=train_dataloader,
        test_dataloader=test_dataloader,
    )

    logger.info("Engine is built", ranks=[0])

    for epoch in range(gpc.config.NUM_EPOCHS):
        # training
        engine.train()
        data_iter = iter(train_dataloader)

        if gpc.get_global_rank() == 0:
            description = "Epoch {} / {}".format(epoch, gpc.config.NUM_EPOCHS)
            progress = tqdm(range(len(train_dataloader)), desc=description)
        else:
            progress = range(len(train_dataloader))
        for _ in progress:
            engine.zero_grad()
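            # Forward and backward pass for one batch, driven by the engine's schedule.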
            engine.execute_schedule(data_iter, return_output_label=False)
            engine.step()
            lr_scheduler.step()


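# Example launch (illustrative; any torchrun-compatible invocation works, since the
# script uses launch_from_torch):
#   torchrun --nproc_per_node=1 train.py --config config.py --optimizer lars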
if __name__ == "__main__":
    main()