#!/usr/bin/env python
# -*- encoding: utf-8 -*-
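"""Manual test for ColossalAI's ZeRO redundancy optimizers (levels 1-3).

Builds a model, dataloaders and loss from the config next to this file, wraps
a base Adam optimizer in the ZeRO level requested by the config, then runs a
training loop with periodic evaluation while reporting peak CUDA memory.
"""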

import os.path as osp

import pytest
import torch
from torch.utils.data import DataLoader

import colossalai
from colossalai.builder import build_dataset, build_loss, build_data_sampler, build_model
from colossalai.core import global_context
from colossalai.engine.gradient_handler import DataParallelGradientHandler
from colossalai.nn.optimizer import ZeroRedundancyOptimizer_Level_1, ZeroRedundancyOptimizer_Level_3, \
    ZeroRedundancyOptimizer_Level_2
from colossalai.utils import print_rank_0

DIR_PATH = osp.dirname(osp.abspath(__file__))
CONFIG_PATH = osp.join(DIR_PATH, 'config.py')


def run_dist():
    colossalai.init_dist(CONFIG_PATH)

    # build resnet model
    model = build_model(global_context.config.model)
    model.build_from_cfg()
    model = model.cuda()

    level = global_context.config.level

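    # ZeRO levels 2 and 3 exercise the fp16 path, so cast the model to half precision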
    if level > 1:
        model = model.half()

    # force CUDA context creation and record peak memory after initialization
    _ = torch.rand(1).cuda()
    torch.cuda.synchronize()
    max_alloc = torch.cuda.max_memory_allocated()
    max_reserved = torch.cuda.max_memory_reserved()
    print(f'before run: max_allocated = {max_alloc}, max_reserved = {max_reserved}')

    # build dataloader
    train_dataset = build_dataset(global_context.config.train_data.dataset)

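    # use a sampler if one is configured (e.g. a distributed sampler so each
    # rank sees its own shard of the training set)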
    sampler_cfg = global_context.config.train_data.dataloader.pop('sampler', None)
    if sampler_cfg is None:
        train_dataloader = DataLoader(dataset=train_dataset, **global_context.config.train_data.dataloader)
    else:
        sampler = build_data_sampler(sampler_cfg, train_dataset)
        train_dataloader = DataLoader(dataset=train_dataset, sampler=sampler,
                                      **global_context.config.train_data.dataloader)

    test_dataset = build_dataset(global_context.config.test_data.dataset)
    test_dataloader = DataLoader(dataset=test_dataset, **global_context.config.test_data.dataloader)

    # build optimizer and loss
    # optimizer = build_optimizer(global_context.config.optimizer, model)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
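    # wrap the base optimizer in the requested ZeRO stage: level 1 partitions
    # optimizer states across data-parallel ranks, level 2 additionally
    # partitions gradients (here with CPU offload), and level 3 also partitions
    # the parameters themselves (here offloading both to CPU)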
    if level == 1:
        zero_optim = ZeroRedundancyOptimizer_Level_1(init_optimizer=optimizer, verbose=False)
    elif level == 2:
        zero_optim = ZeroRedundancyOptimizer_Level_2(init_optimizer=optimizer, cpu_offload=True, verbose=False)
    elif level == 3:
        zero_optim = ZeroRedundancyOptimizer_Level_3(init_optimizer=optimizer,
                                                     module=model,
                                                     verbose=False,
                                                     offload_optimizer_config=dict(
                                                         device='cpu',
                                                         pin_memory=True,
                                                         buffer_count=5,
                                                         fast_init=False
                                                     ),
                                                     offload_param_config=dict(
                                                         device='cpu',
                                                         pin_memory=True,
                                                         buffer_count=5,
                                                         buffer_size=1e8,
                                                         max_in_cpu=1e9
                                                     )
                                                     )

    loss_fn = build_loss(global_context.config.loss)
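    # ZeRO level 1 does not reduce gradients itself, so a data-parallel
    # all-reduce handler is attached for that case (see the training loop)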
    gradient_handler = DataParallelGradientHandler(model, zero_optim)

    # train
    for epoch in range(100):
        model.train()

        # accumulators for the average training loss
        avg_train_loss = 0
        train_iter = 0

        for idx, (data, label) in enumerate(train_dataloader):
            data = data[0].cuda()
            label = label[0].cuda()

            if level > 1:
                data = data.half()

            output = model(data)
            loss = loss_fn(output[0], label)

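            # levels 2/3 route backward through the ZeRO optimizer, which
            # reduces and partitions gradients internally; level 1 does a
            # plain backward followed by an explicit all-reduce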
            if level > 1:
                zero_optim.backward(loss)
                zero_optim.overlapping_partition_gradients_reduce_epilogue()
            else:
                loss.backward()
                gradient_handler.handle_gradient()

            zero_optim.step()
            zero_optim.zero_grad()

            avg_train_loss += loss.detach().cpu().numpy()
            train_iter += 1

        print_rank_0(f'epoch: {epoch}, train loss: {avg_train_loss / train_iter}')

        if epoch % 2 == 0:
            model.eval()
            avg_eval_loss = 0
            correct = 0
            total = 0
            eval_iters = 0

            for idx, (data, label) in enumerate(test_dataloader):
                with torch.no_grad():
                    data = data[0].cuda()
                    label = label[0].cuda()

                    if level > 1:
                        data = data.half()

                    output = model(data)
                    loss = loss_fn(output[0], label)

                avg_eval_loss += loss.detach().cpu().numpy()
                preds = torch.argmax(output[0], dim=1)
                total += data.size(0)
                correct += (preds == label).sum().item()
                eval_iters += 1

            print_rank_0(f'epoch: {epoch}, eval loss: {avg_eval_loss / eval_iters}, acc: {correct / total}')


@pytest.mark.skip("This test should be invoked manually using the script provided")
@pytest.mark.dist
def test_zero():
    run_dist()


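# Skipped under plain pytest (see the marker above); launch manually with the
# provided script in a multi-process distributed setup.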
if __name__ == '__main__':
    test_zero()