from functools import partial
from time import time

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close

import colossalai
from colossalai.amp import convert_to_apex_amp
from colossalai.gemini.chunk import ChunkManager, init_chunk_manager, search_chunk_configuration
from colossalai.gemini.gemini_mgr import GeminiManager
from colossalai.nn.optimizer import HybridAdam
from colossalai.nn.optimizer.zero_optimizer import ZeroOptimizer
from colossalai.nn.parallel import ZeroDDP
from colossalai.tensor import ColoParameter, ColoTensor
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext, post_process_colo_init_ctx
from tests.components_to_test import run_fwd_bwd
from tests.components_to_test.registry import non_distributed_component_funcs
from tests.test_tensor.common_utils import debug_print, set_seed


def check_param(model: ZeroDDP, torch_model: torch.nn.Module):
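    """Verify that every parameter of the ZeroDDP model matches the torch reference model within fp16 tolerance."""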
    zero_dict = model.state_dict(only_rank_0=False)
    torch_dict = torch_model.state_dict()

    for key, value in torch_dict.items():
        # keys of the DDP-wrapped reference model look like 'module.model.PARAMETER', so strip the leading 'module.' prefix
        key = key[7:]
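        # 'model.lm_head.weight' has no separate entry in the ZeRO state dict (presumably tied with the embedding), so skip it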
        if key == 'model.lm_head.weight':
            continue
        assert key in zero_dict, "{} not in ZeRO dictionary.".format(key)
        temp_zero_value = zero_dict[key].to(device=value.device, dtype=value.dtype)
        # debug_print([0], "max range: ", key, torch.max(torch.abs(value - temp_zero_value)))
        assert_close(value, temp_zero_value, rtol=1e-3, atol=1e-2)


# 'gpt2', 'bert',
TEST_MODELS = ['no_leaf_module', 'gpt2', 'bert', 'simple_net', 'nested_model', 'repeated_computed_layers']


@parameterize('placement_policy', ['cuda', 'cpu', 'auto', 'const'])
@parameterize('model_name', TEST_MODELS)
def exam_model_step(placement_policy, model_name: str):
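    """Train a Gemini ZeroDDP model alongside an apex-AMP DDP reference and check that losses and parameters stay in sync."""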
    set_seed(42)
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

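    # reference model: the same network in fp16 via apex AMP (O2), wrapped in torch DDP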
    torch_model = model_builder().cuda()
    amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False, loss_scale=128)
    torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-3)
    torch_model, torch_optim = convert_to_apex_amp(torch_model, torch_optim, amp_config)
    torch_model = DDP(torch_model, device_ids=[dist.get_rank()])

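    # build the Gemini model under ColoInitContext so its parameters are created as ColoParameters on init_dev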
    init_dev = get_current_device()
    with ColoInitContext(device=init_dev):
        model = model_builder()

    post_process_colo_init_ctx(model, device=init_dev)

    for torch_p, p in zip(torch_model.parameters(), model.parameters()):
        p.data.copy_(torch_p.data)

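    # search a chunk configuration for the current world size, then override it with a small fixed chunk size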
    world_size = torch.distributed.get_world_size()
    config_dict, _ = search_chunk_configuration(model, search_range_mb=1, search_interval_byte=100)
    config_dict[world_size]['chunk_size'] = 5000
    config_dict[world_size]['keep_gathered'] = False
    if placement_policy != 'cuda':
        init_device = torch.device('cpu')
    else:
        init_device = None
    chunk_manager = ChunkManager(config_dict, init_device=init_device)
    gemini_manager = GeminiManager(placement_policy, chunk_manager)
    model = ZeroDDP(model, gemini_manager, pin_memory=True)

    optimizer = HybridAdam(model.parameters(), lr=1e-3)
    zero_optim = ZeroOptimizer(optimizer, model, initial_scale=128)

    model.eval()
    torch_model.eval()

    set_seed(dist.get_rank() * 3 + 128)
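    # a few steps are enough: the loss and the updated parameters must match the reference after every step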
    for i, (input_ids, label) in enumerate(train_dataloader):
        if i > 2:
            break
        input_ids, label = input_ids.cuda(), label.cuda()
        zero_optim.zero_grad()
        torch_optim.zero_grad()

        torch_loss = run_fwd_bwd(torch_model, input_ids, label, criterion, torch_optim)
        loss = run_fwd_bwd(model, input_ids, label, criterion, zero_optim)
        assert_close(torch_loss, loss)

        zero_optim.step()
        torch_optim.step()

        check_param(model, torch_model)


@parameterize('placement_policy', ['cuda', 'cpu'])
@parameterize('model_name', TEST_MODELS)
def exam_tiny_example(placement_policy, model_name: str):
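    """Same loss/parameter comparison as exam_model_step, but with the chunk manager built automatically by init_chunk_manager."""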
    set_seed(2008)
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

    torch_model = model_builder().cuda()
    amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False, loss_scale=2)
    torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-3)
    torch_model, torch_optim = convert_to_apex_amp(torch_model, torch_optim, amp_config)
    torch_model = DDP(torch_model, device_ids=[dist.get_rank()])

    init_dev = get_current_device()
    with ColoInitContext(device=init_dev):
        model = model_builder()

    post_process_colo_init_ctx(model, device=init_dev)

    for torch_p, p in zip(torch_model.parameters(), model.parameters()):
        p.data.copy_(torch_p.data)

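    # unlike exam_model_step, the chunk configuration here is searched automatically by init_chunk_manager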
    chunk_manager = init_chunk_manager(model=model, init_device=get_current_device(), search_range_mb=1)
    gemini_manager = GeminiManager(placement_policy, chunk_manager)
    model = ZeroDDP(model, gemini_manager, pin_memory=True)
    optimizer = HybridAdam(model.parameters(), lr=1e-3)
    zero_optim = ZeroOptimizer(optimizer, model, initial_scale=2)

    model.eval()
    torch_model.eval()

    set_seed(dist.get_rank() * 3 + 128)
    for i, (input_ids, label) in enumerate(train_dataloader):
        if i > 2:
            break

        input_ids = input_ids.cuda()
        label = label.cuda()

        zero_optim.zero_grad()
        torch_optim.zero_grad()

        torch_loss = run_fwd_bwd(torch_model, input_ids, label, criterion, torch_optim)
        loss = run_fwd_bwd(model, input_ids, label, criterion, zero_optim)
        assert_close(torch_loss, loss)

        zero_optim.step()
        torch_optim.step()

        check_param(model, torch_model)


def run_dist(rank, world_size, port):
    config = {}
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    exam_model_step()
    exam_tiny_example()


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_optim(world_size):
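    """Spawn world_size processes and run both Gemini optimizer exams on every rank."""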
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_optim(1)