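"""Checks that ZeroOptimizer's state dict round-trips under ZeroDDP: after one
training step, the optimizer state is loaded into an identically initialized
replica, verified for equality, and the replica keeps training from it."""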
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp

import colossalai
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.gemini import ChunkManager, GeminiManager
from colossalai.nn.optimizer import HybridAdam
from colossalai.nn.parallel.data_parallel import ZeroDDP
from colossalai.tensor import ProcessGroup
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
from colossalai.zero import ZeroOptimizer

from tests.components_to_test.registry import non_distributed_component_funcs
from tests.test_tensor._utils import set_seed


def init_zero(model, use_chunk, use_zero, placement_policy):
    """Wrap ``model`` in ZeroDDP, toggling chunked storage and distributed (ZeRO) storage."""
    # Search for a chunk size only when chunking is enabled; a None chunk size disables chunking.
    chunk_size = ChunkManager.search_chunk_size(model, 8192, 8) if use_chunk else None
    chunk_manager = ChunkManager(chunk_size,
                                 enable_distributed_storage=use_zero,
                                 init_device=GeminiManager.get_default_device(placement_policy))
    gemini_manager = GeminiManager(placement_policy, chunk_manager)
    pg = ProcessGroup()
    return ZeroDDP(model, gemini_manager, pg)


def run_step(model, optim, criterion, data, label):
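    """Run one forward/backward/update step; backward is routed through
    ZeroOptimizer so it can apply its loss scale."""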
    optim.zero_grad()
    logits = model(data)
    loss = criterion(logits, label)
    optim.backward(loss)
    optim.step()


def check_state_dict_eq(state_dict, other):
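    """Assert that two optimizer state dicts hold equal per-parameter states
    (tensors compared with a small tolerance)."""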
    for p, state in state_dict['state'].items():
        other_state = other['state'][p]
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                assert torch.allclose(v, other_state[k], atol=1e-3), f'{v} vs {other_state[k]}'
            else:
                assert v == other_state[k]


@parameterize('use_chunk', [False, True])
@parameterize('use_zero', [False, True])
@parameterize('placement_policy', ['cuda', 'cpu'])
def run_nested_model(use_chunk, use_zero, placement_policy):
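    """Train one replica for a step, load its optimizer state into an identically
    initialized replica, and check that the state dict round-trips."""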
    get_components_func = non_distributed_component_funcs.get_callable('nested_model')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

    # Reset the seed before each construction so both replicas start identical.
    set_seed(42)
    with ColoInitContext(device=get_current_device()):
        model = model_builder()
    set_seed(42)
    with ColoInitContext(device=get_current_device()):
        model_copy = model_builder()
    model = init_zero(model, use_chunk, use_zero, placement_policy)
    model_copy = init_zero(model_copy, use_chunk, use_zero, placement_policy)

    # Wrap HybridAdam in ZeroOptimizer so it cooperates with ZeroDDP's managed parameters.
    optim = HybridAdam(model.parameters(), lr=1e-3)
    optim = ZeroOptimizer(optim, model, initial_scale=32)
    optim_copy = HybridAdam(model_copy.parameters(), lr=1e-3)
    optim_copy = ZeroOptimizer(optim_copy, model_copy, initial_scale=32)

    model.train()
    model_copy.train()
    # Seed with the data-parallel rank so each rank draws its own batches.
    set_seed(gpc.get_local_rank(ParallelMode.DATA))
    data_iter = iter(train_dataloader)

    # Step the first model once, then copy its optimizer state into the replica.
    data, label = map(lambda x: x.cuda(), next(data_iter))
    run_step(model, optim, criterion, data, label)
    optim_copy.load_state_dict(optim.state_dict())
    check_state_dict_eq(optim.state_dict(), optim_copy.state_dict())

    # The replica should be able to keep training from the restored state.
    data, label = map(lambda x: x.cuda(), next(data_iter))
    run_step(model_copy, optim_copy, criterion, data, label)


def run_dist(rank, world_size, port):
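    """Per-process entry point: initialize the distributed environment, then run the test body."""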
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_nested_model()


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def test_zero_optim_state_dict(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_zero_optim_state_dict(2)