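# Tests for colossalai.utils.checkpoint_io: round-tripping checkpoints that are
# saved/loaded globally or across tensor-parallel / data-parallel / ZeRO ranks.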
from copy import deepcopy
from functools import partial
from tempfile import TemporaryDirectory
from typing import Dict

import pytest
import torch
import torch.distributed as dist
import torch.nn as nn
from torch import Tensor
from torch.nn import Module
from torch.optim import Adam, Optimizer


import colossalai
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.utils.checkpoint_io.io import load, save
from colossalai.utils.checkpoint_io.meta import ParamDistMeta, ParamRedistMeta, RankRedistMeta, RedistMeta


def check_model_state_dict(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> None:
    assert set(a.keys()) == set(b.keys())
    for k, v in a.items():
        assert torch.equal(v, b[k])


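# Compare two optimizer state dicts entry by entry; the 'param_groups' comparison
# can be skipped via ignore_param_groups.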
def check_optim_state_dict(a: dict, b: dict, ignore_param_groups: bool = False) -> None:
    assert set(a['state'].keys()) == set(b['state'].keys())
    for k, state in a['state'].items():
        b_state = b['state'][k]
        for v1, v2 in zip(state.values(), b_state.values()):
            if isinstance(v1, Tensor):
                assert torch.equal(v1, v2)
            else:
                assert v1 == v2
    if not ignore_param_groups:
        assert a['param_groups'] == b['param_groups']


class DummyModel(nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.fc = nn.Linear(20, 1)


def prepare_model_optim(shard: bool = False, zero: bool = False):
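    # Build a DummyModel with an Adam optimizer and run one step on random gradients
    # so the optimizer has state worth checkpointing.
    # shard=True: keep only this rank's column-wise half of fc.weight (tensor parallel).
    # zero=True:  additionally flatten the local weight and split it unevenly across
    #             the two data-parallel ranks (ZeRO-style partitioning); non-owner
    #             ranks keep an empty bias tensor.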
    model = DummyModel()
    if shard:
        model.fc.weight.data = model.fc.weight.chunk(2, 1)[dist.get_rank() % 2]
    if zero:
        dp_rank = dist.get_rank() // 2
        model.fc.weight.data = model.fc.weight.reshape(-1).split([3, model.fc.weight.size(1) - 3], 0)[dp_rank]
        if dp_rank != 0:
            model.fc.bias.data = torch.empty(0, dtype=model.fc.bias.dtype)
    for p in model.parameters():
        p.grad = torch.rand_like(p)
    optimizer = Adam(model.parameters(), lr=1e-3)
    optimizer.step()
    return model, optimizer


def reset_model_optim(model: Module, optimizer: Optimizer, scalar: float = 0.0):
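    # Fill every parameter and optimizer state tensor with `scalar` so that values
    # restored by load() can be told apart from whatever was there before.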
    with torch.no_grad():
        for p in model.parameters():
            p.fill_(scalar)
        for state in optimizer.state.values():
            for v in state.values():
                if isinstance(v, Tensor):
                    v.fill_(scalar)


def get_dist_metas(nprocs: int, zero: bool = False):
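    # Per-rank ParamDistMeta describing how prepare_model_optim() shards parameters:
    # dp_rank = rank // 2, tp_rank = rank % 2, fc.weight split into 2 parts along dim 1,
    # fc.bias unsharded; with zero=True the flattened ZeRO shapes are recorded as well.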
    dp_world_size = nprocs // 2
    dist_metas = []
    for rank in range(nprocs):
        if zero:
            dist_metas.append({
                'fc.weight':
                    ParamDistMeta(rank // 2,
                                  dp_world_size,
                                  rank % 2,
                                  2,
                                  tp_shard_dims=[1],
                                  tp_num_parts=[2],
                                  zero_numel=10,
                                  zero_orig_shape=[1, 10]),
                'fc.bias':
                    ParamDistMeta(rank // 2, dp_world_size, 0, 1, zero_numel=1, zero_orig_shape=[1])
            })
        else:
            dist_metas.append({
                'fc.weight': ParamDistMeta(rank // 2, dp_world_size, rank % 2, 2, tp_shard_dims=[1], tp_num_parts=[2]),
                'fc.bias': ParamDistMeta(rank // 2, dp_world_size, 0, 1)
            })
    return dist_metas


def get_redist_meta(nprocs: int):
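    # Target layout used when redistributing a checkpoint at load time:
    # fc.weight is re-split into 2 parts along dim 1 across tensor-parallel ranks,
    # while fc.bias stays whole within each tensor-parallel group.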
    dp_world_size = nprocs // 2
    rank_meta = {
        'fc.weight': {rank: RankRedistMeta(rank // 2, rank % 2, 0) for rank in range(nprocs)},
        'fc.bias': {rank: RankRedistMeta(rank // 2, 0, 0) for rank in range(nprocs)}
    }
    param_meta = {
        'fc.weight': ParamRedistMeta(dp_world_size, 2, tp_shard_dims=[1], tp_num_parts=[2]),
        'fc.bias': ParamRedistMeta(dp_world_size, 1)
    }
    return RedistMeta(rank_meta, [], param_meta)


@pytest.mark.parametrize('max_shard_size_gb', [80 / 1024**3, 0])
def test_save_global_load_global(max_shard_size_gb: float):
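    # Save and reload a non-distributed checkpoint, once with a tiny (~80 byte)
    # shard size limit and once with a limit of 0.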
    model, optimizer = prepare_model_optim()
    with TemporaryDirectory() as dir_name:
        save(dir_name, model, optimizer, max_shard_size_gb=max_shard_size_gb)
        new_model, new_optimizer = prepare_model_optim()
        load(dir_name, new_model, new_optimizer, max_shard_size_gb=max_shard_size_gb)
        check_model_state_dict(model.state_dict(), new_model.state_dict())
        check_optim_state_dict(optimizer.state_dict(), new_optimizer.state_dict())


def run_dist(rank, world_size, port, test_fn):
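    # Per-process entry point: initialize the process group via colossalai.launch,
    # then run the actual test body.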
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    test_fn()


def launch_dist(fn, world_size: int):
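    # Spawn `world_size` processes that each execute `fn` after distributed init.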
    spawn(run_dist, world_size, test_fn=fn)


def save_dist(dir_name: str, zero: bool):
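    # Save a tensor-parallel (and optionally ZeRO-partitioned) checkpoint, passing
    # this rank's distribution metadata to save().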
    model, optimizer = prepare_model_optim(shard=True, zero=zero)
    reset_model_optim(model, optimizer)
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    save(dir_name, model, optimizer, dist_meta=get_dist_metas(world_size, zero)[rank])


def load_and_check_dist(dir_name: str):
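    # Rebuild the sharded model, snapshot its zero-filled state, overwrite everything
    # with ones, then load the checkpoint (also saved zero-filled) and verify the
    # original values are restored.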
    world_size = dist.get_world_size()
    model, optimizer = prepare_model_optim(shard=True)
    reset_model_optim(model, optimizer)
    model_state_dict = deepcopy(model.state_dict())
    optimizer_state_dict = deepcopy(optimizer.state_dict())
    reset_model_optim(model, optimizer, 1)
    load(dir_name, model, optimizer, get_redist_meta(world_size), get_dist_metas(world_size))
    check_model_state_dict(model_state_dict, model.state_dict())
    check_optim_state_dict(optimizer_state_dict, optimizer.state_dict())


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_save_global_load_dist():
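    # Save a single global checkpoint, then load it into 4 processes with
    # tensor-parallel redistribution.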
    model, optimizer = prepare_model_optim()
    reset_model_optim(model, optimizer)
    with TemporaryDirectory() as dir_name:
        save(dir_name, model, optimizer)
        fn = partial(load_and_check_dist, dir_name)
        launch_dist(fn, 4)


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_save_dist_load_dist():
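    # Save from a 2-process tp + dp run and reload on 2 processes, then save from a
    # 4-process tp + zero run and reload on both 2 and 4 tp + dp processes.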
    with TemporaryDirectory() as dir_name:
        # save tp + dp
        fn = partial(save_dist, dir_name, False)
        launch_dist(fn, 2)
        # load tp + dp
        fn = partial(load_and_check_dist, dir_name)
        launch_dist(fn, 2)
    with TemporaryDirectory() as dir_name:
        # save tp + zero
        fn = partial(save_dist, dir_name, True)
        launch_dist(fn, 4)
        # load tp + dp on both 2 and 4 processes
        fn = partial(load_and_check_dist, dir_name)
        launch_dist(fn, 2)
        launch_dist(fn, 4)


if __name__ == '__main__':
    test_save_global_load_global(80 / 1024**3)
    test_save_global_load_global(0)
    test_save_global_load_dist()
    test_save_dist_load_dist()