import copy
import math
from contextlib import nullcontext
from typing import Any, Callable, Dict, List, Optional

import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed import ProcessGroup
from torch.nn import Module
from torch.optim import Adam, Optimizer

from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin
from colossalai.lazy import LazyInitContext
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.shardformer import ShardConfig, ShardFormer
from colossalai.shardformer._utils import getattr_
from colossalai.shardformer.policies.auto_policy import Policy
from colossalai.tensor.d_tensor.api import is_customized_distributed_tensor, is_distributed_tensor


def build_model(model_fn,
                enable_fused_normalization=True,
                enable_tensor_parallelism=True,
                enable_flash_attention=False,
                enable_jit_fused=False,
                enable_sequence_parallelism=False,
                use_lazy_init: bool = False):
    # create new model, optionally under lazy init to avoid allocating full weights up front
    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        org_model = model_fn()
        model_copy = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)

    # shard the copy and keep the original for comparison
    # (the redundant second deepcopy of org_model was removed; model_copy
    # already holds the copy made inside the init context)
    shard_config = ShardConfig(enable_fused_normalization=enable_fused_normalization,
                               enable_tensor_parallelism=enable_tensor_parallelism,
                               enable_flash_attention=enable_flash_attention,
                               enable_jit_fused=enable_jit_fused,
                               enable_sequence_parallelism=enable_sequence_parallelism)
    shard_former = ShardFormer(shard_config=shard_config)
    sharded_model, shared_params = shard_former.optimize(model_copy)
    return org_model.cuda(), sharded_model.cuda()
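
# Usage sketch (hypothetical test body; assumes torch.distributed is already
# initialized, e.g. via colossalai.launch, and that `model_fn`/`data_gen_fn`
# come from the test's model zoo):
#   org_model, sharded_model = build_model(model_fn, enable_tensor_parallelism=True)
#   data = {k: v.cuda() for k, v in data_gen_fn().items()}
#   org_out, shard_out = org_model(**data), sharded_model(**data)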


def build_pipeline_model(model_fn,
                         stage_manager=None,
                         enable_fused_normalization=False,
                         enable_tensor_parallelism=False,
                         use_lazy_init: bool = False,
                         policy: Optional[Policy] = None):
    # create new model, optionally under lazy init
    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        org_model = model_fn()
        model_copy = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)

    # shard the copy into pipeline stages; the stage manager decides the layer split
    shard_config = ShardConfig(enable_fused_normalization=enable_fused_normalization,
                               enable_tensor_parallelism=enable_tensor_parallelism,
                               pipeline_stage_manager=stage_manager)
    shard_former = ShardFormer(shard_config=shard_config)
    sharded_model, shared_params = shard_former.optimize(model_copy, policy=policy)
    return org_model.cuda(), sharded_model.cuda()
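
# Usage sketch (hypothetical; assumes a ProcessGroupMesh with a pipeline axis,
# and the names below are illustrative):
#   pg_mesh = ProcessGroupMesh(2)                     # two pipeline stages
#   stage_manager = PipelineStageManager(pg_mesh, 0)  # axis 0 is the PP axis
#   org_model, sharded_model = build_pipeline_model(model_fn, stage_manager)
#   # each rank then holds only the layers assigned to its pipeline stage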


def run_forward(original_model, sharded_model, data_gen_fn, output_transform_fn, loss_fn):
    # prepare input
    data = data_gen_fn()
    data = {k: v.cuda() for k, v in data.items()}
    # switch to train mode
    original_model.train()
    sharded_model.train()
    # run forward
    org_output = original_model(**data)
    org_output = output_transform_fn(org_output)
    org_loss = loss_fn(org_output)

    shard_output = sharded_model(**data)
    shard_output = output_transform_fn(shard_output)
    shard_loss = loss_fn(shard_output)
    return org_output, org_loss, shard_output, shard_loss
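
# Usage sketch (hypothetical; `loss_fn` reduces the transformed output to a scalar):
#   org_out, org_loss, shard_out, shard_loss = run_forward(
#       org_model, sharded_model, data_gen_fn, output_transform_fn, loss_fn)
#   assert torch.allclose(org_loss, shard_loss, atol=1e-5, rtol=1e-3)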


def check_state_dict(org_model: Module, sharded_model: Module, name: str = ''):
    org_sd = org_model.state_dict()
    shard_sd = sharded_model.state_dict()
    for k, v in org_sd.items():
        assert k in shard_sd, f'{name} {k} not in sharded model'
        shard_v = shard_sd[k]
        assert v.shape == shard_v.shape, f'{name} {k} shape mismatch, {v.shape} vs {shard_v.shape}'
        assert v.dtype == shard_v.dtype, f'{name} {k} dtype mismatch, {v.dtype} vs {shard_v.dtype}'
        assert torch.equal(v, shard_v), f'{name} {k} value mismatch'
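
# Usage sketch (hypothetical; meaningful when the sharded model keeps full-size
# parameters, e.g. with tensor parallelism disabled, so the state dicts match exactly):
#   check_state_dict(org_model, sharded_model, name='bert')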


def build_model_from_hybrid_plugin(model_fn: Callable, loss_fn: Callable, test_config: Dict[str, Any]):
    use_lazy_init = test_config.pop('use_lazy_init', False)

    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        org_model = model_fn()
        sharded_model = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)

    org_model = org_model.cuda()
    org_optimizer = Adam(org_model.parameters(), lr=1e-3)
    sharded_optimizer = Adam(sharded_model.parameters(), lr=1e-3)
    criterion = loss_fn

    plugin = HybridParallelPlugin(**test_config)
    booster = Booster(plugin=plugin)

    sharded_model, sharded_optimizer, criterion, _, _ = booster.boost(sharded_model, sharded_optimizer, criterion)
    return org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster
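
# Usage sketch (hypothetical `test_config`; the remaining keys are forwarded to
# HybridParallelPlugin's constructor):
#   test_config = {'tp_size': 2, 'pp_size': 2, 'num_microbatches': 4,
#                  'use_lazy_init': True, 'precision': 'fp32'}
#   org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster = \
#       build_model_from_hybrid_plugin(model_fn, loss_fn, test_config)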


def run_forward_backward_with_hybrid_plugin(org_model: Module, sharded_model: Module, sharded_optimizer: Optimizer,
                                            data_gen_fn: Callable, output_transform_fn: Callable, criterion: Callable,
                                            booster: Booster):
    org_model.cuda()
    sharded_model.cuda()

    def _criterion(outputs, inputs):
        outputs = output_transform_fn(outputs)
        loss = criterion(outputs)
        return loss

    data = data_gen_fn()

    if booster.plugin.enable_sequence_parallelism and booster.plugin.tp_size != 0:
        # pad the sequence length up to the least common multiple of tp_size and
        # seq_len so the sequence can be split evenly across tensor-parallel ranks
        seq_len = data['input_ids'].shape[1]
        lcm = booster.plugin.tp_size * seq_len // math.gcd(booster.plugin.tp_size, seq_len)
        times = lcm // seq_len
        input_shape = data['input_ids'].shape
        for k, v in data.items():
            if v.shape == input_shape:
                data[k] = v.repeat(1, times)

    sharded_model.train()
    if booster.plugin.stage_manager is not None:
        # pipeline parallelism: replicate the batch along dim 0 so it can be
        # split into microbatches by the pipeline schedule
        for k, v in data.items():
            if torch.is_tensor(v) or 'Tensor' in v.__class__.__name__:
                new_shape = [1] * v.dim()
                new_shape[0] = 4
                data[k] = v.to('cuda').repeat(*new_shape)

        data_iter = iter([data])
        sharded_output = booster.execute_pipeline(data_iter,
                                                  sharded_model,
                                                  _criterion,
                                                  sharded_optimizer,
                                                  return_loss=True,
                                                  return_outputs=True)
        sharded_loss = sharded_output['loss']
    else:
        data = {k: v.cuda() for k, v in data.items()}
        sharded_output = sharded_model(**data)
        sharded_loss = criterion(sharded_output)
        sharded_optimizer.backward(sharded_loss)

    # run the unsharded reference model on the same (possibly repeated) data
    org_model.train()
    data = {k: v.cuda() for k, v in data.items()}
    org_output = org_model(**data)
    org_loss = criterion(org_output)
    org_loss.backward()

    return org_loss, org_output, sharded_loss, sharded_output
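
# Usage sketch (hypothetical; continues the build_model_from_hybrid_plugin example):
#   org_loss, org_output, sharded_loss, sharded_output = \
#       run_forward_backward_with_hybrid_plugin(org_model, sharded_model, sharded_optimizer,
#                                               data_gen_fn, output_transform_fn, criterion, booster)
#   org_optimizer.step()
#   sharded_optimizer.step()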


def check_output_hidden_state(org_output: Tensor,
                              sharded_output: Tensor,
                              stage_manager: Optional[PipelineStageManager] = None,
                              atol: float = 1e-5,
                              rtol: float = 1e-3,
                              dim: int = 0):
    org_hidden_state = org_output.last_hidden_state

    if stage_manager is None:
        sharded_hidden_state = sharded_output.last_hidden_state

    # under pipeline parallelism, only the last stage holds the final hidden state,
    # split into per-microbatch outputs that must be concatenated back together
    if stage_manager and stage_manager.is_last_stage():
        sharded_hidden_state = torch.cat([output.last_hidden_state for output in sharded_output['outputs']], dim=dim)

    assert torch.allclose(org_hidden_state.float(), sharded_hidden_state.float(), atol=atol, rtol=rtol), \
        f"sharded model's output hidden state is not equal to the original model's last hidden state\n{org_hidden_state}\n{sharded_hidden_state}"


def check_loss(org_loss: Tensor, sharded_loss: Tensor, atol: float = 1e-5, rtol: float = 1e-3):
    assert torch.allclose(org_loss.float(), sharded_loss.float(), atol=atol, rtol=rtol), \
        f"sharded model loss is not equal to the original model loss\n{org_loss}\n{sharded_loss}"
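
# Usage sketch (hypothetical; `stage_manager` is booster.plugin.stage_manager and
# is None when pipeline parallelism is disabled):
#   check_loss(org_loss, sharded_loss, atol=1e-5, rtol=1e-3)
#   if stage_manager is None or stage_manager.is_last_stage():
#       check_output_hidden_state(org_output, sharded_output, stage_manager, dim=0)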


def check_weight(org_model: Module,
                 sharded_model: Module,
                 layer_suffix: List[str],
                 tp_group: Optional[ProcessGroup] = None,
                 dim: int = 0,
                 atol: float = 1e-5,
                 rtol: float = 1e-3,
                 verbose: bool = False):

    for suffix in layer_suffix:
        org_weight = getattr_(org_model, suffix).weight
        sharded_weight = getattr_(sharded_model, suffix).weight

        if is_distributed_tensor(sharded_weight) or is_customized_distributed_tensor(sharded_weight):
            # gather the weight shards from all tensor-parallel ranks before comparing
            sharded_weight_list = [
                torch.zeros_like(sharded_weight).to('cuda') for _ in range(dist.get_world_size(tp_group))
            ]
            dist.all_gather(sharded_weight_list, sharded_weight, tp_group)
            sharded_weight = torch.cat(sharded_weight_list, dim=dim)

        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' weight: {org_weight}, {sharded_weight}")

        assert torch.allclose(org_weight.float(), sharded_weight.float(), atol=atol, rtol=rtol), \
            f"sharded model weight {suffix} is not equal to the original model weight\n{org_weight}\n{sharded_weight}"


def check_grad(org_model: Module,
               sharded_model: Module,
               layer_suffix: List[str],
               tp_group: Optional[ProcessGroup] = None,
               dim: int = 0,
               atol: float = 1e-5,
               rtol: float = 1e-3,
               verbose: bool = False):
    for suffix in layer_suffix:
        org_grad = getattr_(org_model, suffix).weight.grad
        shard_grad = getattr_(sharded_model, suffix).weight.grad
        shard_weight = getattr_(sharded_model, suffix).weight

        if is_distributed_tensor(shard_weight) or is_customized_distributed_tensor(shard_weight):
            # gather the gradient shards from all tensor-parallel ranks before comparing
            shard_grad_list = [torch.zeros_like(shard_grad).to('cuda') for _ in range(dist.get_world_size(tp_group))]
            dist.all_gather(shard_grad_list, shard_grad, tp_group)
            shard_grad = torch.cat(shard_grad_list, dim=dim)

        # the embedding may be resized (padded) when using tensor parallelism
        if shard_grad.shape[0] > org_grad.shape[0]:
            shard_grad = shard_grad[:org_grad.shape[0], :]
        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' grad: {org_grad}, {shard_grad}")

        assert torch.allclose(
            org_grad.float(), shard_grad.float(), rtol=rtol, atol=atol
        ), f"error in attribute '{suffix}': original model grad is not equal to sharded model grad\n{org_grad}\n{shard_grad}"
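
# Usage sketch (hypothetical layer suffixes; `dim` should match the axis along
# which the layer is partitioned, e.g. 0 for column-parallel, 1 for row-parallel):
#   tp_group = booster.plugin.tp_group
#   check_weight(org_model, sharded_model.unwrap(), ['encoder.layer[0].attention.self.query'],
#                tp_group, dim=0, atol=5e-3, rtol=5e-3)
#   check_grad(org_model, sharded_model.unwrap(), ['embeddings.word_embeddings'],
#              tp_group, dim=0, atol=1e-4, rtol=1e-3)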