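"""Shared helpers for ShardFormer model tests: build reference and sharded models,
run forward/backward passes (optionally through the HybridParallelPlugin booster),
and compare outputs, losses, weights and gradients between the two models."""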
import copy
import math
from contextlib import nullcontext
from typing import Any, Callable, Dict, List, Optional

import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed import ProcessGroup
from torch.nn import Module
from torch.optim import Adam, Optimizer

from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin
from colossalai.booster.plugin.hybrid_parallel_plugin import HybridParallelModule
from colossalai.lazy import LazyInitContext
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.shardformer import ShardConfig, ShardFormer
from colossalai.shardformer._utils import getattr_
from colossalai.shardformer.policies.auto_policy import Policy
from colossalai.tensor.d_tensor.api import is_customized_distributed_tensor, is_distributed_tensor


def build_model(model_fn,
                enable_fused_normalization=True,
                enable_tensor_parallelism=True,
                enable_flash_attention=False,
                enable_jit_fused=False,
                enable_sequence_parallelism=False,
                use_lazy_init: bool = False):
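    """Build a reference model via ``model_fn`` and a ShardFormer-sharded copy of it.

    Returns the original model and the sharded model, both moved to CUDA.
    """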
    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        # create new model
        org_model = model_fn()
        model_copy = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)
    # shard model
    shard_config = ShardConfig(enable_fused_normalization=enable_fused_normalization,
                               enable_tensor_parallelism=enable_tensor_parallelism,
                               enable_flash_attention=enable_flash_attention,
                               enable_jit_fused=enable_jit_fused,
                               enable_sequence_parallelism=enable_sequence_parallelism)
    shard_former = ShardFormer(shard_config=shard_config)
    sharded_model, shared_params = shard_former.optimize(model_copy)
    return org_model.cuda(), sharded_model.cuda()


def build_pipeline_model(model_fn,
                         stage_manager=None,
                         enable_fused_normalization=False,
                         enable_tensor_parallelism=False,
                         use_lazy_init: bool = False,
                         policy: Optional[Policy] = None):
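    """Build a reference model and a copy sharded for pipeline parallelism using the
    given ``stage_manager`` and optional ``policy``. Returns both models on CUDA.
    """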
    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        # create new model
        org_model = model_fn()
        model_copy = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)

    # shard model
    shard_config = ShardConfig(enable_fused_normalization=enable_fused_normalization,
                               enable_tensor_parallelism=enable_tensor_parallelism,
                               pipeline_stage_manager=stage_manager)

    shard_former = ShardFormer(shard_config=shard_config)
    sharded_model, shared_params = shard_former.optimize(model_copy, policy=policy)
    return org_model.cuda(), sharded_model.cuda()


def run_forward(original_model, sharded_model, data_gen_fn, output_transform_fn, loss_fn):
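    """Run a single forward pass on both models with the same generated batch.

    Returns ``(org_output, org_loss, shard_output, shard_loss)``.
    """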
    # prepare input
    data = data_gen_fn()
    data = {k: v.cuda() for k, v in data.items()}
    # switch to train mode
    original_model.train()
    sharded_model.train()
    # run forward
    org_output = original_model(**data)
    org_output = output_transform_fn(org_output)
    org_loss = loss_fn(org_output)

    shard_output = sharded_model(**data)
    shard_output = output_transform_fn(shard_output)
    shard_loss = loss_fn(shard_output)
    return org_output, org_loss, shard_output, shard_loss


def check_state_dict(org_model: Module, sharded_model: Module, name: str = ''):
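    """Assert that the sharded model's state dict matches the original model's
    key by key in shape, dtype and values."""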
    org_sd = org_model.state_dict()
    shard_sd = sharded_model.state_dict()
    for k, v in org_sd.items():
        assert k in shard_sd, f'{name} {k} not in sharded model'
        shard_v = shard_sd[k]
        assert v.shape == shard_v.shape, f'{name} {k} shape mismatch, {v.shape} vs {shard_v.shape}'
        assert v.dtype == shard_v.dtype, f'{name} {k} dtype mismatch, {v.dtype} vs {shard_v.dtype}'
        assert torch.equal(v, shard_v), f'{name} {k} value mismatch'


def build_model_from_hybrid_plugin(model_fn: Callable, loss_fn: Callable, test_config: Dict[str, Any]):
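    """Build a reference model/optimizer pair and a deep copy boosted by ``HybridParallelPlugin``.

    Returns ``(org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster)``.
    """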

    use_lazy_init = False
    if 'use_lazy_init' in test_config:
        use_lazy_init = test_config.pop('use_lazy_init')

    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        org_model = model_fn()
        sharded_model = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)

    org_model = org_model.cuda()
    org_optimizer = Adam(org_model.parameters(), lr=1e-3)
    sharded_optimizer = Adam(sharded_model.parameters(), lr=1e-3)
    criterion = loss_fn

    plugin = HybridParallelPlugin(**test_config)
    booster = Booster(plugin=plugin)

    sharded_model, sharded_optimizer, criterion, _, _ = booster.boost(sharded_model, sharded_optimizer, criterion)
    return org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster


def run_forward_backward_with_hybrid_plugin(org_model: Module, sharded_model: Module, sharded_optimizer: Optimizer,
                                            data_gen_fn: Callable, output_transform_fn: Callable, criterion: Callable,
                                            booster: Booster):
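    """Run forward and backward on both models with the same batch. The sharded model is
    executed through the booster's pipeline schedule when a stage manager is configured.

    Returns ``(org_loss, org_output, sharded_loss, sharded_output)``.
    """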
    org_model.cuda()
    sharded_model.cuda()

    def _criterion(outputs, inputs):
        outputs = output_transform_fn(outputs)
        loss = criterion(outputs)
        return loss

    data = data_gen_fn()

    if booster.plugin.enable_sequence_parallelism and booster.plugin.tp_size != 0:
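        # repeat the sequence dimension so its length becomes a multiple of tp_size,
        # allowing sequence parallelism to split it evenly across tensor-parallel ranks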
        seq_len = data['input_ids'].shape[1]
        lcm = booster.plugin.tp_size * seq_len // math.gcd(booster.plugin.tp_size, seq_len)
        times = lcm // seq_len
        input_shape = data['input_ids'].shape
        for k, v in data.items():
            if v.shape == input_shape:
                data[k] = v.repeat(1, times)

    sharded_model.train()
    if booster.plugin.stage_manager is not None:
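        # repeat each tensor 4x along the batch dimension so the batch can be split
        # into micro-batches for pipeline execution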
        for k, v in data.items():
            if torch.is_tensor(v) or 'Tensor' in v.__class__.__name__:
                new_shape = [1] * v.dim()
                new_shape[0] = 4
                data[k] = v.to('cuda').repeat(*new_shape)

        data_iter = iter([data])
        sharded_output = booster.execute_pipeline(data_iter,
                                                  sharded_model,
                                                  _criterion,
                                                  sharded_optimizer,
                                                  return_loss=True,
                                                  return_outputs=True)
        sharded_loss = sharded_output['loss']
    else:
        data = {k: v.cuda() for k, v in data.items()}
        sharded_output = sharded_model(**data)
        sharded_loss = criterion(sharded_output)
        sharded_optimizer.backward(sharded_loss)

    org_model.train()
    data = {k: v.cuda() for k, v in data.items()}
    org_output = org_model(**data)

    org_loss = criterion(org_output)
    org_loss.backward()

    return org_loss, org_output, sharded_loss, sharded_output


def check_output_hidden_state(org_output: Tensor,
                              sharded_output: Tensor,
                              stage_manager: Optional[PipelineStageManager] = None,
                              atol: float = 1e-5,
                              rtol: float = 1e-3,
                              dim: int = 0):
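    """Assert that the last hidden states of both models are close. Under pipeline
    parallelism the sharded hidden state is read from the stage output dict on the
    last stage."""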

    org_hidden_state = org_output.last_hidden_state

    if stage_manager and stage_manager.is_last_stage():
        sharded_hidden_state = sharded_output['outputs']['last_hidden_state']
    else:
        sharded_hidden_state = sharded_output.last_hidden_state

    assert torch.allclose(org_hidden_state.float(), sharded_hidden_state.float(), atol=atol, rtol=rtol), \
        f"shard model's output hidden state is not equal to origin model's last hidden state\n{org_hidden_state}\n{sharded_hidden_state}"


def check_loss(org_loss: Tensor, sharded_loss: Tensor, atol: float = 1e-5, rtol: float = 1e-3):
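    """Assert that the sharded model's loss is close to the original model's loss."""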
    assert torch.allclose(org_loss.float(), sharded_loss.float(), atol=atol, rtol=rtol), \
        f"shard model loss is not equal to origin model loss\n{org_loss}\n{sharded_loss}"


def check_weight(org_model: Module,
                 sharded_model: Module,
                 layer_suffix: List[str],
                 tp_group: Optional[ProcessGroup] = None,
                 dim: int = 0,
                 atol: float = 1e-5,
                 rtol: float = 1e-3,
                 verbose: bool = False):
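    """Assert that the weights at each ``layer_suffix`` match between the two models.
    Tensor-parallel shards are all-gathered over ``tp_group`` and concatenated along
    ``dim`` before comparison."""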

    for suffix in layer_suffix:
        org_weight = getattr_(org_model, suffix).weight
        sharded_weight = getattr_(sharded_model, suffix).weight

        if is_distributed_tensor(sharded_weight) or is_customized_distributed_tensor(sharded_weight):
            sharded_weight_list = [
                torch.zeros_like(sharded_weight).to('cuda') for _ in range(dist.get_world_size(tp_group))
            ]
            dist.all_gather(sharded_weight_list, sharded_weight, tp_group)
            sharded_weight = torch.cat(sharded_weight_list, dim=dim)

        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' weight: {org_weight}, {sharded_weight}")

        assert torch.allclose(org_weight.float(), sharded_weight.float(), atol=atol, rtol=rtol), \
            f"shard model weight {suffix} is not equal to origin model weight\n{org_weight}\n{sharded_weight}"


def get_grad_tensors_for_check(org_model: Module,
                               sharded_model: Module,
                               layer_suffix: List[str],
                               tp_group: ProcessGroup = None,
                               dim: int = 0,
                               atol: float = 1e-5,
                               rtol: float = 1e-3,
                               verbose: bool = False,
                               name: str = None):
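    """Collect gradients at each ``layer_suffix`` from both models for later comparison
    (see ``check_all_grad_tensors``). Tensor-parallel shards are all-gathered over
    ``tp_group``; gradients of embeddings resized by tensor parallelism are truncated
    back to the original size."""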

    grad_to_check = {}
    for suffix in layer_suffix:
        org_grad = getattr_(org_model, suffix).weight.grad
        shard_grad = getattr_(sharded_model, suffix).weight.grad
        shard_weight = getattr_(sharded_model, suffix).weight
        if is_distributed_tensor(shard_weight) or is_customized_distributed_tensor(shard_weight):
            shard_grad_list = [torch.zeros_like(shard_grad).to('cuda') for _ in range(dist.get_world_size(tp_group))]
            dist.all_gather(shard_grad_list, shard_grad, tp_group)
            shard_grad = torch.cat(shard_grad_list, dim=dim)

        # embedding may be resized when using tensor parallel
        if shard_grad.shape[0] > org_grad.shape[0]:
            shard_grad = shard_grad[:org_grad.shape[0], :]
        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' grad: {org_grad}, {shard_grad}")

        grad_to_check[suffix] = {
            "org_grad": org_grad.float(),
            "shard_grad": shard_grad.float(),
            "rtol": rtol,
            "atol": atol
        }

    return grad_to_check


# used by sam/blip2
def check_grad(org_model: Module,
               sharded_model: Module,
               layer_suffix: List[str],
               tp_group: ProcessGroup = None,
               dim: int = 0,
               atol: float = 1e-5,
               rtol: float = 1e-3,
               verbose: bool = False):
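    """Assert that the gradients at each ``layer_suffix`` match between the two models,
    all-gathering tensor-parallel shards over ``tp_group`` before comparison."""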
    for suffix in layer_suffix:
        org_grad = getattr_(org_model, suffix).weight.grad
        shard_grad = getattr_(sharded_model, suffix).weight.grad
        shard_weight = getattr_(sharded_model, suffix).weight
        if is_distributed_tensor(shard_weight) or is_customized_distributed_tensor(shard_weight):
            shard_grad_list = [torch.zeros_like(shard_grad).to('cuda') for _ in range(dist.get_world_size(tp_group))]
            dist.all_gather(shard_grad_list, shard_grad, tp_group)
            shard_grad = torch.cat(shard_grad_list, dim=dim)

        # embedding may be resized when using tensor parallel
        if shard_grad.shape[0] > org_grad.shape[0]:
            shard_grad = shard_grad[:org_grad.shape[0], :]
        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' grad: {org_grad}, {shard_grad}")

        assert torch.allclose(
            org_grad.float(), shard_grad.float(), rtol=rtol, atol=atol
        ), f"error attribute '{suffix}', orgin model grad is not equal to shard model grad\n{org_grad}\n{shard_grad}"


def unwrap_model(module: Module,
                 base_model_class_name: Optional[str] = None,
                 base_model_attribute_name: Optional[str] = None):
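    """Unwrap a ``HybridParallelModule`` and optionally drill down to the base model,
    matched either by class name or fetched via ``base_model_attribute_name``."""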
    if isinstance(module, HybridParallelModule):
        module = module.unwrap()
    if base_model_class_name is None:
        return module
    if module.__class__.__name__ == base_model_class_name:
        return module
    return getattr(module, base_model_attribute_name, None)


def check_all_grad_tensors(check_tensors):
    """
    "org_grad": tensor to be compared from the original model
    "shard_grad": tensor to be compared from the sharded model
    """
    for suffix, check_info in check_tensors.items():
        org_grad = check_info["org_grad"]
        shard_grad = check_info["shard_grad"]
        rtol = check_info["rtol"]
        atol = check_info["atol"]
        assert torch.allclose(
            org_grad, shard_grad, atol=atol, rtol=rtol
        ), f"error attribute '{suffix}', orgin model grad is not equal to shard model grad\n{org_grad}\n{shard_grad}"