"tests/test_ops/test_op.py" did not exist on "ae7d3f49270c08fe3e37c82821f7d1de10de2837"
test_model.py 15.3 KB
Newer Older
1
from colossalai.tensor.colo_parameter import ColoParameter
from tests.components_to_test.registry import non_distributed_component_funcs

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from colossalai.tensor import distspec, named_params_with_colotensor, TensorSpec, ComputePattern, \
    ParallelAction, ColoTensor, ColoOptimizer, DistSpecManager
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

from functools import partial
import random
import os
import numpy as np

# Hack huggingface Bert ModelOutput
# Make it available to our ColoTensor
from transformers.file_utils import ModelOutput
from dataclasses import fields


def _post_init_colotensor(self):
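    """Replacement __post_init__ for huggingface ModelOutput that also accepts ColoTensor fields."""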
    class_fields = fields(self)
    # Safety and consistency checks
    if len(class_fields) == 0:
        raise ValueError(f"{self.__class__.__name__} has no fields.")
    if not all(field.default is None for field in class_fields[1:]):
        raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

    first_field = getattr(self, class_fields[0].name)
    other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

    def is_tensor_with_colo(x):
        """
        Tests if `x` is a `ColoTensor` or `torch.Tensor`.
        """
        if isinstance(x, torch.Tensor):
            return True

        return isinstance(x, ColoTensor)

    if other_fields_are_none and not is_tensor_with_colo(first_field):
        if isinstance(first_field, dict):
            iterator = first_field.items()
            first_field_iterator = True
        else:
            try:
                iterator = iter(first_field)
                first_field_iterator = True
            except TypeError:
                first_field_iterator = False

        # if we provided an iterator as first field and the iterator is a (key, value) iterator
        # set the associated fields
        if first_field_iterator:
            for element in iterator:
                if (not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str)):
                    break
                setattr(self, element[0], element[1])
                if element[1] is not None:
                    self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
    else:
        for field in class_fields:
            v = getattr(self, field.name)
            if v is not None:
                self[field.name] = v


ModelOutput.__post_init__ = _post_init_colotensor
# complete the hack


def set_seed(seed):
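    # Seed Python, NumPy and PyTorch (CPU + CUDA) RNGs so the parallel and reference models start identically.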
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def init_1d_row_linear(weight):
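    # Row-parallel linear: shard the weight along its last dim (the input features) over the 1D TP group.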
    spec = TensorSpec(
        distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
        [ParallelAction(priority=1, compute_pattern=ComputePattern.TP1D, parallel_mode=ParallelMode.PARALLEL_1D)])
    with DistSpecManager.no_grad():
        weight.set_spec(spec)


def init_1d_col_linear(weight, gather_out=True):
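    # Column-parallel linear: shard the weight along dim 0 (the output features);
    # gather_out controls whether the partial outputs are gathered after the forward pass.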
    spec = TensorSpec(
        distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]), [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1D,
                           parallel_mode=ParallelMode.PARALLEL_1D,
                           gather_out=gather_out)
        ])
    with DistSpecManager.no_grad():
        weight.set_spec(spec)


def init_1d_row_embedding(weight):
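    # Row-parallel embedding: shard the embedding table along dim 0 (the vocabulary) over the 1D TP group.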
    spec = TensorSpec(
        distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
        [ParallelAction(priority=1, compute_pattern=ComputePattern.TP1D, parallel_mode=ParallelMode.PARALLEL_1D)])
    with DistSpecManager.no_grad():
        weight.set_spec(spec)


def init_1d_col_embedding(weight):
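    # Column-parallel embedding: shard the embedding table along its last dim (the hidden size) over the 1D TP group.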
    spec = TensorSpec(
        distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
        [ParallelAction(priority=1, compute_pattern=ComputePattern.TP1D, parallel_mode=ParallelMode.PARALLEL_1D)])
    with DistSpecManager.no_grad():
        weight.set_spec(spec)


def run_1d_hybrid_tp(model_name):
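    """Train a model for a few steps with mixed row-/column-parallel 1D TP and compare
    loss and parameters against a single-process reference model kept on rank 0."""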
    # Fetch the registered components (model builder, dataloaders, criterion) for this model
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()
        colo_optimizer_torch = ColoOptimizer(dict(model_torch.named_parameters()), torch.optim.SGD, lr=0.1)

        # Make two models have the same init params
        for p1, p2 in zip(model.parameters(), model_torch.parameters()):
            p2.data.copy_(p1.data)

    if 'bert' == model_name:
        for name, p in model.named_parameters():
            if not isinstance(p, ColoTensor):
                continue
            # print(name)
            # num_class = type_vocab_size = 2 | (8, 2)
            if 'classifier' in name and 'weight' in name:
                init_1d_row_linear(p)
            # num_class = vocab_size = 30524 | (30524, 8)
            if 'word_embeddings' in name and 'weight' in name:
                init_1d_row_embedding(p)
            # num_class = seq_len = 512 | (512, 8)
            if 'position_embeddings' in name and 'weight' in name:
                init_1d_row_embedding(p)
            # num_class = type_vocab_size = 2 | (2, 8)
            if 'token_type_embeddings' in name and 'weight' in name:
                init_1d_col_embedding(p)
    elif "simple_net" == model_name:
        # A naive way to set spec for all weights in Linear
        for name, p in model.named_parameters():
            if not isinstance(p, ColoTensor):
                continue
            if 'embed' in name and 'weight' in name:
                init_1d_col_embedding(p)
            if 'proj1' in name and ('weight' in name or 'bias' in name):
                init_1d_col_linear(p)
            if 'proj2' in name and 'weight' in name:
                init_1d_row_linear(p)
            if 'classifier' in name and ('weight' in name or 'bias' in name):
                init_1d_col_linear(p, gather_out=False)

    model = model.cuda()
    colo_optimizer = ColoOptimizer(dict(model.named_parameters()), torch.optim.SGD, lr=0.1)
    for i, (data, label) in enumerate(train_dataloader):
        model.eval()
        colo_optimizer.zero_grad()
        if rank == 0:
            model_torch.eval()
            colo_optimizer_torch.zero_grad()

        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # For reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

        if rank == 0:
            with torch.no_grad():
                assert torch.allclose(loss, loss_torch, rtol=1e-2)

        loss.backward()
        colo_optimizer.step()

        if rank == 0:
            loss_torch.backward()
            colo_optimizer_torch.step()

            with torch.no_grad():
                # check param
                for p1, p2 in zip(model.parameters(), model_torch.parameters()):
                    if p1.size() == p2.size():
                        assert torch.allclose(p1, p2)
                    else:
                        # TODO(jzy) Only check 1D spec. Need to be replaced by new DistSpec.
                        if p1.size(-1) < p2.size(-1):    # col
                            world_size = p2.size(-1) // p1.size(-1)
                            split_p2 = torch.chunk(p2, world_size, dim=-1)[0]

                        elif p1.size(0) < p2.size(0):    # row
                            world_size = p2.size(0) // p1.size(0)
                            split_p2 = torch.chunk(p2, world_size, dim=0)[0]

                        assert torch.allclose(p1, split_p2)

        if i > 5:
            break


# Test the overridden parameters() and named_parameters() member functions
def test_model_parameters():
    # build a module with 2 Linear layers (4 parameters) plus an extra Parameter, 5 parameters in total.
    class Net(torch.nn.Module):

        def __init__(self):
            super().__init__()
            self.fcs = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Linear(3, 2))
            self.extra_param = torch.nn.Parameter(torch.randn(2))

    with ColoInitContext(device=get_current_device()):
        model = Net()

    param_cnt = 0
    for name, p in model.named_parameters():
        param_cnt += 1
    assert param_cnt == 5

    for name, colo_p in model.named_parameters():
        assert colo_p.is_model_data()

    param_cnt = 0
    for name, p in model.named_parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 1

    param_cnt = 0
    for p in model.fcs[0].parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 2


def test_colo_optimizer():
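    """Single-process smoke test: ColoOptimizer should run a few SGD steps on a ColoInitContext model."""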
    get_components_func = non_distributed_component_funcs.get_callable('simple_net')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    set_seed(1)
    with ColoInitContext(lazy_memory_allocate=False, device=get_current_device()):
        model = model_builder(checkpoint=True)

    colo_optimizer = ColoOptimizer(dict(model.named_parameters()), torch.optim.SGD, lr=0.1)
    for i, (data, label) in enumerate(train_dataloader):
        colo_optimizer.zero_grad()
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        loss.backward()
        colo_optimizer.step()

        if i > 5:
            break


def run_1d_row_tp(model_name: str):
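    """Train a model for a few steps with row-parallel 1D TP and compare the loss
    against a single-process reference model kept on rank 0."""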
    # Fetch the registered components (model builder, dataloaders, criterion) for this model
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()
    # A naive way to set spec for all weights in Linear
    for name, p in model.named_parameters():
        if not isinstance(p, ColoTensor):
            continue
        if 'weight' in name and 'LayerNorm' not in name and 'ln' not in name and 'embed' not in name:
            init_1d_row_linear(p)
        if 'embed' in name and 'weight' in name:
            init_1d_row_embedding(p)

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # For reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

        if rank == 0:
            assert torch.allclose(loss, loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break


def _run_pretrain_load():
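    """Load a pretrained HuggingFace BERT inside ColoInitContext and check that every parameter
    becomes a ColoParameter whose values match a vanilla (non-Colo) load."""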
    from _utils import check_equal
    from transformers import BertForMaskedLM
    set_seed(1)
    model_pretrained = BertForMaskedLM.from_pretrained('bert-base-uncased')
    with ColoInitContext(lazy_memory_allocate=False, device=get_current_device()):
        model = BertForMaskedLM.from_pretrained('bert-base-uncased')

    model_pretrained = model_pretrained.cuda()
    model = model.cuda()

    dict_pretrained = {}
    dict_col = {}
    c_ref = 0
    for name, param in model_pretrained.named_parameters():
        dict_pretrained[name] = param
        c_ref += 1
    c1 = 0
    c2 = 0
    for name, param in model.named_parameters():
        if isinstance(param, ColoParameter):
            c1 += 1
        else:
            c2 += 1
        dict_col[name] = param
    assert c_ref == c1
    assert c2 == 0
    if model_pretrained.cls.predictions.decoder.bias is model_pretrained.cls.predictions.bias:
        assert model.cls.predictions.decoder.bias is model.cls.predictions.bias

    for name, param in dict_pretrained.items():
        check_equal(param, dict_col[name])


def run_model_dist(rank, world_size, port):
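    # Worker entry point: launch a 1D tensor-parallel context, then run the row-TP and hybrid-TP checks.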
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    for name in ['simple_net']:
        run_1d_row_tp(name)
    for name in ['bert', 'simple_net']:
        run_1d_hybrid_tp(name)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
# @parameterize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_model(world_size):
    run_func = partial(run_model_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


def run_pretrain_load_dist(rank, world_size, port):
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    _run_pretrain_load()


# The test case has to download huggingface pretrained models from the internet
# So we manually trigger the test.
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def _test_pretrain_load(world_size):
    run_func = partial(run_pretrain_load_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    # test_model_parameters()
    # test_colo_optimizer()
    test_model(4)
    # _test_pretrain_load(4)