from tests.components_to_test.registry import non_distributed_component_funcs

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from colossalai.tensor import named_params_with_colotensor, TensorSpec, ComputePattern, ParallelAction, ColoTensor, ColoOptimizer
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

from functools import partial
import random
import os
import numpy as np

# Hack huggingface Bert ModelOutput
# Make it available to our ColoTensor
from transformers.file_utils import ModelOutput
from dataclasses import fields


def _post_init_colo(self):
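    """Replacement __post_init__ for huggingface's ModelOutput dataclasses.

    Mirrors the upstream implementation, but treats a ColoTensor first field
    like a torch.Tensor, so it is stored directly instead of being unpacked
    as a (key, value) iterator.
    """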
    class_fields = fields(self)
    # Safety and consistency checks
    if not len(class_fields):
        raise ValueError(f"{self.__class__.__name__} has no fields.")
    if not all(field.default is None for field in class_fields[1:]):
        raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

    first_field = getattr(self, class_fields[0].name)
    other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

    def is_tensor_with_colo(x):
        """
        Tests if `x` is a `ColoTensor` or `torch.Tensor`.
        """
        if isinstance(x, torch.Tensor):
            return True
        return isinstance(x, ColoTensor)

    if other_fields_are_none and not is_tensor_with_colo(first_field):
        if isinstance(first_field, dict):
            iterator = first_field.items()
            first_field_iterator = True
        else:
            try:
                iterator = iter(first_field)
                first_field_iterator = True
            except TypeError:
                first_field_iterator = False

        # if we provided an iterator as first field and the iterator is a (key, value) iterator
        # set the associated fields
        if first_field_iterator:
            for element in iterator:
                if (not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str)):
                    break
                setattr(self, element[0], element[1])
                if element[1] is not None:
                    self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
    else:
        for field in class_fields:
            v = getattr(self, field.name)
            if v is not None:
                self[field.name] = v


ModelOutput.__post_init__ = _post_init_colo
# complete the hack
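# From here on, every ModelOutput subclass returned by a huggingface model
# accepts a ColoTensor first field without trying to iterate over it.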


def set_seed(seed):
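    """Seed python, numpy and torch (CPU and CUDA) RNGs and force deterministic
    cuDNN, so repeated model builds produce identical weights."""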
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def run_1d_hybrid_tp(model_name):
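    """Run `model_name` under 1D hybrid tensor parallelism (a mix of row- and
    column-sharded weights) and check its loss against an unsharded torch
    reference model on rank 0."""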
    # Build the test model (bert or simple_net) from the component registry
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

    if model_name == 'bert':
        parallel_action_list_row = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DRow_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_linear_row = TensorSpec(parallel_action_list_row)

        parallel_action_list_embedding_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Embedding,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_embedding_col = TensorSpec(parallel_action_list_embedding_col)

        parallel_action_list_embedding_row = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DRow_Embedding,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_embedding_row = TensorSpec(parallel_action_list_embedding_row)

        for name, p in model.colo_named_parameters():
            if not isinstance(p, ColoTensor):
                continue
            # num_class = type_vocab_size = 2 | (8, 2)
            if 'classifier' in name and 'weight' in name:
                p.set_spec(spec_linear_row)
            # num_class = vocab_size = 30524 | (30524, 8)
            if 'word_embeddings' in name and 'weight' in name:
                p.set_spec(spec_embedding_row)
            # num_class = seq_len = 512 | (512, 8)
            if 'position_embeddings' in name and 'weight' in name:
                p.set_spec(spec_embedding_row)
            # num_class = type_vocab_size = 2 | (2, 8)
            if 'token_type_embeddings' in name and 'weight' in name:
                p.set_spec(spec_embedding_col)
    elif model_name == "simple_net":
        parallel_action_list_row = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DRow_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_row = TensorSpec(parallel_action_list_row)

        parallel_action_list_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_col = TensorSpec(parallel_action_list_col)

        parallel_action_list_embedding_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Embedding,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_embedding_col = TensorSpec(parallel_action_list_embedding_col)
        # A naive way to set specs for simple_net: proj1 is column-sharded and
        # proj2 row-sharded (the usual column-then-row pairing), and the
        # embedding is column-sharded.
        for name, p in model.colo_named_parameters():
            if not isinstance(p, ColoTensor):
                continue
            if 'proj1' in name and ('weight' in name or 'bias' in name):
                p.set_spec(spec_col)
            if 'proj2' in name and 'weight' in name:
                p.set_spec(spec_row)
            if 'embed' in name and 'weight' in name:
                p.set_spec(spec_embedding_col)

    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes so every TP rank sees the same batch
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # Run the unsharded torch model on rank 0 as the reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

        if rank == 0:
            # Loose tolerance: tensor parallelism changes the floating-point
            # reduction order, so losses only match approximately.
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break


# Test the overridden parameters() and named_parameters() member functions
def test_model_parameters():
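    """A model built under ColoInitContext should expose all of its parameters
    (2 Linear layers x 2 tensors + 1 extra parameter = 5) via the overridden
    parameters()/named_parameters(), including the recurse=False variants."""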
    # build a module with two Linear layers (4 parameters) plus one extra
    # parameter, 5 in total.
    class Net(torch.nn.Module):

        def __init__(self):
            super().__init__()
            self.fcs = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Linear(3, 2))
            self.extra_param = torch.nn.Parameter(torch.randn(2))

    with ColoInitContext(device=get_current_device()):
        model = Net()

    param_cnt = 0
    for name, p in model.named_parameters():
        param_cnt += 1
    assert param_cnt == 5

    for name, colo_p in model.colo_named_parameters():
        assert colo_p.is_model_data()

    param_cnt = 0
    for name, p in model.named_parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 1

    param_cnt = 0
    for p in model.fcs[0].parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 2


def test_colo_optimizer():
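    """Single-process smoke test: ColoOptimizer wrapping torch.optim.SGD should
    drive a few training steps of simple_net without error."""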
    get_components_func = non_distributed_component_funcs.get_callable('simple_net')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    set_seed(1)
    with ColoInitContext(lazy_memory_allocate=False, device=get_current_device()):
        model = model_builder(checkpoint=True)

    colo_optimizer = ColoOptimizer(dict(model.named_parameters()), torch.optim.SGD, lr=0.1)
    for i, (data, label) in enumerate(train_dataloader):
        colo_optimizer.zero_grad()
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Forward pass; some test models compute the loss inside forward
        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        loss.backward()
        colo_optimizer.step()

        if i > 5:
            break


def run_1d_row_tp(model_name: str):
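    """Run `model_name` with row-sharded Linear and Embedding weights under 1D
    tensor parallelism and compare its loss against an unsharded torch
    reference model on rank 0."""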
    # A simple net with two stacked nn.Linear layers
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    parallel_action_list = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Linear,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec = TensorSpec(parallel_action_list)

    parallel_action_list_embedding_row = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Embedding,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_embedding_row = TensorSpec(parallel_action_list_embedding_row)

    # A naive way to set specs: row-shard every non-LayerNorm Linear weight,
    # and row-shard the embedding weight.
    for name, p in model.colo_named_parameters():
        if not isinstance(p, ColoTensor):
            continue
        if 'weight' in name and 'LayerNorm' not in name and 'ln' not in name and 'embed' not in name:
            p.set_spec(spec)
        if 'embed' in name and 'weight' in name:
            p.set_spec(spec_embedding_row)

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes so every TP rank sees the same batch
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # Run the unsharded torch model on rank 0 as the reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

        if rank == 0:
            # Loose tolerance, as in run_1d_hybrid_tp: tensor parallelism
            # changes the floating-point reduction order.
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break


def run_dist(rank, world_size, port):
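    """Per-process entry point: initialize a 1D tensor-parallel group over NCCL,
    then run the row-parallel and hybrid TP test cases."""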
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    for name in ['simple_net']:
        run_1d_row_tp(name)
    for name in ['bert', 'simple_net']:
        run_1d_hybrid_tp(name)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_model(world_size):
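    """Spawn `world_size` processes on a free port and run the distributed tests."""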
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    # test_model_parameters()
    # test_colo_optimizer()
    test_model(4)