"applications/vscode:/vscode.git/clone" did not exist on "842768a1749bf3d9961a48d2bf96ca5abef7d2da"
from tests.components_to_test.registry import non_distributed_component_funcs

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from colossalai.tensor import named_params_with_colotensor, TensorSpec, ComputePattern, ParallelAction, ColoTensor, ColoOptimizer
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

from functools import partial
import random
import os
import numpy as np

# Hack huggingface Bert ModelOutput:
# monkey-patch __post_init__ so it also accepts our ColoTensor fields
from transformers.file_utils import ModelOutput
from dataclasses import fields


def _post_init_colo(self):
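    """A replacement for ModelOutput.__post_init__.

    Mirrors the upstream huggingface logic, except that the tensor check
    (`is_tensor_with_colo` below) also recognizes ColoTensor.
    """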
    class_fields = fields(self)
    # Safety and consistency checks
    if len(class_fields) == 0:
        raise ValueError(f"{self.__class__.__name__} has no fields.")
    if not all(field.default is None for field in class_fields[1:]):
        raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

    first_field = getattr(self, class_fields[0].name)
    other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

    def is_tensor_with_colo(x):
        """
        Tests if `x` is a `ColoTensor` or `torch.Tensor`.
        """
        if isinstance(x, torch.Tensor):
            return True
        return isinstance(x, ColoTensor)

    if other_fields_are_none and not is_tensor_with_colo(first_field):
        if isinstance(first_field, dict):
            iterator = first_field.items()
            first_field_iterator = True
        else:
            try:
                iterator = iter(first_field)
                first_field_iterator = True
            except TypeError:
                first_field_iterator = False

        # if we provided an iterator as first field and the iterator is a (key, value) iterator
        # set the associated fields
        if first_field_iterator:
            for element in iterator:
                if (not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str)):
                    break
                setattr(self, element[0], element[1])
                if element[1] is not None:
                    self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
    else:
        for field in class_fields:
            v = getattr(self, field.name)
            if v is not None:
                self[field.name] = v


ModelOutput.__post_init__ = _post_init_colo
# complete the hack
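# Hypothetical usage: a huggingface output object can now carry ColoTensor fields,
# e.g. SequenceClassifierOutput(logits=<ColoTensor>) initializes without error.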


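# Make runs reproducible across python, numpy and torch (including CUDA)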
def set_seed(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def run_1d_hybrid_tp(model_name):
    # Fetch the builder, dataloaders, optimizer class and criterion registered for this model
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

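    # For bert: row-shard the classifier and the large embedding tables; the tiny
    # token_type embedding is column-sharded along the embedding dimension instead.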
    if model_name == 'bert':
        parallel_action_list_row = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DRow_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_linear_row = TensorSpec(parallel_action_list_row)

        parallel_action_list_embedding_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Embedding,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_embedding_col = TensorSpec(parallel_action_list_embedding_col)

        parallel_action_list_embedding_row = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DRow_Embedding,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_embedding_row = TensorSpec(parallel_action_list_embedding_row)

        for name, p in model.colo_named_parameters():
            if not isinstance(p, ColoTensor):
                continue
            # num_class = type_vocab_size = 2 | (8, 2)
            if 'classifier' in name and 'weight' in name:
                p.set_spec(spec_linear_row)
            # num_class = vocab_size = 30524 | (30524, 8)
            if 'word_embeddings' in name and 'weight' in name:
                p.set_spec(spec_embedding_row)
            # num_class = seq_len = 512 | (512, 8)
            if 'position_embeddings' in name and 'weight' in name:
                p.set_spec(spec_embedding_row)
            # num_class = type_vocab_size = 2 | (2, 8)
            if 'token_type_embeddings' in name and 'weight' in name:
                p.set_spec(spec_embedding_col)
    elif model_name == "simple_net":
        parallel_action_list_row = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DRow_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_row = TensorSpec(parallel_action_list_row)

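        # Pair a column-parallel proj1 with a row-parallel proj2 (Megatron-style),
        # so the intermediate activation needs no gather between the two linears.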
        parallel_action_list_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D),
        ]
        spec_col = TensorSpec(parallel_action_list_col)

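        # gather_out=False keeps the classifier output sharded along the class
        # dimension rather than all-gathering it (inferred from the flag name).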
        parallel_action_list_classifier_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D,
                           gather_out=False),
        ]
        spec_classifier_col = TensorSpec(parallel_action_list_classifier_col)

        parallel_action_list_embedding_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Embedding,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_embedding_col = TensorSpec(parallel_action_list_embedding_col)
        # A naive way to set specs on the embedding, projection and classifier weights
        for name, p in model.colo_named_parameters():
            if not isinstance(p, ColoTensor):
                continue
            if 'embed' in name and 'weight' in name:
                p.set_spec(spec_embedding_col)
            if 'proj1' in name and ('weight' in name or 'bias' in name):
                p.set_spec(spec_col)
            if 'proj2' in name and 'weight' in name:
                p.set_spec(spec_row)
            if 'classifier' in name and ('weight' in name or 'bias' in name):
                p.set_spec(spec_classifier_col)

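    # Build an unsharded torch model on rank 0 with the same seed as a reference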
    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # Run the unsharded reference model on rank 0 and check the losses agree
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break


# Test the overridden parameters() and named_parameters() member functions
def test_model_parameters():
    # build a module with 2 Linear layers and one extra parameter: 5 parameters in total
    class Net(torch.nn.Module):

        def __init__(self):
            super().__init__()
            self.fcs = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Linear(3, 2))
            self.extra_param = torch.nn.Parameter(torch.randn(2))

    with ColoInitContext(device=get_current_device()):
        model = Net()

    param_cnt = 0
    for name, p in model.named_parameters():
        param_cnt += 1
    assert param_cnt == 5

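    # Every colo parameter should be tagged as model data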
    for name, colo_p in model.colo_named_parameters():
        assert colo_p.is_model_data()

    param_cnt = 0
    for name, p in model.named_parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 1

    param_cnt = 0
    for p in model.fcs[0].parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 2


def test_colo_optimizer():
    get_components_func = non_distributed_component_funcs.get_callable('simple_net')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    set_seed(1)
    with ColoInitContext(lazy_memory_allocate=False, device=get_current_device()):
        model = model_builder(checkpoint=True)

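    # ColoOptimizer wraps a stock torch optimizer (SGD here) around the colo parameters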
    colo_optimizer = ColoOptimizer(dict(model.named_parameters()), torch.optim.SGD, lr=0.1)
    for i, (data, label) in enumerate(train_dataloader):
        colo_optimizer.zero_grad()
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Forward pass: compute the loss, with or without an external criterion
        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        loss.backward()
        colo_optimizer.step()

        if i > 5:
            break


def run_1d_row_tp(model_name: str):
    # Fetch the registered components for this model
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

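    # Keep an unsharded copy on rank 0 as the reference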
    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    parallel_action_list = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Linear,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec = TensorSpec(parallel_action_list)

    parallel_action_list_embedding_row = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Embedding,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_embedding_row = TensorSpec(parallel_action_list_embedding_row)

    # A naive way to set specs: row-shard every Linear weight (LayerNorm excluded) and the embedding weight
    for name, p in model.colo_named_parameters():
        if not isinstance(p, ColoTensor):
            continue
        if 'weight' in name and 'LayerNorm' not in name and 'ln' not in name and 'embed' not in name:
            p.set_spec(spec)
        if 'embed' in name and 'weight' in name:
            p.set_spec(spec_embedding_row)

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # Run the unsharded reference model on rank 0 and check the losses agree
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break


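# Per-process entry point: set up a 1D tensor-parallel process group, then run the tests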
def run_dist(rank, world_size, port):
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    for name in ['simple_net']:
        run_1d_row_tp(name)
    for name in ['bert', 'simple_net']:
        run_1d_hybrid_tp(name)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
#@parameterize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_model(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    # test_model_parameters()
    # test_colo_optimizer()
    test_model(4)