from tests.components_to_test.registry import non_distributed_component_funcs

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from colossalai.tensor import named_params_with_colotensor, TensorSpec, ComputePattern, ParallelAction, ColoTensor, ColoOptimizer
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

from functools import partial
import random
import os
import numpy as np

# Hack huggingface's ModelOutput: patch its __post_init__ so that it
# also accepts our ColoTensor fields.
from transformers.file_utils import ModelOutput
from dataclasses import fields


def post_init_colo(self):
    class_fields = fields(self)
    # Safety and consistency checks
    if not len(class_fields):
        raise ValueError(f"{self.__class__.__name__} has no fields.")
    if not all(field.default is None for field in class_fields[1:]):
        raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

    first_field = getattr(self, class_fields[0].name)
    other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

    def is_tensor_with_colo(x):
        """
        Tests if `x` is a `ColoTensor` or `torch.Tensor`.
        """
        if isinstance(x, torch.Tensor):
            return True
        return isinstance(x, ColoTensor)

    if other_fields_are_none and not is_tensor_with_colo(first_field):
        if isinstance(first_field, dict):
            iterator = first_field.items()
            first_field_iterator = True
        else:
            try:
                iterator = iter(first_field)
                first_field_iterator = True
            except TypeError:
                first_field_iterator = False

        # if we provided an iterator as first field and the iterator is a (key, value) iterator
        # set the associated fields
        if first_field_iterator:
            for element in iterator:
                if (
                    not isinstance(element, (list, tuple))
                    or not len(element) == 2
                    or not isinstance(element[0], str)
                ):
                    break
                setattr(self, element[0], element[1])
                if element[1] is not None:
                    self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
    else:
        for field in class_fields:
            v = getattr(self, field.name)
            if v is not None:
                self[field.name] = v

ModelOutput.__post_init__ = post_init_colo
# complete the hack
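
# A minimal illustration of what the patch enables (hypothetical snippet, not
# exercised by the tests below): a ModelOutput subclass whose first field holds
# a ColoTensor now passes __post_init__ just like a plain torch.Tensor would,
# because is_tensor_with_colo() accepts both types.
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class ColoOutput(ModelOutput):
#         logits: ColoTensor = None
#
#     out = ColoOutput(logits=some_colo_tensor)
#     assert out['logits'] is out.logits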

def set_seed(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def run_1d_col_tp():
    # A simple net with two stacked nn.Linear
    get_components_func = non_distributed_component_funcs.get_callable('simple_net')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

    parallel_action_list_row = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Linear,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_row = TensorSpec(parallel_action_list_row)

    parallel_action_list_col = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DCol_Linear,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_col = TensorSpec(parallel_action_list_col)

    parallel_action_list_embedding_col = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DCol_Embedding,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_embedding_col = TensorSpec(parallel_action_list_embedding_col)

    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    # A naive way to set specs: match Linear and Embedding weights by parameter name
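    # Sharding intent, as the ComputePattern names suggest (Megatron-style 1D
    # tensor parallelism): TP1DCol splits a Linear weight along the output
    # dimension, TP1DRow along the input dimension, so col-sharding proj1 and
    # row-sharding proj2 leaves a single cross-rank reduction after proj2.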
    for name, p in model.colo_named_parameters():
        if not isinstance(p, ColoTensor):
            continue
        if 'proj1' in name and ('weight' in name or 'bias' in name):
            p.set_spec(spec_col)
        if 'proj2' in name and 'weight' in name:
            p.set_spec(spec_row)
        if 'embed' in name and 'weight' in name:
            p.set_spec(spec_embedding_col)

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # For reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

        if rank == 0:
            # print(loss.torch_tensor().item())
            # print('loss torch', loss_torch.item())
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break


# Test the overridden parameters() and named_parameters() member functions
def test_model_parameters():
    # build a module with 2 Linear layers (4 parameters) plus an extra parameter: 5 in total.
    class Net(torch.nn.Module):

        def __init__(self):
            super().__init__()
            self.fcs = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Linear(3, 2))
            self.extra_param = torch.nn.Parameter(torch.randn(2))

    with ColoInitContext(device=get_current_device()):
        model = Net()

    param_cnt = 0
    for name, p in model.named_parameters():
        param_cnt += 1
    assert param_cnt == 5

    for name, colo_p in model.colo_named_parameters():
        assert colo_p.is_model_data()

    param_cnt = 0
    for name, p in model.named_parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 1

    param_cnt = 0
    for p in model.fcs[0].parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 2


def test_colo_optimizer():
    get_components_func = non_distributed_component_funcs.get_callable('simple_net')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    set_seed(1)
    with ColoInitContext(lazy_memory_allocate=False, device=get_current_device()):
        model = model_builder(checkpoint=True)

    # ColoOptimizer wraps a regular torch optimizer class (plain SGD here) so
    # that stepping works over ColoTensor parameters.
    colo_optimizer = ColoOptimizer(dict(model.named_parameters()), torch.optim.SGD, lr=0.1)
    for i, (data, label) in enumerate(train_dataloader):
        colo_optimizer.zero_grad()
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        loss.backward()
        colo_optimizer.step()

        if i > 5:
            break


def run_1d_row_tp():
    # A simple net with two stacked nn.Linear
    get_components_func = non_distributed_component_funcs.get_callable('simple_net')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

    parallel_action_list = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Linear,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec = TensorSpec(parallel_action_list)

    parallel_action_list_embedding_row = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Embedding,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_embedding_row = TensorSpec(parallel_action_list_embedding_row)

    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    # A naive way to set specs: match Linear and Embedding weights by parameter name
    for name, p in model.colo_named_parameters():
        if not isinstance(p, ColoTensor):
            continue
        if 'weight' in name and 'LayerNorm' not in name and 'ln' not in name and 'embed' not in name:
            p.set_spec(spec)
        if 'embed' in name and 'weight' in name:
            p.set_spec(spec_embedding_row)

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # For reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

        if rank == 0:
            # print(loss.torch_tensor().item())
            # print('loss torch', loss_torch.item())
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break

def run_bert_1d():
    get_components_func = non_distributed_component_funcs.get_callable('bert')
    model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()
    device = get_current_device()

    set_seed(1)
    with ColoInitContext(device=device):
        model = model_builder(checkpoint=True)

    # parallel_action_list_row = [
    #     ParallelAction(priority=1, compute_pattern=ComputePattern.TP1DRow_Linear, parallel_mode=ParallelMode.PARALLEL_1D)
    # ]
    # spec_row = TensorSpec(parallel_action_list_row)

    parallel_action_list_col = [
        ParallelAction(priority=1, compute_pattern=ComputePattern.TP1DCol_Linear, parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_col = TensorSpec(parallel_action_list_col)

    parallel_action_list_embedding_col = [
        ParallelAction(priority=1, compute_pattern=ComputePattern.TP1DCol_Embedding, parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_embedding_col = TensorSpec(parallel_action_list_embedding_col)

    for name, p in model.colo_named_parameters():
        if not isinstance(p, ColoTensor):
            continue
        #print(name)
        if 'classifier' in name and ('weight' in name or 'bias' in name):
            p.set_spec(spec_col)
        if '_embeddings' in name and 'weight' in name:
            p.set_spec(spec_embedding_col)
    # for name, p in model.colo_named_parameters():
    #     if not isinstance(p, ColoTensor):
    #         continue
    #     print(f"{name}: is_gathered {p.is_gathered()}")

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        if i > 5:
            break
        data = data.to(device)
        label = label.to(device)

        model.train()
        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        loss.backward()

def run_dist(rank, world_size, port):
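    # The config below asks colossalai.launch for a single 1D tensor-parallel
    # group of size world_size, i.e. the ParallelMode.PARALLEL_1D group that
    # the TensorSpecs in the functions above target.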
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_1d_row_tp()
    run_1d_col_tp()

def run_dist_bert(rank, world_size, port):
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_bert_1d()

@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_simple_net(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)

@pytest.mark.dist
# @pytest.mark.parametrize('world_size', [1, 4])
# Don't really add it to pytest yet. After Classifier and Loss are finished, I (jzy) will remove this annotation.
@parameterize('world_size', [1])
@rerun_if_address_is_in_use()
def test_bert(world_size):
    run_func = partial(run_dist_bert, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    # test_simple_net()
    # test_model_parameters()
    # test_colo_optimizer()
    test_bert()