from tests.components_to_test.registry import non_distributed_component_funcs

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from colossalai.tensor import named_params_with_colotensor, TensorSpec, ComputePattern, ParallelAction, ColoTensor, ColoOptimizer
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

from functools import partial
import random
import os
import numpy as np

# Hack huggingface's ModelOutput __post_init__
# so it also recognizes our ColoTensor fields
from transformers.file_utils import ModelOutput
from dataclasses import fields


def _post_init_colo(self):
    class_fields = fields(self)
    # Safety and consistency checks
    if not len(class_fields):
        raise ValueError(f"{self.__class__.__name__} has no fields.")
    if not all(field.default is None for field in class_fields[1:]):
        raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

    first_field = getattr(self, class_fields[0].name)
    other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

    def is_tensor_with_colo(x):
        """
        Tests if `x` is a `ColoTensor` or `torch.Tensor`.
        """
        if isinstance(x, torch.Tensor):
            return True
        return isinstance(x, ColoTensor)

    if other_fields_are_none and not is_tensor_with_colo(first_field):
        if isinstance(first_field, dict):
            iterator = first_field.items()
            first_field_iterator = True
        else:
            try:
                iterator = iter(first_field)
                first_field_iterator = True
            except TypeError:
                first_field_iterator = False

        # if we provided an iterator as first field and the iterator is a (key, value) iterator
        # set the associated fields
        if first_field_iterator:
            for element in iterator:
                if (not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str)):
                    break
                setattr(self, element[0], element[1])
                if element[1] is not None:
                    self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
    else:
        for field in class_fields:
            v = getattr(self, field.name)
            if v is not None:
                self[field.name] = v


ModelOutput.__post_init__ = _post_init_colo
# complete the hack
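# A minimal sketch (hypothetical class, not used by these tests) of what the
# patch buys: the stock __post_init__ only recognizes torch.Tensor, so a lone
# ColoTensor first field would be treated as an iterable and unpacked; with
# the patch it survives intact, e.g.
#
#     @dataclass
#     class DummyOutput(ModelOutput):
#         loss: ColoTensor = None
#
#     DummyOutput(loss=my_colo_loss).loss    # still a ColoTensor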


def set_seed(seed):
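    """Seed all RNGs (Python, NumPy, torch CPU/CUDA) and pin cuDNN to
    deterministic mode so repeated runs and all ranks see identical values."""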
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def run_1d_col_tp(model_name):
    # Fetch the registered components for this model (builder, dataloaders, optimizer class, criterion)
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
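    # ColoInitContext intercepts parameter creation so the model's parameters
    # are built as ColoTensors on the given device, ready to receive TP specs.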
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

    if model_name == 'bert':
        parallel_action_list_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_col = TensorSpec(parallel_action_list_col)

        parallel_action_list_embedding_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Embedding,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_embedding_col = TensorSpec(parallel_action_list_embedding_col)

        for name, p in model.colo_named_parameters():
            if not isinstance(p, ColoTensor):
                continue
            # print(name)
            if 'classifier' in name and ('weight' in name or 'bias' in name):
                p.set_spec(spec_col)
            if '_embeddings' in name and 'weight' in name:
                p.set_spec(spec_embedding_col)
    elif model_name == 'simple_net':
        parallel_action_list_row = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DRow_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_row = TensorSpec(parallel_action_list_row)

        parallel_action_list_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Linear,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_col = TensorSpec(parallel_action_list_col)

        parallel_action_list_embedding_col = [
            ParallelAction(priority=1,
                           compute_pattern=ComputePattern.TP1DCol_Embedding,
                           parallel_mode=ParallelMode.PARALLEL_1D)
        ]
        spec_embedding_col = TensorSpec(parallel_action_list_embedding_col)
        # A naive way to set specs on the Linear and embedding weights by name
        for name, p in model.colo_named_parameters():
            if not isinstance(p, ColoTensor):
                continue
            if 'proj1' in name and ('weight' in name or 'bias' in name):
                p.set_spec(spec_col)
            if 'proj2' in name and 'weight' in name:
                p.set_spec(spec_row)
            if 'embed' in name and 'weight' in name:
                p.set_spec(spec_embedding_col)
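        # Note: column-sharding proj1 and row-sharding proj2 is the usual
        # Megatron-LM pairing: the intermediate activation stays partitioned
        # between the two layers and only one all-reduce is needed at the end.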

    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # For reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

        if rank == 0:
            # print(loss.torch_tensor().item())
            # print('loss torch', loss_torch.item())
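            # Loose rtol: tensor parallelism splits the matmuls across ranks,
            # which changes the floating-point reduction order versus the
            # single-GPU reference, so losses only match approximately.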
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break


# Test the overridden parameters() and named_parameters() member functions
def test_model_parameters():
    # build a module with 2 Linear layers (4 parameters) plus one extra parameter, 5 in total.
    class Net(torch.nn.Module):

        def __init__(self):
            super().__init__()
            self.fcs = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Linear(3, 2))
            self.extra_param = torch.nn.Parameter(torch.randn(2))

    with ColoInitContext(device=get_current_device()):
        model = Net()

    param_cnt = 0
    for name, p in model.named_parameters():
        param_cnt += 1
    assert param_cnt == 5

    for name, colo_p in model.colo_named_parameters():
        assert colo_p.is_model_data()

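    # With recurse=False, named_parameters() yields only parameters registered
    # directly on the module itself (just extra_param here), not submodules'.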
    param_cnt = 0
    for name, p in model.named_parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 1

    param_cnt = 0
    for p in model.fcs[0].parameters(recurse=False):
        param_cnt += 1
    assert param_cnt == 2


def test_colo_optimizer():
    get_components_func = non_distributed_component_funcs.get_callable('simple_net')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    set_seed(1)
    with ColoInitContext(lazy_memory_allocate=False, device=get_current_device()):
        model = model_builder(checkpoint=True)

    colo_optimizer = ColoOptimizer(dict(model.named_parameters()), torch.optim.SGD, lr=0.1)
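    # ColoOptimizer wraps the named-parameter dict together with an optimizer
    # class and its kwargs (SGD, lr=0.1), so step() updates the ColoTensors.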
    for i, (data, label) in enumerate(train_dataloader):
        colo_optimizer.zero_grad()
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        loss.backward()
        colo_optimizer.step()

        if i > 5:
            break


def run_1d_row_tp(model_name: str):
    # Fetch the registered components for this model (builder, dataloaders, optimizer class, criterion)
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    parallel_action_list = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Linear,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec = TensorSpec(parallel_action_list)

    parallel_action_list_embedding_row = [
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Embedding,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec_embedding_row = TensorSpec(parallel_action_list_embedding_row)
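    # TP1DRow_Linear shards a Linear weight along its input dimension
    # (Megatron-style row parallelism; partial outputs are all-reduced), and
    # TP1DRow_Embedding presumably shards the embedding table likewise.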

    # A naive way to set specs on the Linear and embedding weights by name
    for name, p in model.colo_named_parameters():
        if not isinstance(p, ColoTensor):
            continue
        if 'weight' in name and 'LayerNorm' not in name and 'ln' not in name and 'embed' not in name:
            p.set_spec(spec)
        if 'embed' in name and 'weight' in name:
            p.set_spec(spec_embedding_row)

    model = model.cuda()

    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # For reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

        if rank == 0:
            # print(loss.torch_tensor().item())
            # print('loss torch', loss_torch.item())
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
        if i > 5:
            break


def run_dist(rank, world_size, port):
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
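    # The config places all world_size ranks in a single 1D tensor-parallel group.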
    for name in ['bert', 'simple_net']:
        run_1d_row_tp(name)
        run_1d_col_tp(name)


@pytest.mark.dist
# FIXME(jzy) world_size = 4 will fail
# @pytest.mark.parametrize('world_size', [4])
@parameterize('world_size', [1])
@rerun_if_address_is_in_use()
def test_model(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    # test_model_parameters()
    # test_colo_optimizer()
    test_model()