from tests.components_to_test.registry import non_distributed_component_funcs

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from colossalai.tensor import named_params_with_colotensor, TensorSpec, ComputePattern, ParallelAction, ColoTensor
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

from functools import partial
import random
import os
import numpy as np


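# Seed Python, NumPy and PyTorch RNGs (and force deterministic cuDNN) so runs are reproducible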
def set_seed(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def run_1d_row_tp():
    # A simple net with two stacked nn.Linear
    get_components_func = non_distributed_component_funcs.get_callable('simple_net')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)

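    # Tensor spec that marks a parameter for row-wise 1D tensor parallelism (TP1DRow)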
    parallel_action_list = [
        ParallelAction(priority=1, compute_pattern=ComputePattern.TP1DRow, parallel_mode=ParallelMode.PARALLEL_1D)
    ]
    spec = TensorSpec(parallel_action_list)

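    # Rank 0 also builds an unsharded copy of the model from the same seed as a reference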
    set_seed(1)
    if rank == 0:
        model_torch = model_builder(checkpoint=True)
        model_torch = model_torch.cuda()

    # A naive way to set spec for all weights in Linear
    for name, p in named_params_with_colotensor(model):
        if not isinstance(p, ColoTensor):
            continue
        if 'weight' in name and 'LayerNorm' not in name and 'ln' not in name and 'embed' not in name:
            p.set_spec(spec)

    model = model.cuda()

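    # Run a few forward/backward passes and compare the parallel loss with the reference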
    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())

        # Bcast rank0 data to all processes
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))

        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            output = model(data, label)
            loss = output

        # For reference
        if rank == 0:
            if criterion:
                output_torch = model_torch(data)
                loss_torch = criterion(output_torch, label)
            else:
                output_torch = model_torch(data, label)
                loss_torch = output_torch

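        # The loss of the tensor-parallel model should match the single-process reference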
        if rank == 0:
            # print(loss.torch_tensor().item())
            # print('loss torch', loss_torch.item())
            assert torch.allclose(loss.torch_tensor(), loss_torch, rtol=1e-2)

        loss.backward()

        if rank == 0:
            loss_torch.backward()
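        # A handful of batches is enough for this check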
        if i > 5:
            break


def run_dist(rank, world_size, port):
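    # Launch the distributed environment with a 1D tensor-parallel group of size world_size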
    config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_1d_row_tp()


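# Spawn one process per rank; rerun automatically if the chosen port is already in use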
@pytest.mark.dist
@parameterize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_simple_net(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_simple_net()