import pytest

from functools import partial
from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, set_seed

import torch
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.multiprocessing as mp

import colossalai
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils.model.colo_init_context import ColoInitContext
from colossalai.tensor import ShardSpec, ComputePattern, ComputeSpec, ProcessGroup, ColoTensor, ColoTensorSpec
from colossalai.nn.parallel.data_parallel import ColoDDP
from tests.components_to_test.registry import non_distributed_component_funcs


def init_1d_row_spec(model, pg: ProcessGroup):
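    # shard every 'weight' parameter outside the LayerNorm layers along dim 0 across the tensor-parallel group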
    tensor_spec = (ShardSpec([0], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
    for n, p in model.named_parameters():
        p.set_process_group(pg)
        if 'weight' in n and 'ln' not in n:
            p.set_tensor_spec(*tensor_spec)


def init_1d_col_spec(model, pg: ProcessGroup):
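    # shard weights and biases outside the LayerNorm layers along the last dim across the tensor-parallel group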
    spec = (ShardSpec([-1], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))

    for n, p in model.named_parameters():
        p.set_process_group(pg)
        if 'ln' not in n and ('weight' in n or 'bias' in n):
            p.set_tensor_spec(*spec)


def check_param_equal(model, torch_model, pg: ProcessGroup):
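    # each local shard must match the corresponding slice of the unsharded torch parameter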
    for p, torch_p in zip(model.parameters(), torch_model.parameters()):
        assert pg.tp_local_rank() is not None, f"{pg.rank()} {pg.tp_world_size()} {pg._tp_degree} {pg.tp_local_rank()}"
        assert pg.tp_world_size() is not None
        assert tensor_shard_equal(torch_p, p, pg.tp_local_rank(), pg.tp_world_size())


def check_grad_equal(model, torch_model, pg: ProcessGroup):
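    # each local gradient shard must match the corresponding slice of the torch gradient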
    for p, torch_p in zip(model.parameters(), torch_model.parameters()):
        assert tensor_shard_equal(torch_p.grad, p.grad, pg.tp_local_rank(), pg.tp_world_size())


def run_gpt(init_spec_func, use_ddp):
    world_size = torch.distributed.get_world_size()

    # build a ProcessGroup with hybrid tensor parallelism (TP) and data parallelism (DP)
    pg = ProcessGroup(dp_degree=(2 if (use_ddp and world_size >= 2) else 1))

    # set the seed so that processes in the same tp group use the same seed
    # set_seed(pg.tp_local_rank())

    get_components_func = non_distributed_component_funcs.get_callable('gpt2')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

    # make sure torch_model and model have the same parameter values
    with ColoInitContext(device=get_current_device()):
        model = model_builder()
    model = model.cuda()
    torch_model = model_builder().cuda()

    if use_ddp:
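        # wrap the reference model with PyTorch DDP on the DP sub-group and the Colossal-AI model with ColoDDP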
        torch_model = DDP(torch_model, device_ids=[pg.rank()], process_group=pg.dp_process_group())
        model = ColoDDP(model, process_group=pg)

    for torch_p, p in zip(torch_model.parameters(), model.parameters()):
        torch_p.data.copy_(p)

    init_spec_func(model, pg)

    check_param_equal(model, torch_model, pg)

    # switch to eval mode to disable dropout so outputs are deterministic
    model.eval()
    torch_model.eval()
    set_seed(pg.dp_local_rank())
    torch.distributed.barrier()
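    # run two steps and compare logits and gradients against the torch reference at each step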
    for i, (input_ids, attn_mask) in enumerate(train_dataloader):
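        # the Colossal-AI model takes a ColoTensor bound to the process group; the torch model takes the raw tensor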
        colo_input = ColoTensor.from_torch_tensor(input_ids, ColoTensorSpec(pg))
        logits = model(colo_input, attn_mask)
        torch_logits = torch_model(input_ids, attn_mask)
        assert tensor_equal(torch_logits, logits), f"{torch_logits - logits}"
        loss = criterion(logits, input_ids)
        torch_loss = criterion(torch_logits, input_ids)
        if use_ddp:
            model.backward(loss)
        else:
            loss.backward()
        torch_loss.backward()
        check_grad_equal(model, torch_model, pg)
        if i > 0:
            break
    set_seed(313)


def run_dist(rank, world_size, port, use_ddp):
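    # hybrid DP + TP needs more than one process, so skip the DDP case when world_size == 1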
    if use_ddp and world_size == 1:
        return
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_gpt(init_1d_row_spec, use_ddp)
    run_gpt(init_1d_col_spec, use_ddp)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@pytest.mark.parametrize('use_ddp', [False, True])
@rerun_if_address_is_in_use()
def test_gpt(world_size, use_ddp):
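    # spawn one worker per rank; each worker tests both the row- and column-sharded specs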
    run_func = partial(run_dist, world_size=world_size, port=free_port(), use_ddp=use_ddp)
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_gpt(4, use_ddp=True)