import pytest
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close

import colossalai
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils.cuda import get_current_device
from colossalai.zero import GeminiDDP, GeminiOptimizer
from colossalai.zero.gemini.chunk import search_chunk_configuration
from tests.components_to_test import run_fwd_bwd
from tests.components_to_test.registry import non_distributed_component_funcs
from tests.test_tensor.common_utils import set_seed

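# static placement: shard_param_frac sets what fraction of the parameters is sharded
# across ranks (0.0 behaves like ZeRO-2, 1.0 like ZeRO-3); 'auto' lets Gemini decide
# placement at runtime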
PLACEMENT_CONFIGS = [
    {
        'placement_policy': 'static',
        'shard_param_frac': 0.0
    },    # zero2
    {
        'placement_policy': 'static',
        'shard_param_frac': 1.0
    },    # zero3
    {
        'placement_policy': 'static',
        'shard_param_frac': 0.5
    },    # zero3-half
    {
        'placement_policy': 'auto'
    }
]


def check_grad(model: GeminiDDP, torch_model: torch.nn.Module):
    chunk_manager = model.chunk_manager
    param_list = list(model.parameters())
    chunk_list = chunk_manager.get_chunks(param_list)
    for chunk in chunk_list:
        chunk_manager.access_chunk(chunk)

    # after backward, Gemini's parameter chunks hold the gradients (chunk reuse),
    # so the Gemini parameters are compared against the DDP model's .grad tensors
    for (p0, p1) in zip(model.parameters(), torch_model.parameters()):
        assert_close(p0, p1.grad, rtol=1e-3, atol=5e-5)


@parameterize('placement_config', PLACEMENT_CONFIGS)
@parameterize('keep_gather', [False, True])
@parameterize('model_name', ['gpt2', 'bert', 'albert'])
@parameterize('use_grad_checkpoint', [False, True])
def exam_gpt_fwd_bwd(
    placement_config,
    keep_gather,
    model_name: str,
    use_grad_checkpoint: bool = False,
):
    init_device = get_current_device()
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

    set_seed(42)
    model = model_builder(use_grad_checkpoint)

    set_seed(42)
    torch_model = model_builder(use_grad_checkpoint).cuda()
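    # sync initial weights so the Gemini model and the torch baseline start identical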
    for torch_p, p in zip(torch_model.parameters(), model.parameters()):
        torch_p.data.copy_(p.data)

    world_size = torch.distributed.get_world_size()
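    # search for a chunk configuration, then pin the chunk size and gather behaviour for this test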
    config_dict, *_ = search_chunk_configuration(model, search_range_m=1, search_interval=100)
    config_dict[world_size]['chunk_size'] = 5000
    config_dict[world_size]['keep_gathered'] = keep_gather
    model = GeminiDDP(model, config_dict, init_device, pin_memory=True, **placement_config)
    optimizer = HybridAdam(model.parameters(), lr=1e-3)
    zero_optim = GeminiOptimizer(optimizer, model, initial_scale=1)

    rank = dist.get_rank()
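    # torch baseline: apex O2 (fp16) + DDP; loss_scale=1 matches the Gemini optimizer's initial_scale=1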
    amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False, loss_scale=1)
    torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-3)
    torch_model, torch_optim = convert_to_apex_amp(torch_model, torch_optim, amp_config)
    torch_model = DDP(torch_model, device_ids=[rank])

    set_seed(rank)
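    # ranks are seeded differently, so each process draws its own batch from the dataloader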
    for i, (input_ids, label) in enumerate(train_dataloader):
        # Only a single fwd + bwd step can be tested: after backward, Gemini's
        # chunk reuse optimization stores the gradients in the parameter chunks.
        if i > 0:
            break
        input_ids, label = input_ids.cuda(), label.cuda()

        torch_optim.zero_grad()
        zero_optim.zero_grad()

        # reset the seed so dropout behaves identically in both models (same effect as eval mode)
        set_seed(42)
        torch_loss = run_fwd_bwd(torch_model, input_ids, label, criterion, torch_optim)
        set_seed(42)
        loss = run_fwd_bwd(model, input_ids, label, criterion, zero_optim)

        assert torch.equal(torch_loss, loss)

        check_grad(model, torch_model)


def run_dist(rank, world_size, port):
    config = {}
    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    exam_gpt_fwd_bwd()


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_gpt(world_size):
    spawn(run_dist, world_size)


if __name__ == '__main__':
    test_gpt(4)