import pytest
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close

import colossalai
from colossalai.legacy.amp import convert_to_apex_amp
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils import set_seed
from colossalai.utils.cuda import get_current_device
from colossalai.zero import GeminiDDP, GeminiOptimizer
from colossalai.zero.gemini.chunk import search_chunk_configuration
from tests.components_to_test import run_fwd_bwd
from tests.components_to_test.registry import non_distributed_component_funcs

PLACEMENT_CONFIGS = [
    {"placement_policy": "static", "shard_param_frac": 0.0},  # zero2
    {"placement_policy": "static", "shard_param_frac": 1.0},  # zero3
    {"placement_policy": "static", "shard_param_frac": 0.5},  # zero3-half
    {"placement_policy": "auto"},
]


def check_grad(model: GeminiDDP, torch_model: torch.nn.Module):
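    # After backward, Gemini keeps gradients inside the parameter chunks, so
    # gather every chunk first, then compare the gradient-holding Gemini
    # parameters against the reference model's .grad tensors.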
    chunk_manager = model.chunk_manager
    param_list = [p for p in model.parameters()]
    chunk_list = chunk_manager.get_chunks(param_list)
    for chunk in chunk_list:
        chunk_manager.access_chunk(chunk)

    for p0, p1 in zip(model.parameters(), torch_model.parameters()):
        assert_close(p0, p1.grad, rtol=1e-3, atol=5e-5)


@parameterize("placement_config", PLACEMENT_CONFIGS)
@parameterize("keep_gather", [False, True])
@parameterize("model_name", ["gpt2", "bert", "albert"])
@parameterize("use_grad_checkpoint", [False, True])
def exam_gpt_fwd_bwd(
    placement_config,
    keep_gather,
    model_name: str,
    use_grad_checkpoint: bool = False,
):
    init_device = get_current_device()
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()

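    # Build two identical copies of the model from the same seed: one to wrap
    # with Gemini, one to serve as the apex AMP + DDP reference.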
    set_seed(42)
    model = model_builder(use_grad_checkpoint)

    set_seed(42)
    torch_model = model_builder(use_grad_checkpoint).cuda()
    for torch_p, p in zip(torch_model.parameters(), model.parameters()):
        torch_p.data.copy_(p.data)

    world_size = dist.get_world_size()
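    # Search a chunk configuration for the model, then pin a small fixed chunk
    # size and the keep_gathered flag under test before wrapping with GeminiDDP.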
    config_dict, *_ = search_chunk_configuration(model, search_range_m=1, search_interval=100)
    config_dict[world_size]["chunk_size"] = 5000
    config_dict[world_size]["keep_gathered"] = keep_gather
    model = GeminiDDP(model, config_dict, init_device, pin_memory=True, **placement_config)
    optimizer = HybridAdam(model.parameters(), lr=1e-3)
    zero_optim = GeminiOptimizer(optimizer, model, initial_scale=1)

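    # Reference setup: the same weights under apex AMP (O2) and torch DDP, with a
    # fixed loss scale of 1 so losses are directly comparable with the Gemini run.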
    rank = dist.get_rank()
    amp_config = dict(opt_level="O2", keep_batchnorm_fp32=False, loss_scale=1)
    torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-3)
    torch_model, torch_optim = convert_to_apex_amp(torch_model, torch_optim, amp_config)
    torch_model = DDP(torch_model, device_ids=[rank])

    set_seed(rank)
    for i, (input_ids, label) in enumerate(train_dataloader):
        # Only a single fwd + bwd step can be checked: after backward, Gemini
        # parameters hold their gradients in place because of chunk reuse.
        if i > 0:
            break
        input_ids, label = input_ids.cuda(), label.cuda()

        torch_optim.zero_grad()
        zero_optim.zero_grad()

        # re-seeding before each forward keeps randomness (e.g. dropout) identical
        # across the two models, playing the same role as calling eval()
        set_seed(42)
        torch_loss = run_fwd_bwd(torch_model, input_ids, label, criterion, torch_optim)
        set_seed(42)
        loss = run_fwd_bwd(model, input_ids, label, criterion, zero_optim)

        assert torch.equal(torch_loss, loss)

        check_grad(model, torch_model)


def run_dist(rank, world_size, port):
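    # Each spawned worker initialises the NCCL process group via colossalai.launch
    # and then runs the parameterized test body.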
    config = {}
    colossalai.launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    exam_gpt_fwd_bwd()


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 4])
@rerun_if_address_is_in_use()
def test_gpt(world_size):
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_gpt(4)