from functools import partial

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

import colossalai
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import CPUAdam
from colossalai.testing import parameterize, rerun_on_exception
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import BucketTensorShardStrategy, TensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from colossalai.zero.sharded_optim._utils import has_inf_or_nan

from tests.components_to_test.registry import non_distributed_component_funcs
from common import CONFIG, check_sharded_model_params


def _run_step(model, optimizer, data, label, criterion, enable_autocast=False):
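    # One forward/backward/step iteration. A ShardedModelV2 must run backward
    # through its optimizer so ShardedOptimizerV2 can scale the loss; a plain
    # model uses the usual loss.backward().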
    model.train()
    optimizer.zero_grad()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            loss = model(data, label)

    loss = loss.float()
    if isinstance(model, ShardedModelV2):
        optimizer.backward(loss)
    else:
        loss.backward()
    optimizer.step()


@parameterize("cpu_offload", [True, False])
@parameterize("use_cpuadam", [True, False])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
@parameterize("gpu_margin_mem_ratio", [0.0, 0.7])
def _run_test_sharded_optim_v2(cpu_offload, shard_strategy_class, use_cpuadam, gpu_margin_mem_ratio):
    test_models = ['repeated_computed_layers', 'resnet18', 'bert', 'no_leaf_module']
    shard_strategy = shard_strategy_class()

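    # Skip combinations that are not meaningful: CPUAdam is only exercised
    # together with CPU offload, and gpu_margin_mem_ratio only takes effect
    # when both cpu_offload and use_cpuadam are enabled.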
    if use_cpuadam and cpu_offload is False:
        return
    if gpu_margin_mem_ratio > 0.0 and not (cpu_offload and use_cpuadam):
        return

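    # Each registered component supplies a model builder, a dataloader, an
    # optimizer class and a criterion for a short end-to-end run.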
    for model_name in test_models:
        get_components_func = non_distributed_component_funcs.get_callable(model_name)
        model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()

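        # Parameters created inside ZeroInitContext are sharded on the fly;
        # the model is then wrapped in ShardedModelV2.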
        with ZeroInitContext(target_device=torch.device('cpu:0') if cpu_offload else get_current_device(),
                             shard_strategy=shard_strategy,
                             shard_param=True):
            zero_model = model_builder(checkpoint=True)
        zero_model = ShardedModelV2(
            zero_model,
            shard_strategy,
            tensor_placement_policy='cpu' if cpu_offload else 'cuda',
            reuse_fp16_shard=use_cpuadam,
        )

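        # Build an unsharded fp32 reference model with identical weights by
        # copying them back out of the sharded model.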
        model = model_builder(checkpoint=True).half()
        col_model_deepcopy(zero_model, model)
        model = model.cuda().float()

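        # Create matching optimizers for both models. ShardedOptimizerV2 wraps
        # the inner optimizer and handles loss scaling (initial_scale) and the
        # gpu_margin_mem_ratio placement policy.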
        if use_cpuadam:
            optimizer_class = CPUAdam
        optim = optimizer_class(model.parameters(), lr=1e-3)
        sharded_optim = optimizer_class(zero_model.parameters(), lr=1e-3)
        sharded_optim = ShardedOptimizerV2(zero_model,
                                           sharded_optim,
                                           initial_scale=2**5,
                                           gpu_margin_mem_ratio=gpu_margin_mem_ratio)

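        # Baseline: the same model trained with apex AMP (O2), wrapped in DDP
        # when running on more than one process.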
        amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False)
        apex_model, apex_optimizer = convert_to_apex_amp(model, optim, amp_config)
        if dist.get_world_size() > 1:
            apex_model = DDP(apex_model, device_ids=[torch.cuda.current_device()])

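        # Train both models on a few batches; the sharded model's parameters
        # should track the apex AMP baseline within a loose tolerance and stay
        # free of infs/NaNs.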
        for i, (data, label) in enumerate(train_dataloader):
            if i > 5:
                break
            data, label = data.cuda(), label.cuda()
            _run_step(apex_model, apex_optimizer, data, label, criterion, False)
            _run_step(zero_model, sharded_optim, data, label, criterion, False)
            check_sharded_model_params(model, zero_model, loose=True, reuse_fp16_shard=use_cpuadam)
            for param in model.parameters():
                assert not has_inf_or_nan(param)


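# Per-process entry point: initialize the colossalai distributed context,
# then run every parameterized combination on this rank.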
def _run_dist(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    _run_test_sharded_optim_v2()


# Note: use_cpuadam=True can also work with cpu_offload=False, but that
# combination is skipped inside _run_test_sharded_optim_v2.
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_sharded_optim_v2(world_size):
    run_func = partial(_run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_sharded_optim_v2(world_size=2)