import copy
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp

import colossalai
from colossalai.amp import convert_to_apex_amp, convert_to_naive_amp
from colossalai.testing import assert_close_loose, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.components_to_test.registry import non_distributed_component_funcs


def check_equal(a, b):
    """
    This function checks if two tensors are equal within tolerance
    """
    assert torch.allclose(a.float(), b.float(), rtol=1e-4, atol=1e-3), f'a = {a}, b = {b}'


def run_naive_amp():
    """
    In this test, we compare the naive fp16 optimizer implemented in colossalai
    with apex amp (opt_level O2) and check that both produce the same outputs,
    gradients and updated parameters
    """

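    # disable cudnn autotuning and force deterministic kernels so the two model
    # copies run the same kernels and their results stay comparable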
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    # models to test, taken from the non-distributed test component registry
    test_models = ['repeated_computed_layers', 'nested_model', 'resnet18']
    for test_name in test_models:
        get_component_func = non_distributed_component_funcs.get_callable(test_name)
        model_builder, train_dataloader, _, optim_class, _ = get_component_func()

        # create model
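        # checkpoint=True builds the test model with activation checkpointing enabled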
        naive_amp_model = model_builder(checkpoint=True).cuda()
        apex_amp_model = copy.deepcopy(naive_amp_model)

        # create optimizer
        # we use SGD here, since the correctness of gradient clipping can't be tested with Adam
        naive_amp_optimizer = torch.optim.SGD(naive_amp_model.parameters(), lr=1e-3)
        apex_amp_optimizer = torch.optim.SGD(apex_amp_model.parameters(), lr=1e-3)

        # inject naive and apex amp
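        # initial_scale=128 is the starting loss scale (kept in sync with apex below);
        # clip_grad_norm=1.0 makes the naive amp optimizer clip gradients during step()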
        naive_amp_config = dict(initial_scale=128, clip_grad_norm=1.0)
        naive_amp_model, naive_amp_optimizer = convert_to_naive_amp(naive_amp_model, naive_amp_optimizer,
                                                                    naive_amp_config)
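        # O2 casts the model to fp16 while keeping fp32 master weights; a numeric
        # loss_scale is static, and keep_batchnorm_fp32=False keeps batchnorm in fp16
        # so the cast matches the naive amp model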
        apex_amp_config = dict(opt_level='O2', loss_scale=128, keep_batchnorm_fp32=False)
        apex_amp_model, apex_amp_optimizer = convert_to_apex_amp(apex_amp_model, apex_amp_optimizer, apex_amp_config)

        # create data
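        # only the input is needed; the test compares model outputs directly, so label is unused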
        data_iter = iter(train_dataloader)
        data, label = next(data_iter)
        data = data.cuda()

        # forward pass
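        # assert_close_loose compares with relaxed tolerances to absorb fp16 rounding differences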
        naive_amp_output = naive_amp_model(data)
        apex_amp_output = apex_amp_model(data)
        assert_close_loose(naive_amp_output, apex_amp_output)

        # backward
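        # both amp optimizers scale the loss internally before running backward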
        # use sum() to get big gradient
        naive_amp_optimizer.backward(naive_amp_output.sum())
        apex_amp_optimizer.backward(apex_amp_output.sum())

        # check grad
        for naive_amp_param, apex_amp_param in zip(naive_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(naive_amp_param.grad, apex_amp_param.grad)

        # clip gradient
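        # the naive amp optimizer already clips to clip_grad_norm=1.0 inside step(),
        # so only the apex amp optimizer needs an explicit clipping call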
        apex_amp_optimizer.clip_grad_norm(model=apex_amp_model, max_norm=1.0)

        # step
        naive_amp_optimizer.step()
        apex_amp_optimizer.step()

        # check updated param
        for naive_amp_param, apex_amp_param in zip(naive_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(naive_amp_param, apex_amp_param)


def run_dist(rank, world_size, port):
    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
    run_naive_amp()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_naive_amp():
    world_size = 1
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_naive_amp()