"vscode:/vscode.git/clone" did not exist on "ed0c3035cd44918c124e6afc5acd2fb997c61e07"
test_adam.py 10.6 KB
Newer Older
Jun Ru Anderson's avatar
Jun Ru Anderson committed
1
2
3
4
5
6
7
8
9
10
11
12
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.

from copy import deepcopy
import functools

import pytest
import torch

try:
13
    from fairscale.optim.adam import Adam, Precision
Jun Ru Anderson's avatar
Jun Ru Anderson committed
14
15
16
17
18
19
20
21
22

    imported_adam = True
except ImportError:
    imported_adam = False

skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
skip_if_no_adam = pytest.mark.skipif(not imported_adam, reason="Fairscale Adam not available")


23
24
25
def assert_almost_zero(x):
    """Assert that *x* is within 2e-3 of zero.

    Returns 1.0 because this is used with ``Tensor.apply_``, which expects
    a numeric return value for every element it visits.
    """
    tolerance = 2e-3
    assert abs(x) < tolerance
    return 1.0
Jun Ru Anderson's avatar
Jun Ru Anderson committed
26
27


28
def step_test(optimizer, weight, bias, input):
    """Run five optimizer steps on a tiny least-squares problem and check
    that the loss ends up below its starting value.

    ``weight``/``bias`` may live on different GPUs; the closure moves the
    matvec result onto bias's device before combining them.
    """

    def closure():
        optimizer.zero_grad()
        y = weight.mv(input)
        # Bridge a device mismatch when parameters are spread over GPUs.
        if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
            y = y.cuda(bias.get_device())
        loss = (y + bias).pow(2).sum()
        loss.backward()
        return loss

    baseline = closure().item()
    for _ in range(5):
        optimizer.step(closure)
    assert closure().item() < baseline
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69


def state_dict_test(optimizer, weight, bias, input):
    """Verify that an optimizer restored via ``load_state_dict`` tracks the
    original optimizer step-for-step on cloned parameters."""

    def closure(opt, w, b, x):
        opt.zero_grad()
        loss = (w.mv(x) + b).pow(2).sum()
        loss.backward()
        return loss

    fn = functools.partial(closure, optimizer, weight, bias, input)

    # Warm up the optimizer so its state dict carries non-trivial state.
    for _ in range(5):
        optimizer.step(fn)

    # Clone the parameters and build a fresh optimizer over the clones.
    weight_clone = weight.data.clone().requires_grad_()
    bias_clone = bias.data.clone().requires_grad_()
    optimizer_clone = Adam([weight_clone, bias_clone], lr=1e-3)
    fn_clone = functools.partial(closure, optimizer_clone, weight_clone, bias_clone, input)

    # Transfer the primed state into the fresh optimizer.
    optimizer_clone.load_state_dict(deepcopy(optimizer.state_dict()))

    # Step both in lockstep; the parameter trajectories must stay aligned.
    for _ in range(5):
        optimizer.step(fn)
        optimizer_clone.step(fn_clone)
        (weight - weight_clone).to("cpu").detach().apply_(assert_almost_zero)
        (bias - bias_clone).to("cpu").detach().apply_(assert_almost_zero)
70
71
72
73


@skip_if_no_cuda
@skip_if_no_adam
def test_step_full_precision_inferred():
    """fp32 parameters are optimized directly: no fp32 master copy is built."""
    weight = torch.randn(10, 5).cuda().requires_grad_()
    bias = torch.randn(10).cuda().requires_grad_()
    input = torch.randn(5).cuda()
    optimizer = Adam([weight, bias], lr=1e-3)

    # The optimizer must have a working string representation.
    optimizer.__repr__()
    step_test(optimizer, weight, bias, input)

    # All trainable parameters stay in full precision...
    for group in optimizer.param_groups:
        for param in group["params"]:
            if param.requires_grad:
                assert param.dtype == torch.float32
    # ...and no separate fp32 shadow copy was allocated.
    assert not optimizer.fp32_param_groups
89
90
91
92


@skip_if_no_cuda
@skip_if_no_adam
def test_step_mixed_precision_inferred():
    """fp16 parameters should infer mixed precision: the optimizer keeps one
    fp32 master group per fp16 param group, and the master weights stay close
    to the fp16 model weights after stepping."""
    weight = torch.randn(10, 5).cuda().half().requires_grad_()
    bias = torch.randn(10).cuda().half().requires_grad_()
    input = torch.randn(5).half().cuda()
    optimizer = Adam([weight, bias], lr=1e-3)

    # to check if the optimizer can be printed as a string
    optimizer.__repr__()
    step_test(optimizer, weight, bias, input)

    assert len(optimizer.fp32_param_groups) == len(optimizer.param_groups)

    # Tighter tolerance than the module-level assert_almost_zero (1e-3 vs 2e-3):
    # master and model weights should differ only by fp16 rounding. Defined once
    # here instead of inside the inner loop (the original re-created it on every
    # parameter pair and shadowed the module-level helper).
    def _assert_tiny(x):
        assert abs(x) < 1e-3
        return 1.0

    for fp32_group, fp16_group in zip(optimizer.fp32_param_groups, optimizer.param_groups):
        for fp32_p, fp16_p in zip(fp32_group["params"], fp16_group["params"]):
            assert fp32_p.dtype == torch.float32
            if fp16_p.requires_grad:
                assert fp16_p.dtype == torch.float16
                (fp32_p - fp16_p).to("cpu").detach().apply_(_assert_tiny)
Jun Ru Anderson's avatar
Jun Ru Anderson committed
116
117


118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
@skip_if_no_cuda
@skip_if_no_adam
def test_step_memory_efficient():
    """MEMORY_EFFICIENT_MIXED_PRECISION keeps the model parameters in fp16
    and allocates no fp32 master copy."""
    weight = torch.randn(10, 5).cuda().half().requires_grad_()
    bias = torch.randn(10).cuda().half().requires_grad_()
    input = torch.randn(5).half().cuda()
    optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)

    # The optimizer must have a working string representation.
    optimizer.__repr__()
    step_test(optimizer, weight, bias, input)

    trainable = [p for group in optimizer.param_groups for p in group["params"] if p.requires_grad]
    assert all(p.dtype == torch.float16 for p in trainable)
    assert not optimizer.fp32_param_groups


@skip_if_no_cuda
@skip_if_no_adam
def test_step_pure_fp16():
    """PURE_FP16 keeps the Adam moment buffers themselves in fp16 and
    builds no fp32 master copy."""
    weight = torch.randn(10, 5).half().cuda().requires_grad_()
    bias = torch.randn(10).half().cuda().requires_grad_()
    input = torch.randn(5).half().cuda()
    optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)

    # The optimizer must have a working string representation.
    optimizer.__repr__()
    step_test(optimizer, weight, bias, input)

    for param in (weight, bias):
        for buffer_name in ("exp_avg", "exp_avg_sq"):
            assert optimizer.state[param][buffer_name].dtype == torch.float16
    assert not optimizer.fp32_param_groups


Jun Ru Anderson's avatar
Jun Ru Anderson committed
156
157
158
159
160
161
162
163
164
165
166
167
@skip_if_no_cuda
@skip_if_no_adam
def test_step_multigpu():
    """Adam should step correctly when parameters live on two different GPUs."""
    if torch.cuda.device_count() < 2:
        # Skip (rather than silently pass, as a bare return would) so test
        # reports show the missing-hardware condition.
        pytest.skip("requires at least two CUDA devices")
    weight = torch.randn(10, 5).cuda(0).requires_grad_()
    bias = torch.randn(10).cuda(1).requires_grad_()
    input = torch.randn(5).cuda(0)
    optimizer = Adam([weight, bias], lr=1e-3)

    # to check if the optimizer can be printed as a string
    optimizer.__repr__()
    step_test(optimizer, weight, bias, input)
Jun Ru Anderson's avatar
Jun Ru Anderson committed
169
170


171
172
173
174
175
176
177
178
@skip_if_no_cuda
@skip_if_no_adam
def test_step_multigpu_mixed_precision():
    """Mixed-precision stepping with fp16 parameters spread over two GPUs."""
    if torch.cuda.device_count() < 2:
        # Skip (rather than silently pass, as a bare return would) so test
        # reports show the missing-hardware condition.
        pytest.skip("requires at least two CUDA devices")
    weight = torch.randn(10, 5).cuda(0).half().requires_grad_()
    bias = torch.randn(10).cuda(1).half().requires_grad_()
    input = torch.randn(5).cuda(0).half()
    optimizer = Adam([weight, bias], lr=1e-3)

    # to check if the optimizer can be printed as a string
    optimizer.__repr__()
    step_test(optimizer, weight, bias, input)
184
185


186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
@skip_if_no_cuda
@skip_if_no_adam
def test_step_pure_fp16_multigpu():
    """PURE_FP16 stepping across two GPUs: moment buffers stay fp16."""
    if torch.cuda.device_count() < 2:
        # Skip (rather than silently pass, as a bare return would) so test
        # reports show the missing-hardware condition.
        pytest.skip("requires at least two CUDA devices")
    weight = torch.randn(10, 5).half().cuda(0).requires_grad_()
    bias = torch.randn(10).half().cuda(1).requires_grad_()
    input = torch.randn(5).half().cuda(0)
    optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)

    # to check if the optimizer can be printed as a string
    optimizer.__repr__()
    step_test(optimizer, weight, bias, input)

    assert optimizer.state[weight]["exp_avg"].dtype == torch.float16
    assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float16
    assert optimizer.state[bias]["exp_avg"].dtype == torch.float16
    assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float16
204
205


Jun Ru Anderson's avatar
Jun Ru Anderson committed
206
207
@skip_if_no_cuda
@skip_if_no_adam
def test_state_dict_full_precision():
    """state_dict save/restore round-trip with fp32 parameters."""
    w = torch.randn(10, 5).float().cuda().requires_grad_()
    b = torch.randn(10).float().cuda().requires_grad_()
    x = torch.randn(5).float().cuda()
    opt = Adam([w, b], lr=1e-3)
    state_dict_test(opt, w, b, x)
Jun Ru Anderson's avatar
Jun Ru Anderson committed
215
216


217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
@skip_if_no_cuda
@skip_if_no_adam
def test_state_dict_mixed_precision():
    """state_dict save/restore round-trip under MIXED_PRECISION (fp16 params)."""
    w = torch.randn(10, 5).half().cuda().requires_grad_()
    b = torch.randn(10).half().cuda().requires_grad_()
    x = torch.randn(5).half().cuda()
    opt = Adam([w, b], lr=1e-3, precision=Precision.MIXED_PRECISION)
    state_dict_test(opt, w, b, x)


@skip_if_no_cuda
@skip_if_no_adam
def test_state_dict_memory_efficient():
    """state_dict round-trip under MEMORY_EFFICIENT_MIXED_PRECISION (fp16 params)."""
    w = torch.randn(10, 5).half().cuda().requires_grad_()
    b = torch.randn(10).half().cuda().requires_grad_()
    x = torch.randn(5).half().cuda()
    opt = Adam([w, b], lr=1e-3, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)
    state_dict_test(opt, w, b, x)


@skip_if_no_cuda
@skip_if_no_adam
def test_state_dict_pure_fp16():
    """state_dict save/restore round-trip under PURE_FP16 (fp16 params)."""
    w = torch.randn(10, 5).half().cuda().requires_grad_()
    b = torch.randn(10).half().cuda().requires_grad_()
    x = torch.randn(5).half().cuda()
    opt = Adam([w, b], lr=1e-3, precision=Precision.PURE_FP16)
    state_dict_test(opt, w, b, x)
Jun Ru Anderson's avatar
Jun Ru Anderson committed
248
249


250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
@skip_if_no_cuda
@skip_if_no_adam
def test_build_fp32_params():
    """_build_fp32_params should create fp32 master copies that match the
    fp16 model weights to within rounding error."""
    weight = torch.randn(10, 5).cuda().half().requires_grad_()
    bias = torch.randn(10).cuda().half().requires_grad_()
    optimizer = Adam([weight, bias], lr=1e-3)
    optimizer._build_fp32_params([weight, bias])
    for master_group, model_group in zip(optimizer.fp32_param_groups, optimizer.param_groups):
        for master_p, model_p in zip(master_group["params"], model_group["params"]):
            assert master_p.dtype == torch.float32
            if model_p.requires_grad:
                assert model_p.dtype == torch.float16
                (master_p - model_p).to("cpu").detach().apply_(assert_almost_zero)


Jun Ru Anderson's avatar
Jun Ru Anderson committed
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
@skip_if_no_cuda
@skip_if_no_adam
def test_invalid_beta():
    """A beta of 1.0 is outside the valid range and must raise ValueError."""
    w = torch.randn(10, 5, requires_grad=True).float().cuda()
    b = torch.randn(10, requires_grad=True).float().cuda()
    with pytest.raises(ValueError):
        Adam([w, b], lr=1e-2, betas=(1.0, 0.0))


@skip_if_no_cuda
@skip_if_no_adam
def test_invalid_weight_decay():
    """A negative weight decay must be rejected with ValueError."""
    w = torch.randn(10, 5, requires_grad=True).float().cuda()
    b = torch.randn(10, requires_grad=True).float().cuda()
    with pytest.raises(ValueError):
        Adam([w, b], lr=1e-2, weight_decay=-1)


@skip_if_no_cuda
@skip_if_no_adam
def test_amsgrad():
    """AMSGrad is unsupported by this Adam implementation and must raise."""
    w = torch.randn(10, 5, requires_grad=True).float().cuda()
    b = torch.randn(10, requires_grad=True).float().cuda()
    with pytest.raises(RuntimeError):
        Adam([w, b], lr=1e-2, amsgrad=True)
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316


@skip_if_no_cuda
@skip_if_no_adam
def test_mixed_precision_with_full_precision_parameters():
    """MIXED_PRECISION expects fp16 model parameters; fp32 ones must be rejected."""
    w = torch.randn(10, 5, requires_grad=True).float().cuda()
    b = torch.randn(10, requires_grad=True).float().cuda()
    with pytest.raises(AssertionError):
        Adam([w, b], lr=1e-2, precision=Precision.MIXED_PRECISION)


@skip_if_no_cuda
@skip_if_no_adam
def test_memory_efficient_with_full_precision_parameters():
    """MEMORY_EFFICIENT_MIXED_PRECISION expects fp16 params; fp32 must be rejected."""
    w = torch.randn(10, 5, requires_grad=True).float().cuda()
    b = torch.randn(10, requires_grad=True).float().cuda()
    with pytest.raises(AssertionError):
        Adam([w, b], lr=1e-2, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)


@skip_if_no_cuda
@skip_if_no_adam
def test_pure_fp16_with_full_precision_parameters():
    """PURE_FP16 expects fp16 model parameters; fp32 ones must be rejected."""
    w = torch.randn(10, 5, requires_grad=True).float().cuda()
    b = torch.randn(10, requires_grad=True).float().cuda()
    with pytest.raises(AssertionError):
        Adam([w, b], lr=1e-2, precision=Precision.PURE_FP16)