import os
from os.path import join
import shutil
import time
import uuid

from lion_pytorch import Lion
import pytest
import torch

import bitsandbytes as bnb
import bitsandbytes.functional as F
from tests.helpers import describe_dtype, id_formatter

# import apex

k = 20  # update steps in test_optimizer32bit; also reused as the AdEMAMix t_alpha/t_beta3 schedule horizon


def assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):
    idx = torch.isclose(a, b, rtol=rtol, atol=atol)
    error_count = (idx == 0).sum().item()
    if error_count > max_error_count:
        print(f"Too many values not close: assert {error_count} < {max_error_count}")
        torch.testing.assert_close(a, b, rtol=rtol, atol=atol)
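
# A quick sanity check of the helper above (hypothetical values): one stray
# element passes with max_error_count=1 but would fail the strict
# torch.testing.assert_close, e.g.
#   assert_most_approx_close(torch.zeros(4), torch.tensor([0.0, 0.0, 0.0, 1.0]), max_error_count=1)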


def get_temp_dir():
    path = f"/tmp/autoswap/{uuid.uuid4()}"
    os.makedirs(path, exist_ok=True)
    return path


def rm_path(path):
    shutil.rmtree(path)


str2optimizers = {}
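
# Each entry maps a test name to (reference_optimizer_ctor, bnb_optimizer_ctor);
# both constructors take the parameter list as their only required argument.
# The *_pytorch entries below keep an older 3-tuple layout of (None, ref, bnb).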

## TODO: maybe remove these three.
str2optimizers["adam_pytorch"] = (None, torch.optim.Adam, bnb.optim.Adam)
str2optimizers["lion_pytorch"] = (None, Lion, bnb.optim.Lion)
str2optimizers["momentum_pytorch"] = (
    None,
    lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
    bnb.optim.Adam,
)

str2optimizers["adam"] = (torch.optim.Adam, bnb.optim.Adam)
str2optimizers["adam8bit_blockwise"] = (torch.optim.Adam, lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=True))
str2optimizers["paged_adam"] = (torch.optim.Adam, bnb.optim.PagedAdam)
str2optimizers["paged_adamw"] = (torch.optim.AdamW, bnb.optim.PagedAdamW)
str2optimizers["paged_adam8bit_blockwise"] = (
    torch.optim.Adam,
    lambda pxx: bnb.optim.PagedAdam8bit(pxx, block_wise=True),
)
str2optimizers["paged_adamw8bit_blockwise"] = (
    torch.optim.AdamW,
    lambda pxx: bnb.optim.PagedAdamW8bit(pxx, block_wise=True),
)

str2optimizers["ademamix"] = (bnb.optim.ademamix._ReferenceAdEMAMix, bnb.optim.AdEMAMix)
str2optimizers["ademamix8bit_blockwise"] = (
    bnb.optim.ademamix._ReferenceAdEMAMix,
    lambda pxx: bnb.optim.AdEMAMix8bit(pxx),
)
str2optimizers["paged_ademamix"] = (bnb.optim.ademamix._ReferenceAdEMAMix, bnb.optim.PagedAdEMAMix)
str2optimizers["paged_ademamix8bit_blockwise"] = (
    bnb.optim.ademamix._ReferenceAdEMAMix,
    lambda pxx: bnb.optim.PagedAdEMAMix8bit(pxx),
)
str2optimizers["ademamix_scheduled"] = (
    lambda pxx: bnb.optim.ademamix._ReferenceAdEMAMix(pxx, t_alpha=k, t_beta3=k),
    lambda pxx: bnb.optim.AdEMAMix(pxx, t_alpha=k, t_beta3=k),
)
str2optimizers["paged_ademamix_scheduled"] = (
    lambda pxx: bnb.optim.ademamix._ReferenceAdEMAMix(pxx, t_alpha=k, t_beta3=k),
    lambda pxx: bnb.optim.PagedAdEMAMix(pxx, t_alpha=k, t_beta3=k),
)
str2optimizers["ademamix8bit_blockwise_scheduled"] = (
    lambda pxx: bnb.optim.ademamix._ReferenceAdEMAMix(pxx, t_alpha=100, t_beta3=100),
    lambda pxx: bnb.optim.AdEMAMix8bit(pxx, t_alpha=100, t_beta3=100),
)
str2optimizers["paged_ademamix8bit_blockwise_scheduled"] = (
    lambda pxx: bnb.optim.ademamix._ReferenceAdEMAMix(pxx, t_alpha=100, t_beta3=100),
    lambda pxx: bnb.optim.PagedAdEMAMix8bit(pxx, t_alpha=100, t_beta3=100),
)

str2optimizers["lion"] = (Lion, bnb.optim.Lion)
str2optimizers["paged_lion"] = (Lion, bnb.optim.PagedLion)
str2optimizers["lion8bit_blockwise"] = (Lion, lambda pxx: bnb.optim.Lion8bit(pxx, block_wise=True))
str2optimizers["paged_lion8bit_blockwise"] = (Lion, lambda pxx: bnb.optim.PagedLion8bit(pxx, block_wise=True))

str2optimizers["momentum"] = (
    lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
    lambda pxx: bnb.optim.SGD(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["momentum8bit_blockwise"] = (
    lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),
    lambda pxx: bnb.optim.SGD8bit(pxx, 0.01, 0.9, block_wise=True),
)

str2optimizers["rmsprop"] = (
    lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),
    lambda pxx: bnb.optim.RMSprop(pxx, 0.01, 0.9, block_wise=False),
)
str2optimizers["rmsprop8bit_blockwise"] = (
    lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),
    lambda pxx: bnb.optim.RMSprop8bit(pxx, 0.01, 0.9, block_wise=True),
)

str2statenames = {}
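# Maps a test name to the state tensors to compare: (torch_name, bnb_name) pairs
# for 32-bit optimizers, extended with the qmap and absmax keys for the 8-bit
# blockwise optimizers so their quantized state can be dequantized in the tests.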
str2statenames["adam"] = [("exp_avg", "state1"), ("exp_avg_sq", "state2")]
str2statenames["paged_adamw"] = [("exp_avg", "state1"), ("exp_avg_sq", "state2")]
str2statenames["paged_adam"] = [("exp_avg", "state1"), ("exp_avg_sq", "state2")]
str2statenames["lion"] = [("exp_avg", "state1")]
str2statenames["paged_lion"] = [("exp_avg", "state1")]
str2statenames["momentum"] = [("momentum_buffer", "state1")]
str2statenames["lamb"] = [("exp_avg", "state1"), ("exp_avg_sq", "state2")]
str2statenames["rmsprop"] = [("square_avg", "state1")]

str2statenames["adam8bit_blockwise"] = [
    ("exp_avg", "state1", "qmap1", "absmax1"),
    ("exp_avg_sq", "state2", "qmap2", "absmax2"),
]
str2statenames["paged_adam8bit_blockwise"] = [
    ("exp_avg", "state1", "qmap1", "absmax1"),
    ("exp_avg_sq", "state2", "qmap2", "absmax2"),
]
str2statenames["paged_adamw8bit_blockwise"] = [
    ("exp_avg", "state1", "qmap1", "absmax1"),
    ("exp_avg_sq", "state2", "qmap2", "absmax2"),
]

str2statenames["momentum8bit_blockwise"] = [("momentum_buffer", "state1", "qmap1", "absmax1")]
str2statenames["rmsprop8bit_blockwise"] = [("square_avg", "state1", "qmap1", "absmax1")]
str2statenames["lion8bit_blockwise"] = [("exp_avg", "state1", "qmap1", "absmax1")]
str2statenames["paged_lion8bit_blockwise"] = [("exp_avg", "state1", "qmap1", "absmax1")]

str2statenames["ademamix"] = str2statenames["ademamix_scheduled"] = [("m1_m2", "state1"), ("nu", "state2")]
str2statenames["paged_ademamix"] = str2statenames["paged_ademamix_scheduled"] = [("m1_m2", "state1"), ("nu", "state2")]
str2statenames["ademamix8bit_blockwise"] = str2statenames["ademamix8bit_blockwise_scheduled"] = [
    ("m1_m2", "state1", "qmap1", "absmax1"),
    ("nu", "state2", "qmap2", "absmax2"),
]
str2statenames["paged_ademamix8bit_blockwise"] = [
    ("m1_m2", "state1", "qmap1", "absmax1"),
    ("nu", "state2", "qmap2", "absmax2"),
]
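
# AdEMAMix tracks two momentum EMAs stacked into a single tensor, hence the
# "m1_m2" name for state1; in the 8-bit variants the absmax is stacked the same
# way while the qmap is shared (see the dequantization logic in test_optimizer8bit).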

optimizer_names_32bit = [
    "adam",
    "paged_adamw",
    "paged_adam",
    "momentum",
    "rmsprop",
    "lion",
    "paged_lion",
    "ademamix",
    "ademamix_scheduled",
    "paged_ademamix",
    "paged_ademamix_scheduled",
]


@pytest.mark.parametrize("optim_name", optimizer_names_32bit, ids=id_formatter("opt"))
@pytest.mark.parametrize("gtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
@pytest.mark.parametrize("dim2", [32, 1024, 4097, 1], ids=id_formatter("dim2"))
def test_optimizer32bit(requires_cuda, dim1, dim2, gtype, optim_name):
    if gtype == torch.bfloat16 and optim_name in ["momentum", "rmsprop"]:
        pytest.skip()
    if dim1 == 1 and dim2 == 1:
        return
    p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
    p2 = p1.clone()
    p1 = p1.float()

    torch_optimizer = str2optimizers[optim_name][0]([p1])
    bnb_optimizer = str2optimizers[optim_name][1]([p2])

    if gtype == torch.float32:
        atol, rtol = 1e-6, 1e-5
    elif gtype == torch.bfloat16:
        atol, rtol = 1e-3, 1e-2
    else:
        atol, rtol = 1e-4, 1e-3

    for i in range(k):
        g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
        p1.grad = g.clone().float()
        p2.grad = g.clone()

        bnb_optimizer.step()
        torch_optimizer.step()

        for name1, name2 in str2statenames[optim_name]:
            torch.testing.assert_close(
                torch_optimizer.state[p1][name1],
                bnb_optimizer.state[p2][name2].cuda(),
                atol=atol,
                rtol=rtol,
            )

        # since Lion can have pretty noisy updates where things lie at the boundary
        # allow up to 10 errors for Lion
        assert_most_approx_close(p1, p2.float(), atol=atol, rtol=rtol, max_error_count=10)

        if i % (k // 5) == 0 and i > 0:
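            # Periodically round-trip the optimizer through a state_dict
            # save/load and check it still matches the reference afterwards.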
            path = get_temp_dir()
            torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
            del bnb_optimizer
            bnb_optimizer = None
            bnb_optimizer = str2optimizers[optim_name][1]([p2])
            bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
            rm_path(path)
            # since Lion can have pretty noisy updates where things lie at the boundary
            # allow up to 10 errors for Lion
            assert_most_approx_close(p1, p2.float(), atol=atol, rtol=rtol, max_error_count=10)
            for name1, name2 in str2statenames[optim_name]:
                # since Lion can have pretty noisy updates where things lie at the boundary
                # allow up to 10 errors for Lion
                assert_most_approx_close(
                    torch_optimizer.state[p1][name1],
                    bnb_optimizer.state[p2][name2],
                    atol=atol,
                    rtol=rtol,
                    max_error_count=10,
                )

        if gtype != torch.float32:
            # the optimizer state buffers should still be close because they are 32-bit,
            # but the 16-bit parameters can diverge, and the difference grows
            # larger with each update
            # --> copy the state to keep the weights close
            p1.data = p1.data.to(p2.dtype).float()
            p2.copy_(p1.data)
            torch.testing.assert_close(p1.to(p2.dtype), p2)
        # Note: neither "lars" nor "lamb" appears in optimizer_names_32bit, so
        # this branch is currently inert.
        if optim_name in ["lars", "lamb"]:
            assert bnb_optimizer.state[p2]["unorm_vec"] > 0.0


@pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
@pytest.mark.parametrize("dim2", [32, 1024, 4097], ids=id_formatter("dim2"))
@pytest.mark.parametrize("gtype", [torch.float32, torch.float16], ids=describe_dtype)
def test_global_config(requires_cuda, dim1, dim2, gtype):
    if dim1 == 1 and dim2 == 1:
        return
    p1 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
    p2 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
    p3 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
    mask = torch.rand_like(p2) < 0.1
    beta1 = 0.9
    beta2 = 0.999
    lr = 0.001
    eps = 1e-8

    bnb.optim.GlobalOptimManager.get_instance().initialize()
    bnb.optim.GlobalOptimManager.get_instance().override_config(p3, "optim_bits", 8)

    bnb.optim.GlobalOptimManager.get_instance().register_parameters([p1, p2, p3])
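
    # With the override registered before the parameters are moved to CUDA, the
    # Adam instance created below keeps 8-bit (uint8) state for p3 while p1 and
    # p2 use the default 32-bit state; the asserts in the loop check exactly this.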
    p1 = p1.cuda()
    p2 = p2.cuda()
    p3 = p3.cuda()

    adam2 = bnb.optim.Adam([p1, p2, p3], lr, (beta1, beta2), eps)

    if gtype == torch.float32:
        atol, rtol = 1e-6, 1e-5
    else:
        atol, rtol = 1e-4, 1e-3

    for i in range(50):
        g1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
        g2 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
        g3 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + 0.001
        p1.grad = g1
        p2.grad = g2
        p3.grad = g3

        adam2.step()

        assert adam2.state[p3]["state1"].dtype == torch.uint8
        assert adam2.state[p3]["state2"].dtype == torch.uint8


optimizer_names_8bit = [
    # Non-blockwise optimizers are deprecated.
    # "adam8bit",
    # "lion8bit",
    # "momentum8bit",
    # "rmsprop8bit",
    "adam8bit_blockwise",
    "lion8bit_blockwise",
    "momentum8bit_blockwise",
    "rmsprop8bit_blockwise",
    "ademamix8bit_blockwise",
    "ademamix8bit_blockwise_scheduled",
]


@pytest.mark.parametrize("optim_name", optimizer_names_8bit, ids=id_formatter("opt"))
@pytest.mark.parametrize("gtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize("dim2", [32, 1024, 4097], ids=id_formatter("dim2"))
@pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
def test_optimizer8bit(requires_cuda, dim1, dim2, gtype, optim_name):
    torch.set_printoptions(precision=6)

    if gtype == torch.bfloat16 and "blockwise" not in optim_name:
        pytest.skip()

    if dim1 == 1 and dim2 == 1:
        return
    p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1
    p2 = p1.clone()
    p1 = p1.float()
    blocksize = 256
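    # 256 is assumed to match the block size the 8-bit blockwise optimizers use
    # internally, so dequantizing with the same value reconstructs their state.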

    torch_optimizer = str2optimizers[optim_name][0]([p1])
    bnb_optimizer = str2optimizers[optim_name][1]([p2])

    if gtype == torch.float32:
        atol, rtol = 3e-3, 1e-3
        patol, prtol = 1e-5, 1e-3
    elif gtype == torch.bfloat16:
        atol, rtol = 3e-3, 1e-3
        patol, prtol = 1e-4, 1e-2
    else:
        atol, rtol = 3e-3, 1e-3
        patol, prtol = 1e-5, 1e-3

    errors = []
    relerrors = []

    for i in range(50):
        g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
        p1.grad = g.clone().float()
        p2.grad = g.clone()

        bnb_optimizer.step()
        torch_optimizer.step()

        # since Lion can have pretty noisy updates where things lie at the boundary
        assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=0)

        dequant_states = []
        for name1, name2, qmap, max_val in str2statenames[optim_name]:
            # print(bnb_optimizer.state[p2][max_val], name1)
            if "blockwise" in optim_name:
                ## For AdEMAMix, we need to dequantize [p2][name2][0] and [p2][name2][1]
                ## separately and then stack them. The qmap is shared, but absmax is also stacked.
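                ## (Assumed layout: state1 has shape (2, *p.shape) with m1 at
                ## index 0 and m2 at index 1, and absmax1 is stacked likewise.)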
                if optim_name == "ademamix8bit_blockwise" and name1 == "m1_m2":
                    m1 = F.dequantize_blockwise(
                        code=bnb_optimizer.state[p2][qmap],
                        absmax=bnb_optimizer.state[p2][max_val][0],
                        A=bnb_optimizer.state[p2][name2][0],
                        blocksize=blocksize,
                    )
                    m2 = F.dequantize_blockwise(
                        code=bnb_optimizer.state[p2][qmap],
                        absmax=bnb_optimizer.state[p2][max_val][1],
                        A=bnb_optimizer.state[p2][name2][1],
                        blocksize=blocksize,
                    )

                    s1 = torch.stack((m1, m2))

                else:
                    s1 = F.dequantize_blockwise(
                        code=bnb_optimizer.state[p2][qmap],
                        absmax=bnb_optimizer.state[p2][max_val],
                        A=bnb_optimizer.state[p2][name2],
                        blocksize=blocksize,
                    )
            else:
                s1 = F.dequantize(
                    code=bnb_optimizer.state[p2][qmap],
                    absmax=bnb_optimizer.state[p2][max_val],
                    A=bnb_optimizer.state[p2][name2],
                )
            num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol) == 0
            # assert num_not_close.sum().item() < 20
            dequant_states.append(s1.clone())

        err = torch.abs(p1 - p2)
        relerr = err / (torch.abs(p1) + 1e-9)
        if g.dtype == torch.bfloat16:
            assert err.mean() <= 0.00017
            assert relerr.mean() <= 0.0016
        else:
            assert err.mean() < 0.00006
            assert relerr.mean() < 0.0006

        errors.append(err.mean().item())
        relerrors.append(relerr.mean().item())

        if i % 10 == 0 and i > 0:
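            # Round-trip the quantized optimizer state through a state_dict
            # save/load and verify the raw uint8 state, the qmap, and the
            # dequantized values all survive unchanged.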
            for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):
                s1cpy = s.clone()
                raws1cpy = bnb_optimizer.state[p2][name2].clone()
                qmap1 = bnb_optimizer.state[p2][qmap].clone()

                path = get_temp_dir()
                torch.save(bnb_optimizer.state_dict(), join(path, "opt.pt"))
                del bnb_optimizer
                bnb_optimizer = None
                bnb_optimizer = str2optimizers[optim_name][1]([p2])
                bnb_optimizer.load_state_dict(torch.load(join(path, "opt.pt")))
                rm_path(path)
                torch.testing.assert_close(raws1cpy, bnb_optimizer.state[p2][name2])
                torch.testing.assert_close(qmap1, bnb_optimizer.state[p2][qmap])

                if "blockwise" in optim_name:
                    ## For AdEMAMix, we need to dequantize [p2][name2][0] and [p2][name2][1]
                    ## separately and then stack them. The qmap is shared, but absmax is also stacked.
                    if optim_name == "ademamix8bit_blockwise" and name1 == "m1_m2":
                        s1 = torch.stack(
                            (
                                F.dequantize_blockwise(
                                    code=bnb_optimizer.state[p2][qmap],
                                    absmax=bnb_optimizer.state[p2][max_val][0],
                                    A=bnb_optimizer.state[p2][name2][0],
                                    blocksize=blocksize,
                                ),
                                F.dequantize_blockwise(
                                    code=bnb_optimizer.state[p2][qmap],
                                    absmax=bnb_optimizer.state[p2][max_val][1],
                                    A=bnb_optimizer.state[p2][name2][1],
                                    blocksize=blocksize,
                                ),
                            )
                        )
                    else:
                        s1 = F.dequantize_blockwise(
                            code=bnb_optimizer.state[p2][qmap],
                            absmax=bnb_optimizer.state[p2][max_val],
                            A=bnb_optimizer.state[p2][name2],
                            blocksize=blocksize,
                        )
                else:
                    s1 = F.dequantize(
                        code=bnb_optimizer.state[p2][qmap],
                        absmax=bnb_optimizer.state[p2][max_val],
                        A=bnb_optimizer.state[p2][name2],
                    )
                torch.testing.assert_close(s1cpy, s1)

                num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol) == 0
                assert num_not_close.sum().item() < 20

            # Lion can have pretty noisy updates where things lie at the boundary
            assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=0)

        # the parameters diverge quickly. Here we keep them close
        # together so we can test against the Adam error
        p1.data = p1.data.to(gtype).float()
        p2.copy_(p1.data)
        torch.testing.assert_close(p1.to(gtype), p2)
        for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):
            torch_optimizer.state[p1][name1].copy_(s.data)

    # print(sum(errors)/len(errors))
    # print(sum(relerrors)/len(relerrors))


@pytest.mark.parametrize("optim_bits", [32, 8], ids=id_formatter("optim_bits"))
@pytest.mark.parametrize("gtype", [torch.float32], ids=describe_dtype)
@pytest.mark.parametrize("dim2", [32, 1024, 4097], ids=id_formatter("dim2"))
@pytest.mark.parametrize("dim1", [1024], ids=id_formatter("dim1"))
@pytest.mark.deprecated
def test_adam_percentile_clipping(requires_cuda, dim1, dim2, gtype, optim_bits):
    if dim1 == 1 and dim2 == 1:
        return
    p1 = torch.randn(dim1, dim2, device="cpu", dtype=gtype) * 0.1
    beta1 = 0.9
    beta2 = 0.999
    lr = 0.001
    eps = 1e-8
    p1 = p1.cuda()
    p2 = p1.clone()
    adam1 = bnb.optim.Adam([p1], lr, (beta1, beta2), eps, optim_bits=optim_bits)
    adam2 = bnb.optim.Adam(
        [p2],
        lr,
        (beta1, beta2),
        eps,
        optim_bits=optim_bits,
        percentile_clipping=5,
    )

    gnorm_vec = torch.zeros(100).cuda()
    step = 0

    for i in range(50):
        step += 1
        g1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1 + (0.01 * i)
        g2 = g1.clone()
        p2.grad = g2

        current_gnorm, clip_val, gnorm_scale = F.percentile_clipping(g1, gnorm_vec, step, 5)
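        # gnorm_vec holds a running history of gradient norms; percentile_clipping
        # returns a scale that clips this step's gradient to the 5th-percentile
        # norm of that history. Scaling g1 manually mirrors what adam2 does
        # internally via percentile_clipping=5.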
        g1 = (g1.float() * gnorm_scale).to(gtype)
        p1.grad = g1

        adam1.step()
        adam2.step()

        # gnorm_scale is not deterministic (warp reductions), so the optimizer states can differ slightly
        if optim_bits == 32:
            torch.testing.assert_close(p1, p2)
            torch.testing.assert_close(
                adam1.state[p1]["state1"],
                adam2.state[p2]["state1"],
                atol=5e-5,
                rtol=1e-4,
            )
            torch.testing.assert_close(
                adam1.state[p1]["state2"],
                adam2.state[p2]["state2"],
                atol=5e-5,
                rtol=1e-4,
            )
        elif optim_bits == 8:
            torch.testing.assert_close(p1, p2, atol=1e-4, rtol=1e-3)
            torch.testing.assert_close(
                adam1.state[p1]["state1"],
                adam2.state[p2]["state1"],
                atol=2,
                rtol=1e-3,
            )
            torch.testing.assert_close(
                adam1.state[p1]["state2"],
                adam2.state[p2]["state2"],
                atol=2,
                rtol=1e-3,
            )
            adam1.state[p1]["state1"].copy_(adam2.state[p2]["state1"])
            adam1.state[p1]["state2"].copy_(adam2.state[p2]["state2"])
        if i % 10 == 0 and i > 0:
            path = get_temp_dir()
            torch.save(adam2.state_dict(), join(path, "opt.pt"))
            del adam2
            adam2 = None
            adam2 = bnb.optim.Adam(
                [p2],
                lr,
                (beta1, beta2),
                eps,
                optim_bits=optim_bits,
                percentile_clipping=5,
            )
            adam2.load_state_dict(torch.load(join(path, "opt.pt")))


optimizer_names_benchmark = [
    "adam8bit_blockwise",
    "paged_adam8bit_blockwise",
    "ademamix8bit_blockwise",
    "paged_ademamix8bit_blockwise",
    "ademamix8bit_blockwise_scheduled",
    "paged_ademamix8bit_blockwise_scheduled",
    "lion8bit_blockwise",
    "paged_lion8bit_blockwise",
]


@pytest.mark.parametrize("dim1", [4096], ids=id_formatter("dim1"))
@pytest.mark.parametrize("dim2", [4096], ids=id_formatter("dim2"))
@pytest.mark.parametrize("gtype", [torch.float32, torch.bfloat16, torch.float16], ids=describe_dtype)
@pytest.mark.parametrize("optim_name", optimizer_names_benchmark, ids=id_formatter("opt"))
@pytest.mark.benchmark
def test_benchmark_blockwise(dim1, dim2, gtype, optim_name):
    if dim1 == 1 and dim2 == 1:
        return
    p1 = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.1

    bnb_optimizer = str2optimizers[optim_name][1]([p1])

    g = torch.randn(dim1, dim2, device="cuda", dtype=gtype) * 0.01
    p1.grad = g
    total_steps = 500
    for i in range(total_steps):
        if i == total_steps // 5:
            # 100 iterations for burn-in
            torch.cuda.synchronize()
            t0 = time.time()

        bnb_optimizer.step()

    torch.cuda.synchronize()
    s = time.time() - t0
    print("")
    params = (total_steps - total_steps // 5) * dim1 * dim2
    print(optim_name, gtype, s, params, s / params)
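    # s / params = seconds per single parameter update over the timed steps
    # (the first total_steps // 5 iterations are excluded as burn-in).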
    # assert s < 3.9