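"""Unit tests for DeepSpeed checkpoint save/load correctness across fp16/fp32
optimizers, ZeRO stages, and LR schedulers."""
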
import torch
import deepspeed
from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
from deepspeed.runtime.zero.stage1 import FP16_DeepSpeedZeroOptimizer_Stage1

from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer

import pytest
import os
import numbers
from common import distributed_test
from simple_model import SimpleModel, random_dataloader, args_from_dict


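# The helpers below compare a trained DeepSpeed engine against one restored
# from a checkpoint: engine bookkeeping, fp16/fp32 weights, optimizer state,
# and LR scheduler state.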
def compare_deepspeed_states(saved_model, loaded_model):
    # These are compared in more depth in other places
    assert hasattr(loaded_model, 'module')

    assert saved_model.csr_tensor_module_names == loaded_model.csr_tensor_module_names
    assert saved_model.skipped_steps == loaded_model.skipped_steps
    assert saved_model.global_steps == loaded_model.global_steps


def compare_model_states(saved_model, loaded_model):
    compare_deepspeed_states(saved_model, loaded_model)

    for p0, p1 in zip(saved_model.module.parameters(), loaded_model.module.parameters()):
        assert torch.allclose(p0, p1, atol=1e-07), f"FP16 model state {p0} is not equal to {p1}"

    if isinstance(saved_model.optimizer, FP16_DeepSpeedZeroOptimizer):
        for p0, p1 in zip(saved_model.optimizer.single_partition_of_fp32_groups,
                          loaded_model.optimizer.single_partition_of_fp32_groups):
            assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model state {p0} is not equal to {p1}"

    elif isinstance(saved_model.optimizer, FP16_DeepSpeedZeroOptimizer_Stage1):
        for partition0, partition1 in zip(saved_model.optimizer.local_sub_partitions_of_fp32_groups,
                                          loaded_model.optimizer.local_sub_partitions_of_fp32_groups):
            for p0, p1 in zip(partition0, partition1):
                assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model state {p0} is not equal to {p1}"

    elif isinstance(saved_model.optimizer, FP16_Optimizer):
        for p0, p1 in zip(saved_model.optimizer.fp32_groups_flat,
                          loaded_model.optimizer.fp32_groups_flat):
            assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model state {p0} is not equal to {p1}"

    elif isinstance(saved_model.optimizer, FP16_UnfusedOptimizer):
        for params0, params1 in zip(saved_model.optimizer.fp32_groups,
                                    loaded_model.optimizer.fp32_groups):
            for p0, p1 in zip(params0, params1):
                assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model state {p0} is not equal to {p1}"
    elif isinstance(saved_model.optimizer, torch.optim.Optimizer):
        pass
    else:
        assert False, f'Unexpected Optimizer Type: {saved_model.optimizer}'


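# When fp16 is enabled the engine's optimizer is one of the wrappers above;
# its `.optimizer` attribute holds the underlying torch optimizer whose state
# the checkpoint must restore.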
def compare_optimizer_states(saved_model, loaded_model, hidden_dim, fp16=True):
    saved_optimizer = saved_model.optimizer.optimizer if fp16 else saved_model.optimizer
    loaded_optimizer = loaded_model.optimizer.optimizer if fp16 else loaded_model.optimizer

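    # Per-parameter state entries (e.g. Adam's exp_avg / exp_avg_sq) must
    # match exactly after a checkpoint round-trip.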
    for state0, state1 in zip(saved_optimizer.state.values(),
                              loaded_optimizer.state.values()):
        for s0, s1 in zip(state0.values(), state1.values()):
            if isinstance(s0, torch.Tensor) and isinstance(s1, torch.Tensor):
                assert torch.equal(s0, s1)
            else:
                assert s0 == s1


def compare_lr_scheduler_states(saved_model, loaded_model):
    assert hasattr(saved_model, 'lr_scheduler')
    assert hasattr(loaded_model, 'lr_scheduler')

    saved_scheduler = saved_model.lr_scheduler
    loaded_scheduler = loaded_model.lr_scheduler

    assert hasattr(saved_scheduler, 'state_dict')
    assert hasattr(loaded_scheduler, 'state_dict')

    saved_sd = saved_scheduler.state_dict()
    loaded_sd = loaded_scheduler.state_dict()

    print(f"saved_sd = {saved_sd}")
    print(f"loaded_sd = {loaded_sd}")

    assert saved_sd.keys() == loaded_sd.keys()

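    # Only numeric entries are compared; non-numeric scheduler state is
    # skipped by this check.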
    for state0, state1 in zip(saved_sd.values(), loaded_sd.values()):
        if isinstance(state0, numbers.Number) and isinstance(state1, numbers.Number):
            assert state0 == state1


def checkpoint_correctness_verification(args,
                                        model,
                                        hidden_dim,
                                        tmpdir,
                                        load_optimizer_states=False,
                                        load_lr_scheduler_states=False,
                                        fp16=True):
    dtype = torch.half if fp16 else torch.float32
    ds_model, _, _, _ = deepspeed.initialize(args=args,
                                             model=model,
                                             model_parameters=model.parameters())
    data_loader = random_dataloader(model=ds_model,
                                    total_samples=50,
                                    hidden_dim=hidden_dim,
                                    device=ds_model.device,
                                    dtype=dtype)
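    # Train for a few steps so the optimizer (and any LR scheduler)
    # accumulates non-trivial state before checkpointing.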
    for batch in data_loader:
        loss = ds_model(batch[0], batch[1])
        ds_model.backward(loss)
        ds_model.step()

    trained_model = ds_model

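    # Save a tagged checkpoint; this captures model, optimizer, and scheduler
    # state.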
    save_folder = os.path.join(tmpdir, 'saved_checkpoint')
    save_tag = '1'

    trained_model.save_checkpoint(save_folder, save_tag)

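    # Build a second engine over the same module and restore it from the
    # checkpoint.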
    loaded_model, _, _, _ = deepspeed.initialize(args=args,
                                                 model=model,
                                                 model_parameters=model.parameters())

    loaded_model.load_checkpoint(save_folder,
                                 save_tag,
                                 load_optimizer_states=load_optimizer_states,
                                 load_lr_scheduler_states=load_lr_scheduler_states)

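    # The restored engine must match the trained one.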
    compare_model_states(trained_model, loaded_model)

    if load_optimizer_states:
        compare_optimizer_states(trained_model, loaded_model, hidden_dim, fp16)

    if load_lr_scheduler_states:
        compare_lr_scheduler_states(trained_model, loaded_model)


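# Lamb with fp16 enabled exercises the unfused fp16 wrapper
# (FP16_UnfusedOptimizer), together with a OneCycle LR scheduler.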
def test_checkpoint_unfused_optimizer(tmpdir):
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {
            "type": "Lamb",
            "params": {
                "lr": 0.00015
            }
        },
        "gradient_clipping": 1.0,
        "fp16": {
            "enabled": True
        },
        "scheduler": {
            "type": "OneCycle",
            "params": {
                "cycle_first_step_size": 1000,
                "cycle_first_stair_count": 500,
                "cycle_second_step_size": 1000,
                "cycle_second_stair_count": 500,
                "decay_step_size": 1000,
                "cycle_min_lr": 0.0001,
                "cycle_max_lr": 0.0010,
                "decay_lr_rate": 0.001,
                "cycle_min_mom": 0.85,
                "cycle_max_mom": 0.99,
                "decay_mom_rate": 0.0
            }
        }
    }

    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    model = SimpleModel(hidden_dim, empty_grad=False)

    @distributed_test(world_size=[2])
    def _test_checkpoint_unfused_optimizer(args,
                                           model,
                                           hidden_dim,
                                           load_optimizer_states):
        checkpoint_correctness_verification(args,
                                            model,
                                            hidden_dim,
                                            tmpdir,
                                            load_optimizer_states=load_optimizer_states)

    _test_checkpoint_unfused_optimizer(args=args,
                                       model=model,
                                       hidden_dim=hidden_dim,
                                       load_optimizer_states=True)
    _test_checkpoint_unfused_optimizer(args=args,
                                       model=model,
                                       hidden_dim=hidden_dim,
                                       load_optimizer_states=False)


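# Adam with fp16 enabled exercises the fused fp16 wrapper (FP16_Optimizer).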
def test_checkpoint_fused_optimizer(tmpdir):
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {
            "type": "Adam",
            "params": {
                "lr": 0.00015,
                "betas": [0.8,
                          0.999],
                "eps": 1e-8,
                "weight_decay": 3e-7
            }
        },
        "fp16": {
            "enabled": True
        }
    }

    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    model = SimpleModel(hidden_dim, empty_grad=False)

    @distributed_test(world_size=[2])
    def _test_checkpoint_fused_optimizer(args, model, hidden_dim, load_optimizer_states):
        checkpoint_correctness_verification(args,
                                            model,
                                            hidden_dim,
                                            tmpdir,
                                            load_optimizer_states=load_optimizer_states)

    _test_checkpoint_fused_optimizer(args=args,
                                     model=model,
                                     hidden_dim=hidden_dim,
                                     load_optimizer_states=True)
    _test_checkpoint_fused_optimizer(args=args,
                                     model=model,
                                     hidden_dim=hidden_dim,
                                     load_optimizer_states=False)


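# Covers ZeRO stage 1 and stage 2; the cpu_offload case pairs stage 2 with
# the 'deepspeed_adam' optimizer type.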
@pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer',
                         [
                             (1, False, 'Adam'),
                             (2, False, 'Adam'),
                             (2, True, 'deepspeed_adam'),
                         ])
def test_checkpoint_zero_optimizer(tmpdir, zero_stage, use_cpu_offload, adam_optimizer):
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {
            "type": adam_optimizer,
            "params": {
                "lr": 0.00015,
                "betas": [0.8,
                          0.999],
                "eps": 1e-8,
                "weight_decay": 3e-7
            }
        },
        "fp16": {
            "enabled": True
        },
        "zero_optimization": {
            "stage": zero_stage,
            "cpu_offload": use_cpu_offload
        }
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    model = SimpleModel(hidden_dim, empty_grad=False)

    @distributed_test(world_size=[2])
    def _test_checkpoint_zero_optimizer(args, model, hidden_dim, load_optimizer_states):
        checkpoint_correctness_verification(args,
                                            model,
                                            hidden_dim,
                                            tmpdir,
                                            load_optimizer_states=load_optimizer_states)

    _test_checkpoint_zero_optimizer(args=args,
                                    model=model,
                                    hidden_dim=hidden_dim,
                                    load_optimizer_states=True)


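# Same ZeRO configurations, but the checkpoint is loaded without restoring
# optimizer state.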
@pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer',
                         [
                             (1, False, 'Adam'),
                             (2, False, 'Adam'),
                             (2, True, 'deepspeed_adam'),
                         ])
def test_checkpoint_zero_no_optimizer(tmpdir,
                                      zero_stage,
                                      use_cpu_offload,
                                      adam_optimizer):
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {
            "type": adam_optimizer,
            "params": {
                "lr": 0.00015,
                "betas": [0.8,
                          0.999],
                "eps": 1e-8,
                "weight_decay": 3e-7
            }
        },
        "fp16": {
            "enabled": True
        },
        "zero_optimization": {
            "stage": zero_stage,
            "cpu_offload": use_cpu_offload
        }
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    model = SimpleModel(hidden_dim, empty_grad=False)

    @distributed_test(world_size=[2])
    def _test_checkpoint_zero_no_optimizer(args,
                                           model,
                                           hidden_dim,
                                           load_optimizer_states):
        checkpoint_correctness_verification(args,
                                            model,
                                            hidden_dim,
                                            tmpdir,
                                            load_optimizer_states=load_optimizer_states)

    _test_checkpoint_zero_no_optimizer(args=args,
                                       model=model,
                                       hidden_dim=hidden_dim,
                                       load_optimizer_states=False)


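# Verifies that WarmupLR scheduler state round-trips through a checkpoint
# even when optimizer state is not restored; includes ZeRO stage 0 (ZeRO
# disabled).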
@pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer',
                         [
                             (0, False, 'Adam'),
                             (1, False, 'Adam'),
                             (2, False, 'Adam'),
                             (2, True, 'deepspeed_adam'),
                         ])
def test_checkpoint_lr_scheduler(tmpdir, zero_stage, use_cpu_offload, adam_optimizer):
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {
            "type": adam_optimizer,
            "params": {
                "lr": 0.00015,
                "betas": [0.8,
                          0.999],
                "eps": 1e-8,
                "weight_decay": 3e-7
            }
        },
        "fp16": {
            "enabled": True
        },
        "zero_optimization": {
            "stage": zero_stage,
            "cpu_offload": use_cpu_offload
        },
        "scheduler": {
            "type": "WarmupLR",
            "params": {
                "warmup_min_lr": 0,
                "warmup_max_lr": 0.001,
                "warmup_num_steps": 1000
            }
        }
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    model = SimpleModel(hidden_dim, empty_grad=False)

    @distributed_test(world_size=[2])
    def _test_checkpoint_lr_scheduler(args,
                                      model,
                                      hidden_dim,
                                      load_optimizer_states,
                                      load_lr_scheduler_states):
        checkpoint_correctness_verification(
            args,
            model,
            hidden_dim,
            tmpdir,
            load_optimizer_states=load_optimizer_states,
            load_lr_scheduler_states=load_lr_scheduler_states)

    _test_checkpoint_lr_scheduler(args=args,
                                  model=model,
                                  hidden_dim=hidden_dim,
                                  load_optimizer_states=False,
                                  load_lr_scheduler_states=True)


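# A scheduler is configured, but the checkpoint is loaded without restoring
# optimizer or scheduler state.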
@pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer',
                         [
                             (0, False, 'Adam'),
                             (1, False, 'Adam'),
                             (2, False, 'Adam'),
                             (2, True, 'deepspeed_adam'),
                         ])
def test_checkpoint_no_lr_scheduler(tmpdir, zero_stage, use_cpu_offload, adam_optimizer):
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {
            "type": adam_optimizer,
            "params": {
                "lr": 1e-5
            }
        },
        "fp16": {
            "enabled": True
        },
        "zero_optimization": {
            "stage": zero_stage,
            "cpu_offload": use_cpu_offload
        },
        "scheduler": {
            "type": "WarmupLR",
            "params": {
                "warmup_min_lr": 0,
                "warmup_max_lr": 0.001,
                "warmup_num_steps": 1000
            }
        },
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    model = SimpleModel(hidden_dim, empty_grad=False)

    @distributed_test(world_size=[2])
    def _test_checkpoint_no_lr_scheduler(args,
                                         model,
                                         hidden_dim,
                                         load_optimizer_states,
                                         load_lr_scheduler_states):
        checkpoint_correctness_verification(
            args,
            model,
            hidden_dim,
            tmpdir,
            load_optimizer_states=load_optimizer_states,
            load_lr_scheduler_states=load_lr_scheduler_states)

    _test_checkpoint_no_lr_scheduler(args=args,
                                     model=model,
                                     hidden_dim=hidden_dim,
                                     load_optimizer_states=False,
                                     load_lr_scheduler_states=False)


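# With fp16 disabled the engine keeps the bare torch optimizer, exercising
# the torch.optim.Optimizer branch of compare_model_states.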
def test_checkpoint_fp32_optimizer(tmpdir):
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {
            "type": "Adam",
            "params": {
                "lr": 0.00015,
                "betas": [0.8,
                          0.999],
                "eps": 1e-8,
                "weight_decay": 3e-7
            }
        },
        "fp16": {
            "enabled": False
        }
    }

    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    model = SimpleModel(hidden_dim, empty_grad=False)

    @distributed_test(world_size=[2])
    def _test_checkpoint_fp32_optimizer(args, model, hidden_dim):
        checkpoint_correctness_verification(args, model, hidden_dim, tmpdir, fp16=False)

    _test_checkpoint_fp32_optimizer(args=args, model=model, hidden_dim=hidden_dim)