# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import io
import json
import os
import unittest
from copy import deepcopy

from parameterized import parameterized

from transformers import AutoModel, TrainingArguments, is_torch_available, logging
from transformers.deepspeed import HfDeepSpeedConfig, is_deepspeed_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import (
    CaptureLogger,
    CaptureStderr,
    ExtendSysPath,
    LoggingLevel,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import get_last_checkpoint, set_seed


tests_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
root_dir = os.path.dirname(tests_dir)
with ExtendSysPath(tests_dir):
    from test_trainer import TrainerIntegrationCommon  # noqa

    if is_torch_available():
        from test_trainer import RegressionModelConfig, RegressionPreTrainedModel, get_regression_trainer  # noqa


set_seed(42)

T5_SMALL = "t5-small"
T5_TINY = "patrickvonplaten/t5-tiny-random"
GPT2_TINY = "sshleifer/tiny-gpt2"


def load_json(path):
    with open(path) as f:
        return json.load(f)


def require_deepspeed_aio(test_case):
    """
    Decorator marking a test that requires deepspeed aio (nvme)
    """
    if not is_deepspeed_available():
        return unittest.skip("test requires deepspeed")(test_case)

    import deepspeed
    from deepspeed.ops.aio import AsyncIOBuilder

    if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
        return unittest.skip("test requires deepspeed async-io")(test_case)
    else:
        return test_case


if is_deepspeed_available():
    from deepspeed.utils import logger as deepspeed_logger  # noqa
    from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
    from transformers.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled  # noqa


def get_launcher(distributed=False):
    # 1. explicitly set --num_nodes=1 just in case these tests end up being run on a multi-node setup
    # - it won't be able to handle that
    # 2. for now testing with just 2 gpus max (since some quality tests may give different
    # results with more gpus because we use very little data)
    num_gpus = min(2, get_gpu_count()) if distributed else 1
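    # e.g. with 2+ gpus available, get_launcher(distributed=True) returns:
    #   ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"]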
    return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()


ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
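# `@parameterized.expand(stages)` below runs each decorated test once per stage; the parameterized
# library appends the param to the test name, e.g. `test_save_checkpoints_0_zero2`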


@require_deepspeed
@require_torch_gpu
class CoreIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
    """
    Testing non-Trainer DeepSpeed integration
    """

    def setUp(self):
        super().setUp()

        self.dist_env_1_gpu = dict(
            MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )
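        # these env vars emulate what the distributed launcher would normally export, letting
        # deepspeed initialize in single-process mode inside the current pytest worker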

    def test_init_zero3(self):
        # test that zero.Init() works correctly under zero3
        ds_config = {
            "train_batch_size": 1,
            "zero_optimization": {
                "stage": 3,
            },
        }

        dschf = HfDeepSpeedConfig(ds_config)
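        # note: `dschf` has to remain alive - transformers keeps only a weakref to the last
        # HfDeepSpeedConfig, which `from_pretrained` consults to decide whether to instantiate
        # the model under zero.Init (see test_config_object below for the weakref behavior)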

        self.assertTrue(dschf.is_zero3())
        self.assertTrue(is_deepspeed_zero3_enabled())

        with LoggingLevel(logging.INFO):
            with mockenv_context(**self.dist_env_1_gpu):
                logger = logging.get_logger("transformers.modeling_utils")
                with CaptureLogger(logger) as cl:
                    AutoModel.from_pretrained(T5_TINY)
        self.assertIn("Detected DeepSpeed ZeRO-3", cl.out)

        # now remove zero optimization
        del ds_config["zero_optimization"]
        dschf = HfDeepSpeedConfig(ds_config)

        self.assertFalse(dschf.is_zero3())
        self.assertFalse(is_deepspeed_zero3_enabled())

        with LoggingLevel(logging.INFO):
            with mockenv_context(**self.dist_env_1_gpu):
                logger = logging.get_logger("transformers.modeling_utils")
                with CaptureLogger(logger) as cl:
                    AutoModel.from_pretrained(T5_TINY)
        self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out)


@require_deepspeed
@require_torch_gpu
class TrainerIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
    """

    This class is for testing directly via get_regression_trainer

    It mixes in `TrainerIntegrationCommon` which already has a lot of helper validation methods
    which we can re-use here.

    Important: this class' setup can only work with a single gpu because it runs within the current
    pytest worker. For multi-gpu tests use TestDeepSpeedWithLauncher.

    Note: if any of this class's tests run, at least one gpu will remain occupied until this
    pytest worker exits, because the gpu memory allocated by the cuda kernels won't be released
    before then.

    This may look like run-away tests if you watch `nvidia-smi` while other tests that fork new
    processes are running: one or two "stale" processes will show up in `nvidia-smi`. This is not
    a bug.
    """

    def setUp(self):
        super().setUp()

        args = TrainingArguments(".")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

        self.dist_env_1_gpu = dict(
            MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )

        self.ds_config_file = dict(
            zero2=f"{self.test_file_dir_str}/ds_config_zero2.json",
            zero3=f"{self.test_file_dir_str}/ds_config_zero3.json",
        )

        # use self.get_config_dict(stage) to use these to ensure the original is not modified
        with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f:
            config_zero2 = json.load(f)
            # by default use fp16
            config_zero2["fp16"]["enabled"] = True
        with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
            config_zero3 = json.load(f)
            # by default use fp16
            config_zero3["fp16"]["enabled"] = True
            # This setting slows things down, so don't enable it by default unless needed by a test.
            # It's in the file as a demo for users since we want everything to work out of the box even if slower.
            config_zero3["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = False
        self.ds_config_dict = dict(
            zero2=config_zero2,
            zero3=config_zero3,
        )

    def get_config_dict(self, stage):
        # As some tests modify the dict, always make a copy
        return deepcopy(self.ds_config_dict[stage])

    # --- These tests are enough to run on just one of the zero stages --- #

    def test_hf_ds_config_mismatch(self):

        ds_config = self.get_config_dict(ZERO2)

        # Purposefully configure these values to mismatch TrainingArguments values.
        # This currently doesn't cover all keys (but it could)
        per_device_train_batch_size = 2
        ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size + 2

        ds_config["train_batch_size"] = 1000

        gradient_accumulation_steps = 2
        ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps + 2

        max_grad_norm = 1.0
        ds_config["gradient_clipping"] = max_grad_norm + 0.1

        adam_beta1, adam_beta2 = 0.9, 0.99
        ds_config["optimizer"]["params"]["betas"] = [adam_beta1 - 0.1, adam_beta2 - 0.1]

        fp16 = True
        ds_config["fp16"]["enabled"] = not fp16

        keys = [
            "per_device_train_batch_size",
            "train_batch_size",
            "gradient_accumulation_steps",
            "max_grad_norm",
            "betas",
            "fp16",
        ]
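        # the trainer's deepspeed config consistency check is expected to collect all of the
        # mismatched keys above and report them in a single exception, which the loop at the
        # end of this test asserts on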

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(
                local_rank=0,
                fp16=fp16,
                deepspeed=ds_config,
                per_device_train_batch_size=per_device_train_batch_size,
                gradient_accumulation_steps=gradient_accumulation_steps,
                max_grad_norm=max_grad_norm,
                adam_beta1=adam_beta1,
                adam_beta2=adam_beta2,
            )
            with self.assertRaises(Exception) as context:
                trainer.train()

        for key in keys:
            self.assertTrue(
                key in str(context.exception),
                f"{key} is not in the exception message:\n{context.exception}",
            )

    # Test various combos
    # 1. DS scheduler + DS optimizer: this is already tested by most other tests
    # 2. HF scheduler + HF optimizer:
    # 3. DS scheduler + HF optimizer:
    # 4. HF scheduler + DS optimizer:

    def test_hf_scheduler_hf_optimizer(self):
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            del ds_config_zero2_dict["scheduler"]  # force default HF Trainer scheduler
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)

    def test_ds_scheduler_hf_optimizer(self):
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)

    def test_hf_scheduler_ds_optimizer(self):
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["scheduler"]  # force default HF Trainer scheduler
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)

    @require_deepspeed_aio
    def test_stage3_nvme_offload(self):
        with mockenv_context(**self.dist_env_1_gpu):
            # this actually doesn't have to be on NVMe, any storage will do since this test only
            # runs a simple check that we can use some directory as if it were NVMe
            nvme_path = self.get_auto_remove_tmp_dir()
            nvme_config = dict(device="nvme", nvme_path=nvme_path)
            ds_config_zero3_dict = self.get_config_dict(ZERO3)
            ds_config_zero3_dict["zero_optimization"]["offload_optimizer"] = nvme_config
            ds_config_zero3_dict["zero_optimization"]["offload_param"] = nvme_config
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero3_dict)
            with CaptureLogger(deepspeed_logger) as cl:
                trainer.train()
            self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")

    # --- These tests need to run on both zero stages --- #

    @parameterized.expand(stages)
    def test_hf_optimizer_with_offload(self, stage):
        # non-DS optimizers can be used with ZeRO-offload, as long as they have both CPU and GPU
        # implementations (except LAMB)
        ds_config_dict = self.get_config_dict(stage)
        del ds_config_dict["optimizer"]  # force default HF Trainer optimizer
        # force cpu offload
        ds_config_dict["zero_optimization"]["offload_optimizer"]["device"] = "cpu"
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_dict)
            with CaptureLogger(deepspeed_logger) as cl:
                trainer.train()
            self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")

    @parameterized.expand(stages)
    def test_fake_notebook_no_launcher(self, stage):
        # this setup emulates a notebook where a launcher needs to be emulated by hand

        # note that unittest resets sys.stdout after each test, so `CaptureStd` will work here to
        # capture the DeepSpeed log if this test happens to run first in this pytest worker. But it
        # will fail if it isn't run first, since `sys.stdout` will no longer be the same. So we
        # either have to reset `deepspeed_logger.handlers[0].setStream(sys.stdout)` or directly
        # capture from the deepspeed_logger.
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=self.get_config_dict(stage))
            with CaptureLogger(deepspeed_logger) as cl:
                trainer.train()
            self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")

    @parameterized.expand(stages)
    def test_early_get_last_lr(self, stage):
        # with deepspeed's fp16 and dynamic loss scale enabled, the optimizer/scheduler steps may
        # not run for the first few dozen steps while the loss scale is too large, and thus
        # `get_last_lr` will fail if called during that warm-up stage,
        #
        # setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which
        # calls `self.lr_scheduler.get_last_lr()`, and originally it'd fail on the very first step.
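        #
        # train_len=8 with per_device_train_batch_size=8 (below) yields a single optimizer step
        # per epoch, so the very first logging call exercises the problematic code path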
        with mockenv_context(**self.dist_env_1_gpu):
            a = b = 0.0
            trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=8,
                fp16=True,
                deepspeed=self.get_config_dict(stage),
                per_device_train_batch_size=8,
                logging_steps=1,
            )
            trainer.train()
            post_train_a = trainer.model.a.item()

            # XXX: for some reason the following check fails with zero3 - not broken, but a
            # different qualitative outcome - as if the optimizer did run
            # oddly a and b both end up at 1.0 from 0.0 - there is a bug somewhere
            # print(trainer.model.a.item())
            # print(trainer.model.b.item())
            # need to investigate at some point
            if stage == ZERO3:
                return

            # it's enough that train didn't fail for this test, but we must check that
            # optimizer/scheduler didn't run (since if it did this test isn't testing the right thing)
            self.assertEqual(post_train_a, a)

    @parameterized.expand(stages)
    def test_gradient_accumulation(self, stage):
        # this test measures that we get identical weights and similar loss with:
        # 1. per_device_train_batch_size=16, gradient_accumulation_steps=1
        # 2. per_device_train_batch_size=4, gradient_accumulation_steps=4
        # since the 2nd should produce the same effective batch as the 1st, with the same results
        #
        # I can get an identical loss for a small train_len=32, plus the power of the initial
        # dynamic loss scale value set to:
        #   "fp16.initial_scale_power": 1
        # plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file
        # but for some reason at train_len=64 the weights start to mismatch with this setup.
        # the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical
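        #
        # effective batch arithmetic this test relies on (single gpu):
        #   16 * 1 == 4 * 4 == 16 samples per optimizer step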

        train_len = 64
        a = b = 0.0

        kwargs = dict(
            a=a,
            b=b,
            local_rank=0,
            train_len=train_len,
            fp16=True,
            deepspeed=self.get_config_dict(stage),
        )

        with mockenv_context(**self.dist_env_1_gpu):
            no_grad_accum_trainer = get_regression_trainer(
                **kwargs,
                per_device_train_batch_size=16,
                gradient_accumulation_steps=1,
            )
            no_grad_accum_result = no_grad_accum_trainer.train()
            no_grad_accum_loss = no_grad_accum_result.training_loss
            no_grad_accum_a = no_grad_accum_trainer.model.a.item()
            no_grad_accum_b = no_grad_accum_trainer.model.b.item()
            # make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger
            self.assertNotEqual(no_grad_accum_a, a)

        with mockenv_context(**self.dist_env_1_gpu):
            yes_grad_accum_trainer = get_regression_trainer(
                **kwargs,
                per_device_train_batch_size=4,
                gradient_accumulation_steps=4,
            )
            yes_grad_accum_result = yes_grad_accum_trainer.train()
            yes_grad_accum_loss = yes_grad_accum_result.training_loss
            yes_grad_accum_a = yes_grad_accum_trainer.model.a.item()
            yes_grad_accum_b = yes_grad_accum_trainer.model.b.item()
            self.assertNotEqual(yes_grad_accum_a, a)

        # training with a quarter of the batch size but 4x gradient accumulation steps should give
        # the same weights, though in practice there can still be a slight difference of ~1e-6
        self.assertAlmostEqual(no_grad_accum_a, yes_grad_accum_a, places=5)
        self.assertAlmostEqual(no_grad_accum_b, yes_grad_accum_b, places=5)

        # see the note above on how to get an identical loss on a small batch size
        self.assertAlmostEqual(no_grad_accum_loss, yes_grad_accum_loss, places=2)

    def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage):
        # adapted from TrainerIntegrationCommon.check_saved_checkpoints

        file_list = [WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"]

        if stage == ZERO2:
            ds_file_list = ["mp_rank_00_model_states.pt"]
        elif stage == ZERO3:
            ds_file_list = ["zero_pp_rank_0_mp_rank_00_model_states.pt"]
        else:
            raise ValueError(f"unknown stage {stage}")

        # XXX: this can be recoded and then removed once we require deepspeed>0.3.13
        from packaging import version

        import deepspeed

        if version.parse(deepspeed.__version__) > version.parse("0.3.13"):
            ds_file_list.append("zero_pp_rank_0_mp_rank_00_optim_states.pt")
        else:
            ds_file_list.append("zero_pp_rank_0_mp_rank_00optim_states.pt")

        for step in range(freq, total, freq):
            checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
            self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found")

            # common files
            for filename in file_list:
                path = os.path.join(checkpoint, filename)
                self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")

            # ds files
            ds_path = os.path.join(checkpoint, f"global_step{step}")
            for filename in ds_file_list:
                # filename = os.path.join(path, filename)
                # print(filename)
                path = os.path.join(ds_path, filename)
                self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")

    @parameterized.expand(stages)
    def test_save_checkpoints(self, stage):
        # adapted from TrainerIntegrationTest.test_save_checkpoints

        freq = 5
        output_dir = self.get_auto_remove_tmp_dir()
        ds_config_dict = self.get_config_dict(stage)
        ds_config_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
        if stage == ZERO3:
            ds_config_dict["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = True

        # save checkpoints
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(
                output_dir=output_dir,
                save_steps=freq,
                fp16=True,
                deepspeed=ds_config_dict,
            )
            trainer.train()

        total = int(self.n_epochs * 64 / self.batch_size)
        self.check_saved_checkpoints_deepspeed(output_dir, freq, total, stage)

    @parameterized.expand(stages)
    def test_can_resume_training_errors(self, stage):

        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_dict = self.get_config_dict(stage)
            output_dir = self.get_auto_remove_tmp_dir()
            trainer = get_regression_trainer(output_dir=output_dir, fp16=True, deepspeed=ds_config_dict)

            # 1. fail to find any checkpoint - due to a fresh output_dir
            with self.assertRaises(Exception) as context:
                trainer.train(resume_from_checkpoint=True)
            self.assertTrue(
                "No valid checkpoint found in output directory" in str(context.exception),
                f"got exception: {context.exception}",
            )

            # 2. fail to find a bogus checkpoint
            with self.assertRaises(Exception) as context:
                checkpoint = os.path.join(output_dir, "checkpoint-5")
                trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
            self.assertTrue(
                "Can't find a valid checkpoint at" in str(context.exception), f"got exception: {context.exception}"
            )

    @parameterized.expand(stages)
    def test_can_resume_training_normal(self, stage):
        # adapted from TrainerIntegrationTest.test_can_resume_training
        # test normal resume for each stage separately, error-handling is tested in a different test
        output_dir = self.get_auto_remove_tmp_dir()
        ds_config_dict = self.get_config_dict(stage)
        ds_config_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
        if stage == ZERO3:
            ds_config_dict["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = True

        kwargs = dict(
            output_dir=output_dir, train_len=128, save_steps=5, learning_rate=0.1, fp16=True, deepspeed=ds_config_dict
        )

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(**kwargs)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(output_dir, "checkpoint-5")

            # Reinitialize trainer
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

            # Now check with a later checkpoint that it also works when we span over one epoch
            checkpoint = os.path.join(output_dir, "checkpoint-15")

            # Reinitialize trainer and load model
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

    @parameterized.expand(stages)
    def test_load_state_dict_from_zero_checkpoint(self, stage):
        # test that we can load fp32 weights directly from the zero checkpoint into the current model

        output_dir = self.get_auto_remove_tmp_dir()  # "./xxx", after=False, before=False)

        ds_config_dict = self.get_config_dict(stage)

        kwargs = dict(
            output_dir=output_dir,
            train_len=4,
            per_device_train_batch_size=4,
            num_train_epochs=1,
            save_strategy="steps",
            save_steps=1,
            learning_rate=0.1,
            fp16=True,
            deepspeed=ds_config_dict,
        )

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(**kwargs)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint_dir = get_last_checkpoint(output_dir)
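            # load_state_dict_from_zero_checkpoint consolidates the sharded zero checkpoint into
            # a single fp32 state_dict and loads it into the passed model in place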
            model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)

            (a1, b1) = model.a.item(), model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

    def test_config_object(self):
        # test that we can switch from zero2 to zero3 in the same process for example
        # test is_zero, etc.
        output_dir = self.get_auto_remove_tmp_dir()
        kwargs = dict(output_dir=output_dir, train_len=8, fp16=True)

        ds_config_zero3_dict = self.get_config_dict("zero3")
        ds_config_zero2_dict = self.get_config_dict("zero2")

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
            self.assertTrue(is_deepspeed_zero3_enabled())

            # test we can repeat that and with train this time
            trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
            trainer.train()
            self.assertTrue(is_deepspeed_zero3_enabled())

            # test zero3 is disabled
            trainer = get_regression_trainer(deepspeed=ds_config_zero2_dict, **kwargs)
            self.assertFalse(is_deepspeed_zero3_enabled())

            # check config obj
            config = deepspeed_config()
            self.assertTrue(bool(config), "Deepspeed config should be accessible")

            del trainer
            # now weakref should gc the global and we shouldn't get anything here
            config = deepspeed_config()
            self.assertFalse(is_deepspeed_zero3_enabled())
            self.assertFalse(bool(config), "Deepspeed config should not be accessible")


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWithLauncher(TestCasePlus):
    """This class is for testing via an external script - can do multiple gpus"""

    # Tests to devise #
    #
    # 1. predict_with_generate on multigpu - need to figure out how to give input sequences so that
    # the 2 gpus will generate prediction sequences that aren't of the same length - this is because
    # we had to code a special feature to sync the gpus when the predicted sequences aren't of the
    # same length. In general this will be tested as a side-effect through a variety of other tests -
    # it'll simply hang trying to synchronize with other gpus if this problem is encountered. So as
    # long as we have a few full tests running on zero3 + predict_with_generate this should be
    # mostly covered.
    #
    # but there are 5 variations on beam search in `generate` - with identical code branched with
    # `if synced_gpus`
    #
    # 2. most tests should probably be run on both: zero2 and zero3 configs
    #

    @require_torch_multi_gpu
    @parameterized.expand(stages)
    def test_basic_distributed(self, stage):
        self.run_and_check(stage=stage, distributed=True)

    @parameterized.expand(stages)
    def test_do_eval_no_train(self, stage):
        # we should not fail if train is skipped
        self.run_and_check(
            stage=stage,
            eval_steps=1,
            distributed=False,
            do_train=False,
            do_eval=True,
        )

    @parameterized.expand(stages)
    def test_fp32_non_distributed(self, stage):
        # real model needs too much GPU memory under stage2+fp32, so using tiny random model here -
        # therefore no quality checks, just basic completion checks are done
        self.run_and_check(
            stage=stage,
            model_name=T5_TINY,
            distributed=False,
            do_train=True,
            do_eval=True,
            quality_checks=False,
            fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(stages)
    def test_fp32_distributed(self, stage):
        # real model needs too much GPU memory under stage2+fp32, so using tiny random model here -
        # therefore no quality checks, just basic completion checks are done
        self.run_and_check(
            stage=stage,
            model_name=T5_TINY,
            distributed=True,
            do_train=True,
            do_eval=True,
            quality_checks=False,
            fp16=False,
        )

    @parameterized.expand(stages)
    def test_resume_train_not_from_ds_checkpoint(self, stage):
        # do normal training and then resume not from the deepspeed checkpoint but explicitly from
        # the saved model dir

        do_train = True
        do_eval = False
        kwargs = dict(stage=stage, eval_steps=1, distributed=True, do_train=do_train, do_eval=do_eval)

        # 1. normal training
        output_dir = self.run_and_check(**kwargs)

        # 2. now resume explicitly from the saved weights, by passing --model_name_or_path output_dir
        # - i.e. the same path the model was saved to in step 1
        output_dir = self.run_trainer(**kwargs, model_name=output_dir)

        self.do_checks(output_dir, do_train=do_train, do_eval=do_eval)

    def do_checks(self, output_dir, do_train=True, do_eval=True, quality_checks=True):

        if do_train:
            train_metrics = load_json(os.path.join(output_dir, "train_results.json"))
            self.assertIn("train_samples_per_second", train_metrics)
            if quality_checks:
                self.assertGreater(train_metrics["train_samples_per_second"], 0.5)

        if do_eval:
            eval_metrics = load_json(os.path.join(output_dir, "eval_results.json"))
            self.assertIn("eval_bleu", eval_metrics)
            if quality_checks:
                self.assertGreater(eval_metrics["eval_bleu"], 1)

    # XXX: need to do better validation beyond just that the run was successful
    def run_and_check(
        self,
        stage,
        model_name: str = T5_SMALL,
        eval_steps: int = 10,
        distributed: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
        extra_args_str: str = None,
        remove_args_str: str = None,
    ):

        # we are doing quality testing so using a small real model
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            do_train=do_train,
            do_eval=do_eval,
            distributed=distributed,
            fp16=fp16,
            extra_args_str=extra_args_str,
            remove_args_str=remove_args_str,
        )

        self.do_checks(output_dir, do_train=do_train, do_eval=do_eval, quality_checks=quality_checks)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        do_train: bool = False,
        do_eval: bool = True,
        distributed: bool = True,
        fp16: bool = True,
        extra_args_str: str = None,
        remove_args_str: str = None,
    ):
        max_len = 32
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_source_length {max_len}
            --max_target_length {max_len}
            --val_max_target_length {max_len}
            --warmup_steps 8
            --predict_with_generate
            --save_steps 0
            --eval_steps {eval_steps}
            --group_by_length
            --label_smoothing_factor 0.1
            --source_lang en
            --target_lang ro
            --report_to none
        """.split()
        args.extend(["--source_prefix", '"translate English to Romanian: "'])

        if fp16:
            args.extend(["--fp16"])

        actions = 0
        if do_train:
            actions += 1
            args.extend(
                f"""
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --max_train_samples 16
            --per_device_train_batch_size 2
            --learning_rate 3e-3
            """.split()
            )

        if do_eval:
            actions += 1
            args.extend(
                """
            --do_eval
            --max_eval_samples 16
            --per_device_eval_batch_size 2
            """.split()
            )

        assert actions > 0, "need at least do_train or do_eval for the test to run"

        if extra_args_str is not None:
            args.extend(extra_args_str.split())

        # currently only works for bool args
        if remove_args_str is not None:
            remove_args = remove_args_str.split()
            args = [x for x in args if x not in remove_args]

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
        script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"]
        launcher = get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    @parameterized.expand(stages)
    def test_clm(self, stage):
        # this test exercises model.resize_token_embeddings() which requires param gathering outside
        # of forward - it's not used by `run_translation.py`, but it is in `run_clm.py`

        data_dir = self.tests_dir / "fixtures"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_name_or_path {GPT2_TINY}
            --train_file {data_dir}/sample_text.txt
            --validation_file {data_dir}/sample_text.txt
            --output_dir {output_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --max_train_samples 16
            --max_eval_samples 16
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --num_train_epochs 1
            --warmup_steps 8
            --block_size 64
            --fp16
            --report_to none
            """.split()

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
        script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
        launcher = get_launcher(distributed=True)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

    def test_clm_from_config_zero3(self):
        # this test exercises AutoModel.from_config(config) - to ensure zero.Init is called

        data_dir = self.tests_dir / "fixtures"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_type gpt2
            --tokenizer_name {GPT2_TINY}
            --train_file {data_dir}/sample_text.txt
            --validation_file {data_dir}/sample_text.txt
            --output_dir {output_dir}
            --overwrite_output_dir
            --do_train
            --max_train_samples 4
            --per_device_train_batch_size 2
            --num_train_epochs 1
            --warmup_steps 8
            --block_size 8
            --fp16
            --report_to none
            """.split()

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_zero3.json".split()
        script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
        launcher = get_launcher(distributed=True)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        with CaptureStderr() as cs:
            execute_subprocess_async(cmd, env=self.get_env())
        assert "Detected DeepSpeed ZeRO-3" in cs.err