"tests/test_modeling_gptj.py" did not exist on "505f2d749eb52f4b8b803d8c9a5f04442446e6c2"
test_deepspeed.py 40.8 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import io
import json
import os
import unittest
from copy import deepcopy

from parameterized import parameterized

from transformers import AutoModel, TrainingArguments, is_torch_available, logging
from transformers.deepspeed import HfDeepSpeedConfig, is_deepspeed_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import (
    CaptureLogger,
    CaptureStderr,
    ExtendSysPath,
    LoggingLevel,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import get_last_checkpoint, set_seed


tests_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
root_dir = os.path.dirname(tests_dir)
with ExtendSysPath(tests_dir):
    from test_trainer import TrainerIntegrationCommon  # noqa

    if is_torch_available():
        from test_trainer import RegressionModelConfig, RegressionPreTrainedModel, get_regression_trainer  # noqa


set_seed(42)

# default torch.distributed port
DEFAULT_MASTER_PORT = "10999"

T5_SMALL = "t5-small"
T5_TINY = "patrickvonplaten/t5-tiny-random"
GPT2_TINY = "sshleifer/tiny-gpt2"


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_master_port(real_launcher=False):
    """
    When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed)
    the issue is that once the port is tied it can't be used anywhere else outside of this process,
    since torch.dist doesn't free the port until the process exits. Therefore for the sake of being
    able to run both emulated launcher and normal launcher tests we need 2 distinct ports.

    This function will give the right port in the right context. For real launcher it'll give the
    base port, for emulated launcher it'll give the base port + 1. In both cases a string is
    returned.

    Args:
        `real_launcher`: whether a real launcher is going to be used, or the emulated one

    """

    master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT)
    if not real_launcher:
        master_port_base = str(int(master_port_base) + 1)
    return master_port_base
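
# e.g. with the default base port:
#   get_master_port(real_launcher=True)   # -> "10999" - for the real deepspeed/torch.distributed launcher
#   get_master_port(real_launcher=False)  # -> "11000" - for the emulated launcher under mockenv_context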


def require_deepspeed_aio(test_case):
    """
    Decorator marking a test that requires deepspeed aio (nvme)
    """
    if not is_deepspeed_available():
        return unittest.skip("test requires deepspeed")(test_case)

    import deepspeed
    from deepspeed.ops.aio import AsyncIOBuilder

    if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
        return unittest.skip("test requires deepspeed async-io")(test_case)
    else:
        return test_case
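
# usage sketch - gates a test on deepspeed having been built with the async-io (nvme) op:
#   @require_deepspeed_aio
#   def test_stage3_nvme_offload(self): ...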


if is_deepspeed_available():
    from deepspeed.utils import logger as deepspeed_logger  # noqa
    from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
    from transformers.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled  # noqa


def get_launcher(distributed=False):
    # 1. explicitly set --num_nodes=1 just in case these tests end up being run on a multi-node setup
    # - it won't be able to handle that
    # 2. for now testing with just 2 gpus max (since some quality tests may give different
    # results with more gpus because we use very little data)
    num_gpus = min(2, get_gpu_count()) if distributed else 1
    master_port = get_master_port(real_launcher=True)
    return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split()


ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
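
# `@parameterized.expand(stages)` runs each decorated test once per stage and appends the
# parameter to the generated test name, e.g. `test_save_checkpoints_0_zero2` and
# `test_save_checkpoints_1_zero3`, so a single variant can be picked out with `pytest -k`.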


@require_deepspeed
@require_torch_gpu
class CoreIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
    """
    Testing non-Trainer DeepSpeed integration
    """

    def setUp(self):
        super().setUp()

        master_port = get_master_port(real_launcher=False)
        self.dist_env_1_gpu = dict(
            MASTER_ADDR="localhost", MASTER_PORT=master_port, RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )

    def test_init_zero3(self):
        # test that zero.Init() works correctly under zero3
        ds_config = {
            "train_batch_size": 1,
            "zero_optimization": {
                "stage": 3,
            },
        }

        dschf = HfDeepSpeedConfig(ds_config)

        self.assertTrue(dschf.is_zero3())
        self.assertTrue(is_deepspeed_zero3_enabled())

        with LoggingLevel(logging.INFO):
            with mockenv_context(**self.dist_env_1_gpu):
                logger = logging.get_logger("transformers.modeling_utils")
                with CaptureLogger(logger) as cl:
                    AutoModel.from_pretrained(T5_TINY)
        self.assertIn("Detected DeepSpeed ZeRO-3", cl.out)

        # now remove zero optimization
        del ds_config["zero_optimization"]
        dschf = HfDeepSpeedConfig(ds_config)

        self.assertFalse(dschf.is_zero3())
        self.assertFalse(is_deepspeed_zero3_enabled())

        with LoggingLevel(logging.INFO):
            with mockenv_context(**self.dist_env_1_gpu):
                logger = logging.get_logger("transformers.modeling_utils")
                with CaptureLogger(logger) as cl:
                    AutoModel.from_pretrained(T5_TINY)
        self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out)
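
    # Note: the `dschf` object above must be kept alive for as long as models are being
    # instantiated - the integration keeps only a weakref to the config (demonstrated by
    # `test_config_object` further down), so the non-Trainer zero.Init pattern is roughly:
    #   dschf = HfDeepSpeedConfig(ds_config)  # hold on to this reference
    #   model = AutoModel.from_pretrained(T5_TINY)  # gets sharded via zero.Init under zero3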


@require_deepspeed
@require_torch_gpu
class TrainerIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
    """

    This class is for testing directly via get_regression_trainer

    It mixes in `TrainerIntegrationCommon` which already has a lot of helper validation methods
    which we can re-use here.

    Important: this class' setup can only work with a single gpu because it runs within the current
    pytest worker. For multi-gpu tests use TestDeepSpeedWithLauncher.

    Note: if any of the tests of this class get run there will be at least one gpu occupied by them
    until this pytest worker exits. This is because the gpu memory allocated by the cuda-kernels
    won't be released until this pytest worker exits.

    This may appear as some run-away tests if you watch `nvidia-smi` while other tests that fork new
    processes are run. So there will be one or two "stale" processes reported in `nvidia-smi`. This
    is not a bug.
    """

    def setUp(self):
        super().setUp()

        args = TrainingArguments(".")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

        master_port = get_master_port(real_launcher=False)
        self.dist_env_1_gpu = dict(
            MASTER_ADDR="localhost", MASTER_PORT=master_port, RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )

        self.ds_config_file = dict(
            zero2=f"{self.test_file_dir_str}/ds_config_zero2.json",
            zero3=f"{self.test_file_dir_str}/ds_config_zero3.json",
        )

        # use self.get_config_dict(stage) to access these configs - this way the originals are not modified
        with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f:
            config_zero2 = json.load(f)
            # by default use fp16
            config_zero2["fp16"]["enabled"] = True
        with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
            config_zero3 = json.load(f)
            # by default use fp16
            config_zero3["fp16"]["enabled"] = True
            # This setting slows things down, so don't enable it by default unless needed by a test.
            # It's in the file as a demo for users since we want everything to work out of the box even if slower.
            config_zero3["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = False
        self.ds_config_dict = dict(
            zero2=config_zero2,
            zero3=config_zero3,
        )

    def get_config_dict(self, stage):
        # As some tests modify the dict, always make a copy
        return deepcopy(self.ds_config_dict[stage])

    # --- These tests are enough to run on one of the zero stages --- #

    def test_hf_ds_config_mismatch(self):

        ds_config = self.get_config_dict(ZERO2)

        # Purposefully configure these values to mismatch TrainingArguments values.
        # This currently doesn't cover all keys (but it could)
        per_device_train_batch_size = 2
        ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size + 2

        ds_config["train_batch_size"] = 1000

        gradient_accumulation_steps = 2
        ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps + 2

        max_grad_norm = 1.0
        ds_config["gradient_clipping"] = max_grad_norm + 0.1

        adam_beta1, adam_beta2 = 0.9, 0.99
        ds_config["optimizer"]["params"]["betas"] = [adam_beta1 - 0.1, adam_beta2 - 0.1]

        fp16 = True
        ds_config["fp16"]["enabled"] = not fp16

        keys = [
            "per_device_train_batch_size",
            "train_batch_size",
            "gradient_accumulation_steps",
            "max_grad_norm",
            "betas",
            "fp16",
        ]

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(
                local_rank=0,
                fp16=fp16,
                deepspeed=ds_config,
                per_device_train_batch_size=per_device_train_batch_size,
                gradient_accumulation_steps=gradient_accumulation_steps,
                max_grad_norm=max_grad_norm,
                adam_beta1=adam_beta1,
                adam_beta2=adam_beta2,
            )
            with self.assertRaises(Exception) as context:
                trainer.train()

        for key in keys:
            self.assertTrue(
                key in str(context.exception),
                f"{key} is not in the exception message:\n{context.exception}",
            )

    # Test various combos
    # 1. DS scheduler + DS optimizer: this is already tested by most other tests
    # 2. HF scheduler + HF optimizer:
    # 3. DS scheduler + HF optimizer:
    # 4. HF scheduler + DS optimizer:

    def test_hf_scheduler_hf_optimizer(self):
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            del ds_config_zero2_dict["scheduler"]  # force default HF Trainer scheduler
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)

    def test_ds_scheduler_hf_optimizer(self):
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)

    def test_hf_scheduler_ds_optimizer(self):
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["scheduler"]  # force default HF Trainer scheduler
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)

    @require_deepspeed_aio
    def test_stage3_nvme_offload(self):
        with mockenv_context(**self.dist_env_1_gpu):
            # this actually doesn't have to be on NVMe, any storage will do since this test only
            # runs a simple check that we can use some directory as if it were NVMe
            nvme_path = self.get_auto_remove_tmp_dir()
            nvme_config = dict(device="nvme", nvme_path=nvme_path)
            ds_config_zero3_dict = self.get_config_dict(ZERO3)
            ds_config_zero3_dict["zero_optimization"]["offload_optimizer"] = nvme_config
            ds_config_zero3_dict["zero_optimization"]["offload_param"] = nvme_config
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero3_dict)
            with CaptureLogger(deepspeed_logger) as cl:
                trainer.train()
            self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")

    # --- These tests need to run on both zero stages --- #

    @parameterized.expand(stages)
    def test_hf_optimizer_with_offload(self, stage):
        # non-DS optimizers can be used with ZeRO-offload, as long as they have both a CPU and
        # a GPU implementation (except LAMB)
        ds_config_dict = self.get_config_dict(stage)
        del ds_config_dict["optimizer"]  # force default HF Trainer optimizer
        # force cpu offload
        ds_config_dict["zero_optimization"]["offload_optimizer"]["device"] = "cpu"
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_dict)
            with CaptureLogger(deepspeed_logger) as cl:
                trainer.train()
            self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")

    @parameterized.expand(stages)
    def test_fake_notebook_no_launcher(self, stage):
        # this setup emulates a notebook where a launcher needs to be emulated by hand

        # note that unittest resets sys.stdout each test, so `CaptureStd` will work here to capture
        # DeepSpeed log if this test happens to run first in this pytest worker. But it will fail if
        # it's not run as the first test, as `sys.stdout` will no longer be the same. So we either have
        # to reset `deepspeed_logger.handlers[0].setStream(sys.stdout)` or directly capture from the deepspeed_logger.
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=self.get_config_dict(stage))
            with CaptureLogger(deepspeed_logger) as cl:
                trainer.train()
            self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")

    @parameterized.expand(stages)
    def test_early_get_last_lr(self, stage):
        # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
        # not run for the first few dozen steps while the loss scale is too large, and thus
        # `get_last_lr` will fail if called during that warm-up stage,
        #
        # setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls
        # `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step.
        with mockenv_context(**self.dist_env_1_gpu):
            a = b = 0.0
            trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=8,
                fp16=True,
                deepspeed=self.get_config_dict(stage),
                per_device_train_batch_size=8,
                logging_steps=1,
            )
            trainer.train()
            post_train_a = trainer.model.a.item()

            # XXX: for some reason the following check fails with zero3 - not broken but a
            # different qualitative outcome - as if optimizer did run
            # oddly getting 1.0 for both a and b from 0.0 - there is a bug somewhere
            # print(trainer.model.a.item())
            # print(trainer.model.b.item())
            # need to investigate at some point
            if stage == ZERO3:
                return

            # it's enough that train didn't fail for this test, but we must check that
            # optimizer/scheduler didn't run (since if it did this test isn't testing the right thing)
            self.assertEqual(post_train_a, a)

    @parameterized.expand(stages)
    def test_gradient_accumulation(self, stage):
        # this test measures that we get identical weights and similar loss with:
        # 1. per_device_train_batch_size=16, gradient_accumulation_steps=1
        # 2. per_device_train_batch_size=4, gradient_accumulation_steps=4
        # since the 2nd should produce the same effective batch as the 1st, with the same results
        #
        # I can get an identical loss for a small train_len=32, plus the power of the initial
        # dynamic loss scale value set to:
        #   "fp16.initial_scale_power": 1
        # plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file
        # but for some reason when going to train_len=64 the weights start to mismatch with this setup.
        # the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical
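        #
        # effective batch per optimizer step (single gpu here, so world_size=1):
        #   run 1: per_device_train_batch_size=16 * gradient_accumulation_steps=1 = 16 samples
        #   run 2: per_device_train_batch_size=4  * gradient_accumulation_steps=4 = 16 samples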

        train_len = 64
        a = b = 0.0

        kwargs = dict(
            a=a,
            b=b,
            local_rank=0,
            train_len=train_len,
            fp16=True,
            deepspeed=self.get_config_dict(stage),
        )

        with mockenv_context(**self.dist_env_1_gpu):
            no_grad_accum_trainer = get_regression_trainer(
                **kwargs,
                per_device_train_batch_size=16,
                gradient_accumulation_steps=1,
            )
            no_grad_accum_result = no_grad_accum_trainer.train()
            no_grad_accum_loss = no_grad_accum_result.training_loss
            no_grad_accum_a = no_grad_accum_trainer.model.a.item()
            no_grad_accum_b = no_grad_accum_trainer.model.b.item()
            # make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger
            self.assertNotEqual(no_grad_accum_a, a)

        with mockenv_context(**self.dist_env_1_gpu):
            yes_grad_accum_trainer = get_regression_trainer(
                **kwargs,
                per_device_train_batch_size=4,
                gradient_accumulation_steps=4,
            )
            yes_grad_accum_result = yes_grad_accum_trainer.train()
            yes_grad_accum_loss = yes_grad_accum_result.training_loss
            yes_grad_accum_a = yes_grad_accum_trainer.model.a.item()
            yes_grad_accum_b = yes_grad_accum_trainer.model.b.item()
            self.assertNotEqual(yes_grad_accum_a, a)

        # training with 1/4 the batch size but 4x the accumulation steps should give the same
        # weights, though sometimes there is still a slight difference of up to 1e-6
        self.assertAlmostEqual(no_grad_accum_a, yes_grad_accum_a, places=5)
        self.assertAlmostEqual(no_grad_accum_b, yes_grad_accum_b, places=5)

        # see the note above how to get identical loss on a small bs
        self.assertAlmostEqual(no_grad_accum_loss, yes_grad_accum_loss, places=2)

    def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage):
        # adapted from TrainerIntegrationCommon.check_saved_checkpoints

        file_list = [WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"]

        if stage == ZERO2:
            ds_file_list = ["mp_rank_00_model_states.pt"]
        elif stage == ZERO3:
            ds_file_list = ["zero_pp_rank_0_mp_rank_00_model_states.pt"]
        else:
            raise ValueError(f"unknown stage {stage}")

        # XXX: this can be recoded and then removed once we require deepspeed>0.3.13
        from packaging import version

        import deepspeed

        if version.parse(deepspeed.__version__) > version.parse("0.3.13"):
            ds_file_list.append("zero_pp_rank_0_mp_rank_00_optim_states.pt")
        else:
            ds_file_list.append("zero_pp_rank_0_mp_rank_00optim_states.pt")

        for step in range(freq, total, freq):
            checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
            self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found")

            # common files
            for filename in file_list:
                path = os.path.join(checkpoint, filename)
                self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")

            # ds files
            ds_path = os.path.join(checkpoint, f"global_step{step}")
            for filename in ds_file_list:
                # filename = os.path.join(path, filename)
                # print(filename)
                path = os.path.join(ds_path, filename)
                self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")

    @parameterized.expand(stages)
    def test_save_checkpoints(self, stage):
        # adapted from TrainerIntegrationTest.test_save_checkpoints

        freq = 5
        output_dir = self.get_auto_remove_tmp_dir()
        ds_config_dict = self.get_config_dict(stage)
        ds_config_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
        if stage == ZERO3:
            ds_config_dict["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = True

        # save checkpoints
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(
                output_dir=output_dir,
                save_steps=freq,
                fp16=True,
                deepspeed=ds_config_dict,
            )
            trainer.train()

        total = int(self.n_epochs * 64 / self.batch_size)
        self.check_saved_checkpoints_deepspeed(output_dir, freq, total, stage)

    @parameterized.expand(stages)
    def test_can_resume_training_errors(self, stage):

        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_dict = self.get_config_dict(stage)
            output_dir = self.get_auto_remove_tmp_dir()
            trainer = get_regression_trainer(output_dir=output_dir, fp16=True, deepspeed=ds_config_dict)

            # 1. fail to find any checkpoint - due to a fresh output_dir
            with self.assertRaises(Exception) as context:
                trainer.train(resume_from_checkpoint=True)
            self.assertTrue(
                "No valid checkpoint found in output directory" in str(context.exception),
                f"got exception: {context.exception}",
            )

            # 2. fail to find a bogus checkpoint
            with self.assertRaises(Exception) as context:
                checkpoint = os.path.join(output_dir, "checkpoint-5")
                trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
            self.assertTrue(
                "Can't find a valid checkpoint at" in str(context.exception), f"got exception: {context.exception}"
            )

    @parameterized.expand(stages)
    def test_can_resume_training_normal(self, stage):
        # adapted from TrainerIntegrationTest.test_can_resume_training
        # test normal resume for each stage separately, error-handling is tested in a different test
        output_dir = self.get_auto_remove_tmp_dir()
        ds_config_dict = self.get_config_dict(stage)
        ds_config_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
        if stage == ZERO3:
            ds_config_dict["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = True

        kwargs = dict(
            output_dir=output_dir, train_len=128, save_steps=5, learning_rate=0.1, fp16=True, deepspeed=ds_config_dict
        )

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(**kwargs)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(output_dir, "checkpoint-5")

            # Reinitialize trainer
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

            # Now check with a later checkpoint that it also works when we span over one epoch
            checkpoint = os.path.join(output_dir, "checkpoint-15")

            # Reinitialize trainer and load model
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

            # Finally, should be able to resume with the same trainer/same deepspeed engine instance
            # XXX: but currently this is not possible due to a DS bug: https://github.com/microsoft/DeepSpeed/issues/1612
            # trainer.train(resume_from_checkpoint=checkpoint)
            # a workaround needs to be used that re-creates the deepspeed engine

    @parameterized.expand(stages)
    def test_load_state_dict_from_zero_checkpoint(self, stage):
        # test that we can load fp32 weights directly from the zero checkpoint into the current model

        output_dir = self.get_auto_remove_tmp_dir()  # "./xxx", after=False, before=False)

        ds_config_dict = self.get_config_dict(stage)

        kwargs = dict(
            output_dir=output_dir,
            train_len=4,
            per_device_train_batch_size=4,
            num_train_epochs=1,
            save_strategy="steps",
            save_steps=1,
            learning_rate=0.1,
            fp16=True,
            deepspeed=ds_config_dict,
        )

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(**kwargs)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint_dir = get_last_checkpoint(output_dir)
            model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)

            (a1, b1) = model.a.item(), model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

    def test_config_object(self):
        # test that we can switch from zero2 to zero3 in the same process for example
        # test is_zero, etc.
        output_dir = self.get_auto_remove_tmp_dir()
        kwargs = dict(output_dir=output_dir, train_len=8, fp16=True)

        ds_config_zero3_dict = self.get_config_dict("zero3")
        ds_config_zero2_dict = self.get_config_dict("zero2")

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
            self.assertTrue(is_deepspeed_zero3_enabled())

            # test we can repeat that and with train this time
            trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
            trainer.train()
            self.assertTrue(is_deepspeed_zero3_enabled())

            # test zero3 is disabled
            trainer = get_regression_trainer(deepspeed=ds_config_zero2_dict, **kwargs)
            self.assertFalse(is_deepspeed_zero3_enabled())

            # check config obj
            config = deepspeed_config()
            self.assertTrue(bool(config), "Deepspeed config should be accessible")

            del trainer
            # now weakref should gc the global and we shouldn't get anything here
            config = deepspeed_config()
            self.assertFalse(is_deepspeed_zero3_enabled())
            self.assertFalse(bool(config), "Deepspeed config should not be accessible")


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWithLauncher(TestCasePlus):
    """This class is for testing via an external script - can do multiple gpus"""

    # Tests to devise #
    #
    # 1. predict_with_generate on multigpu - need to figure out how to give input sequences so that
    # the 2 gpus will generate prediction sequences that aren't of the same length - this is because
    # we had to code a special feature to sync the gpus when the predicted sequences aren't of the
    # same length. In general this will be tested as a side-effect through a variety of other tests -
    # it'll simply hang trying to synchronize with other gpus if this problem is encountered. So as
    # long as we have a few full tests running on zero3 + predict_with_generate this should be
    # mostly covered.
    #
    # but there are 5 variations on beam search in `generate` - with identical code branched with `if
    # synced_gpus`
    #
    # 2. most tests should probably be run on both: zero2 and zero3 configs
    #

    @require_torch_multi_gpu
    @parameterized.expand(stages)
    def test_basic_distributed(self, stage):
        self.run_and_check(stage=stage, distributed=True)

    def test_do_eval_no_train(self):
        # testing only zero3 since zero2 makes no sense with inference
        self.run_and_check(
            stage=ZERO3,
            eval_steps=1,
            distributed=False,
            do_train=False,
            do_eval=True,
        )

    @parameterized.expand(stages)
    def test_fp32_non_distributed(self, stage):
        # real model needs too much GPU memory under stage2+fp32, so using tiny random model here -
        # therefore no quality checks, just basic completion checks are done
        self.run_and_check(
            stage=stage,
            model_name=T5_TINY,
            distributed=False,
            do_train=True,
            do_eval=True,
            quality_checks=False,
            fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(stages)
    def test_fp32_distributed(self, stage):
        # real model needs too much GPU memory under stage2+fp32, so using tiny random model here -
        # therefore no quality checks, just basic completion checks are done
        self.run_and_check(
            stage=stage,
            model_name=T5_TINY,
            distributed=True,
            do_train=True,
            do_eval=True,
            quality_checks=False,
            fp16=False,
        )

    @parameterized.expand(stages)
    def test_resume_train_not_from_ds_checkpoint(self, stage):
        # do normal training and then resume not from the deepspeed checkpoint but explicitly from
        # the saved model dir

        do_train = True
        do_eval = False
        kwargs = dict(stage=stage, eval_steps=1, distributed=True, do_train=do_train, do_eval=do_eval)

        # 1. normal training
        output_dir = self.run_and_check(**kwargs)

        # 2. now resume explicitly from the saved weights, by passing --model_name_or_path output_dir
        # - i.e. the same path the model was saved to in step 1
        output_dir = self.run_trainer(**kwargs, model_name=output_dir)

        self.do_checks(output_dir, do_train=do_train, do_eval=do_eval)

    @require_torch_multi_gpu
    @parameterized.expand(["fp16", "fp32"])
    def test_inference(self, dtype):
        # this is just inference, so no optimizer should be loaded
        # it only works for z3 (makes no sense with z1-z2)
        fp16 = dtype == "fp16"
        self.run_and_check(
            stage=ZERO3,
            model_name=T5_TINY,
            distributed=True,
            do_train=False,
            do_eval=True,
            quality_checks=False,
            fp16=fp16,
        )

    def do_checks(self, output_dir, do_train=True, do_eval=True, quality_checks=True):

        if do_train:
            train_metrics = load_json(os.path.join(output_dir, "train_results.json"))
            self.assertIn("train_samples_per_second", train_metrics)
            if quality_checks:
                self.assertGreater(train_metrics["train_samples_per_second"], 0.5)

        if do_eval:
            eval_metrics = load_json(os.path.join(output_dir, "eval_results.json"))
            self.assertIn("eval_bleu", eval_metrics)
            if quality_checks:
                self.assertGreater(eval_metrics["eval_bleu"], 1)

    # XXX: need to do better validation beyond just that the run was successful
    def run_and_check(
        self,
        stage,
        model_name: str = T5_SMALL,
        eval_steps: int = 10,
        distributed: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
        extra_args_str: str = None,
        remove_args_str: str = None,
    ):

        # we are doing quality testing so using a small real model
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            do_train=do_train,
            do_eval=do_eval,
            distributed=distributed,
            fp16=fp16,
            extra_args_str=extra_args_str,
            remove_args_str=remove_args_str,
        )

        self.do_checks(output_dir, do_train=do_train, do_eval=do_eval, quality_checks=quality_checks)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        do_train: bool = False,
        do_eval: bool = True,
        distributed: bool = True,
        fp16: bool = True,
        extra_args_str: str = None,
        remove_args_str: str = None,
    ):
        max_len = 32
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_source_length {max_len}
            --max_target_length {max_len}
            --val_max_target_length {max_len}
            --warmup_steps 8
            --predict_with_generate
            --save_steps 0
            --eval_steps {eval_steps}
            --group_by_length
            --label_smoothing_factor 0.1
            --source_lang en
            --target_lang ro
            --report_to none
        """.split()
        args.extend(["--source_prefix", '"translate English to Romanian: "'])

        if fp16:
            args.extend(["--fp16"])

        actions = 0
        if do_train:
            actions += 1
            args.extend(
                f"""
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --max_train_samples 16
            --per_device_train_batch_size 2
            --learning_rate 3e-3
            """.split()
            )

        if do_eval:
            actions += 1
            args.extend(
                """
            --do_eval
            --max_eval_samples 16
            --per_device_eval_batch_size 2
            """.split()
            )

        assert actions > 0, "need at least do_train or do_eval for the test to run"

        if extra_args_str is not None:
            args.extend(extra_args_str.split())

        # currently only works for bool args
        if remove_args_str is not None:
            remove_args = remove_args_str.split()
            args = [x for x in args if x not in remove_args]

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
        script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"]
        launcher = get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    @parameterized.expand(stages)
    def test_clm(self, stage):
        # this test exercises model.resize_token_embeddings() which requires param gathering outside
        # of forward - it's not used by `run_translation.py`, but it is in `run_clm.py`

        data_dir = self.tests_dir / "fixtures"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_name_or_path {GPT2_TINY}
            --train_file {data_dir}/sample_text.txt
            --validation_file {data_dir}/sample_text.txt
            --output_dir {output_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --max_train_samples 16
            --max_eval_samples 16
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --num_train_epochs 1
            --warmup_steps 8
            --block_size 64
            --fp16
            --report_to none
            """.split()

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
        script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
        launcher = get_launcher(distributed=True)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

    def test_clm_from_config_zero3(self):
        # this test exercises AutoModel.from_config(config) - to ensure zero.Init is called

        data_dir = self.tests_dir / "fixtures"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_type gpt2
            --tokenizer_name {GPT2_TINY}
            --train_file {data_dir}/sample_text.txt
            --validation_file {data_dir}/sample_text.txt
            --output_dir {output_dir}
            --overwrite_output_dir
            --do_train
            --max_train_samples 4
            --per_device_train_batch_size 2
            --num_train_epochs 1
            --warmup_steps 8
            --block_size 8
            --fp16
            --report_to none
            """.split()

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_zero3.json".split()
        script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
        launcher = get_launcher(distributed=True)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        with CaptureStderr() as cs:
            execute_subprocess_async(cmd, env=self.get_env())
        assert "Detected DeepSpeed ZeRO-3" in cs.err

    @parameterized.expand(stages)
    def test_load_best_model(self, stage):
        # this test exercises --load_best_model_at_end - the key is being able to resume after some training

        data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_name_or_path {T5_TINY}
            --tokenizer_name {T5_TINY}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --source_lang en
            --target_lang ro
            --do_train
            --max_train_samples 3
            --do_eval
            --max_eval_samples 1
            --logging_strategy steps
            --logging_steps 1
            --evaluation_strategy steps
            --eval_steps 1
            --save_strategy steps
            --save_steps 1
            --load_best_model_at_end
            --per_device_train_batch_size 1
            --per_device_eval_batch_size 1
            --num_train_epochs 1
            --fp16
            --report_to none
            """.split()
        args.extend(["--source_prefix", "translate English to Romanian: "])

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_zero3.json".split()
        script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"]
        launcher = get_launcher(distributed=False)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        with CaptureStderr() as cs:
            execute_subprocess_async(cmd, env=self.get_env())
        # enough to test it didn't fail
        assert "Detected DeepSpeed ZeRO-3" in cs.err