# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import io
import json
import os
import unittest
from copy import deepcopy

from parameterized import parameterized
from transformers import TrainingArguments, is_torch_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.integrations import is_deepspeed_available
from transformers.testing_utils import (
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed


bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/.."):
    from test_trainer import TrainerIntegrationCommon  # noqa

    if is_torch_available():
        from test_trainer import get_regression_trainer  # noqa


set_seed(42)
MBART_TINY = "sshleifer/tiny-mbart"
T5_SMALL = "t5-small"


def load_json(path):
    with open(path) as f:
        return json.load(f)


# a candidate for testing_utils
def require_deepspeed(test_case):
    """
    Decorator marking a test that requires deepspeed
    """
    if not is_deepspeed_available():
        return unittest.skip("test requires deepspeed")(test_case)
    else:
        return test_case


ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


@require_deepspeed
@require_torch_gpu
class TrainerIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
    """

    This class is for testing directly via get_regression_trainer

    It mixes in `TrainerIntegrationCommon` which already has a lot of helper validation methods
    which we can re-use here.

    Important: this class' setup can only work with a single gpu because it runs within the current
    pytest worker. For multi-gpu tests use TestDeepSpeedWithLauncher.

    Note: if any of the tests of this class get run there will be at least one gpu occupied by them
    until this pytest worker exits. This is because the gpu memory allocated by the cuda-kernels
    won't be released until this pytest worker exits.

    If you watch `nvidia-smi` while other tests that fork new processes are running, this may look
    like run-away tests: one or two "stale" processes will be reported by `nvidia-smi`. This is not
    a bug.
    """

    def setUp(self):
        super().setUp()

        args = TrainingArguments(".")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

        # emulates the env vars a single-gpu launcher would set, so that DeepSpeed can
        # initialize its distributed state in-process when used with mockenv_context
        self.dist_env_1_gpu = dict(
            MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )

        self.ds_config_file = {}
        self.ds_config_file[ZERO2] = f"{self.test_file_dir_str}/ds_config_zero2.json"
        self.ds_config_file[ZERO3] = f"{self.test_file_dir_str}/ds_config_zero3.json"

        # use self.get_config_dict(stage) to access these, to ensure the original is not modified
        self.ds_config_dict = {}
        with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f:
            self.ds_config_dict[ZERO2] = json.load(f)
        with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
            self.ds_config_dict[ZERO3] = json.load(f)
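
        # for orientation only - a rough sketch of the keys these config files are assumed to
        # contain, limited to what the tests below poke at (values elided, this is not the
        # verbatim file contents):
        #
        #   {
        #       "fp16":              {"initial_scale_power": ...},
        #       "optimizer":         {...},
        #       "scheduler":         {...},
        #       "zero_optimization": {
        #           "stage": 2 or 3,
        #           "cpu_offload": ...,                               # zero2 config
        #           "stage3_gather_fp16_weights_on_model_save": ...,  # zero3 config
        #       },
        #   }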

    def tearDown(self):
        # XXX: Fixme - this is a temporary band-aid since this global variable impacts other tests
        import transformers

        transformers.integrations._is_deepspeed_zero3_enabled = None

    def get_config_dict(self, stage):
        """As the tests modify the dict, always make a copy"""
        config = deepcopy(self.ds_config_dict[stage])
        if stage == ZERO3:
            # This setting slows things down, so don't enable it by default unless needed by a test.
            # It's in the file as a demo for users since we want everything to work out of the box even if slower.
            config["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = False
        return config

    # --- It's enough to run these tests on just one of the ZeRO stages --- #

    # Test various combos
    # 1. DS scheduler + DS optimizer: this is already tested by most other tests
    # 2. HF scheduler + HF optimizer:
    # 3. DS scheduler + HF optimizer:
    # 4. HF scheduler + DS optimizer:

    def test_hf_scheduler_hf_optimizer(self):
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            del ds_config_zero2_dict["scheduler"]  # force default HF Trainer scheduler
            ds_config_zero2_dict["zero_optimization"]["cpu_offload"] = False
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)

    def test_ds_scheduler_hf_optimizer(self):
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            ds_config_zero2_dict["zero_optimization"]["cpu_offload"] = False
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)

    def test_hf_scheduler_ds_optimizer(self):
        # this combo is not possible at the moment
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["scheduler"]  # force default HF Trainer scheduler
            ds_config_zero2_dict["zero_optimization"]["cpu_offload"] = False
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(local_rank=0, deepspeed=ds_config_zero2_dict)
            with self.assertRaises(Exception) as context:
                trainer.train()
        self.assertTrue("HF scheduler + DeepSpeed optimizer combination is not possible" in str(context.exception))

    def test_hf_optimizer_with_offload(self):
        # must not allow a non-DS optimizer when using ZeRO Offload
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            ds_config_zero2_dict["zero_optimization"]["cpu_offload"] = True
            # sanity check - in case the default config changes
            assert (
                "cpu_offload" in ds_config_zero2_dict["zero_optimization"]
                and ds_config_zero2_dict["zero_optimization"]["cpu_offload"] is True
            ), "ensure the config is set up correctly"
            trainer = get_regression_trainer(local_rank=0, deepspeed=ds_config_zero2_dict)
            with self.assertRaises(Exception) as context:
                trainer.train()
        self.assertTrue("ZeRO Offload can only work with DeepSpeed optimizers" in str(context.exception))

    # --- These tests need to run on both ZeRO stages --- #
    @parameterized.expand(stages)
    def test_fake_notebook_no_launcher(self, stage):
        # this setup emulates running in a notebook, where the launcher has to be emulated by hand

        # note that unittest resets sys.stdout each test, so `CaptureStd` will work here to capture
        # DeepSpeed log if this test happens to run first in this pytest worker. But it will fail if
        # it's not run as the first test, since `sys.stdout` will no longer be the same. So we either have
        # to reset `logger.handlers[0].setStream(sys.stdout)` or directly capture from the logger.
        from deepspeed.utils import logger

        with CaptureLogger(logger) as cs:
            with mockenv_context(**self.dist_env_1_gpu):
                trainer = get_regression_trainer(local_rank=0, deepspeed=self.ds_config_file[stage])
                trainer.train()
        assert "DeepSpeed info" in cs.out, "expected DeepSpeed logger output but got none"

    @parameterized.expand(stages)
    def test_early_get_last_lr(self, stage):
        # with deepspeed's fp16 and dynamic loss scale enabled, the optimizer/scheduler steps may
        # not run for the first few dozen steps while the loss scale is too large, and so
        # `get_last_lr` will fail if called during that warm-up stage.
        #
        # setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls
        # `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step.
        with mockenv_context(**self.dist_env_1_gpu):
            a = b = 0.0
            trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=8,
                deepspeed=self.ds_config_file[stage],
                per_device_train_batch_size=8,
                logging_steps=1,
            )
            trainer.train()
            post_train_a = trainer.model.a.item()

            # XXX: for some reason the following check fails with zero3 - not broken, but a
            # qualitatively different outcome - need to investigate at some point
            if stage == ZERO3:
                return

            # it's enough that train didn't fail for this test, but we must check that
            # optimizer/scheduler didn't run (since if it did this test isn't testing the right thing)
            self.assertEqual(post_train_a, a)

    @parameterized.expand(stages)
    def test_gradient_accumulation(self, stage):
        # this test measures that we get identical weights and similar loss with:
        # 1. per_device_train_batch_size=8, gradient_accumulation_steps=1
        # 2. per_device_train_batch_size=4, gradient_accumulation_steps=2
        # since the 2nd should produce the same effective batch as the 1st (4*2 == 8*1 == 8
        # samples per optimizer step), and therefore the same results
        #
        # I can get an identical loss for a small train_len=32, plus the initial dynamic loss
        # scale set to:
        #   "fp16.initial_scale_power": 1
        # plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file.
        # but for some reason with train_len=64 the weights start to mismatch with this setup.
        # the culprit seems to be `initial_scale_power` - putting it back to its default of 32 keeps the weights identical

        train_len = 64
        a = b = 0.0

        with mockenv_context(**self.dist_env_1_gpu):
            no_grad_accum_trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=train_len,
                deepspeed=self.ds_config_file[stage],
                per_device_train_batch_size=8,
                gradient_accumulation_steps=1,
            )
            no_grad_accum_result = no_grad_accum_trainer.train()
            no_grad_accum_loss = no_grad_accum_result.training_loss
            no_grad_accum_a = no_grad_accum_trainer.model.a.item()
            no_grad_accum_b = no_grad_accum_trainer.model.b.item()
            # make sure the optimizer kicked in - if `a` hasn't changed from its original value then make train_len bigger
            self.assertNotEqual(no_grad_accum_a, a)

        with mockenv_context(**self.dist_env_1_gpu):
            yes_grad_accum_trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=train_len,
                deepspeed=self.ds_config_file[stage],
                per_device_train_batch_size=4,
                gradient_accumulation_steps=2,
            )
            yes_grad_accum_result = yes_grad_accum_trainer.train()
            yes_grad_accum_loss = yes_grad_accum_result.training_loss
            yes_grad_accum_a = yes_grad_accum_trainer.model.a.item()
            yes_grad_accum_b = yes_grad_accum_trainer.model.b.item()
            self.assertNotEqual(yes_grad_accum_a, a)

        # training with half the batch size but twice the accumulation steps should give the same weights
        self.assertEqual(no_grad_accum_a, yes_grad_accum_a)
        self.assertEqual(no_grad_accum_b, yes_grad_accum_b)

        # see the note above how to get identical loss on a small bs
        self.assertAlmostEqual(no_grad_accum_loss, yes_grad_accum_loss, places=5)

    def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage):
        # adapted from TrainerIntegrationCommon.check_saved_checkpoints

        file_list = [WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"]

        if stage == ZERO2:
            ds_file_list = ["mp_rank_00_model_states.pt"]
        elif stage == ZERO3:
            ds_file_list = ["zero_pp_rank_0_mp_rank_00_model_states.pt"]
        else:
            raise ValueError(f"unknown stage {stage}")

        # XXX: this can be recoded and then removed once we require deepspeed>0.3.13
        from packaging import version

        import deepspeed

        if version.parse(deepspeed.__version__) > version.parse("0.3.13"):
            ds_file_list.append("zero_pp_rank_0_mp_rank_00_optim_states.pt")
        else:
            # older deepspeed releases name this file without the underscore before "optim"
            ds_file_list.append("zero_pp_rank_0_mp_rank_00optim_states.pt")

        for step in range(freq, total, freq):
            checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
            self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found")

            # common files
            for filename in file_list:
                path = os.path.join(checkpoint, filename)
                self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")

            # ds files
            ds_path = os.path.join(checkpoint, f"global_step{step}")
            for filename in ds_file_list:
                # filename = os.path.join(path, filename)
                # print(filename)
                path = os.path.join(ds_path, filename)
                self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")

    @parameterized.expand(stages)
    def test_save_checkpoints(self, stage):
        # adapted from TrainerIntegrationTest.test_save_checkpoints

        freq = 5
        output_dir = self.get_auto_remove_tmp_dir()
        ds_config_dict = self.get_config_dict(stage)
        ds_config_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
        if stage == ZERO3:
            ds_config_dict["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = True

        # save checkpoints
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(
                output_dir=output_dir,
                save_steps=freq,
                deepspeed=ds_config_dict,
            )
            trainer.train()

        total = int(self.n_epochs * 64 / self.batch_size)
        self.check_saved_checkpoints_deepspeed(output_dir, freq, total, stage)

    @parameterized.expand(stages)
    def test_can_resume_training_errors(self, stage):

        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_dict = self.get_config_dict(stage)
            output_dir = self.get_auto_remove_tmp_dir()
            trainer = get_regression_trainer(output_dir=output_dir, deepspeed=ds_config_dict)

            # 1. fail to find any checkpoint - due to a fresh output_dir
            with self.assertRaises(Exception) as context:
                trainer.train(resume_from_checkpoint=True)
            self.assertTrue(
                "No valid checkpoint found in output directory" in str(context.exception),
                f"got exception: {context.exception}",
            )

            # 2. fail to find a bogus checkpoint
            with self.assertRaises(Exception) as context:
                checkpoint = os.path.join(output_dir, "checkpoint-5")
                trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
            self.assertTrue(
                "Can't find a valid checkpoint at" in str(context.exception), f"got exception: {context.exception}"
            )

    @parameterized.expand(stages)
    def test_can_resume_training_normal(self, stage):
        # adapted from TrainerIntegrationTest.test_can_resume_training
        # test normal resume for each stage separately, error-handling is tested in a different test
        output_dir = self.get_auto_remove_tmp_dir()
        ds_config_dict = self.get_config_dict(stage)
        ds_config_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
        if stage == ZERO3:
            ds_config_dict["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = True

        kwargs = dict(output_dir=output_dir, train_len=128, save_steps=5, learning_rate=0.1, deepspeed=ds_config_dict)

        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(**kwargs)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(output_dir, "checkpoint-5")

            # Reinitialize trainer
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

            # Now check with a later checkpoint that it also works when we span over one epoch
            checkpoint = os.path.join(output_dir, "checkpoint-15")

            # Reinitialize trainer and load model
            trainer = get_regression_trainer(**kwargs)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWithLauncher(TestCasePlus):
    """This class is for testing via an external script - can do multiple gpus"""

    # Tests to devise #
    #
    # 1. predict_with_generate on multigpu - need to figure out how to give input sequences so that
    # the 2 gpus will generate prediction sequences that aren't of the same length - this is because
    # we had to code a special feature to sync the gpus when the predicted sequences aren't of the
    # same length. In general this will be tested as a side-effect through a variety of other tests -
    # it'll simply hang trying to synchronize with other gpus if this problem is encountered. So as
    # long as we have a few full tests running on zero3 + predict_with_generate this should be
    # mostly covered.
    #
    # but there are 5 variations on beam search in `generate` - with identical code branched with `if
    # synced_gpus`
    #
    # 2. most tests should probably be run on both: zero2 and zero3 configs
    #

    @require_torch_multi_gpu
    @parameterized.expand(stages)
    def test_basic_distributed(self, stage):
        self.run_and_check(stage=stage, distributed=True)

    @parameterized.expand(stages)
    def test_do_eval_no_train(self, stage):
        # we should not fail if train is skipped
        self.run_and_check(
            stage=stage,
            eval_steps=1,
            distributed=False,
            do_train=False,
            do_eval=True,
        )

    @parameterized.expand(stages)
    def test_resume_train_not_from_ds_checkpoint(self, stage):
        # do normal training and then resume not from the deepspeed checkpoint but explicitly from
        # the saved model dir

        do_train = True
        do_eval = False
        kwargs = dict(stage=stage, eval_steps=1, distributed=True, do_train=do_train, do_eval=do_eval)

        # 1. normal training
        output_dir = self.run_and_check(**kwargs)

        # 2. now resume explicitly from the saved weights, by passing --model_name_or_path output_dir
        # - i.e. the same path the model was saved to in step 1
        output_dir = self.run_trainer(**kwargs, model_name=output_dir)

        self.do_checks(output_dir, do_train=do_train, do_eval=do_eval)

    def do_checks(self, output_dir, do_train=True, do_eval=True):

        if do_train:
            train_metrics = load_json(os.path.join(output_dir, "train_results.json"))
            self.assertIn("train_samples_per_second", train_metrics)
            self.assertGreater(train_metrics["train_samples_per_second"], 0.5)

        if do_eval:
            eval_metrics = load_json(os.path.join(output_dir, "eval_results.json"))
            self.assertIn("eval_bleu", eval_metrics)
            self.assertGreater(eval_metrics["eval_bleu"], 0)

    # XXX: need to do better validation beyond just that the run was successful
    def run_and_check(
        self,
        stage,
        eval_steps=10,
        distributed=True,
        do_train=True,
        do_eval=True,
        extra_args_str=None,
        remove_args_str=None,
    ):

        # we are doing quality testing so using a small real model
        output_dir = self.run_trainer(
            stage=stage,
            model_name=T5_SMALL,
            eval_steps=eval_steps,
            num_train_epochs=1,
            do_train=do_train,
            do_eval=do_eval,
            distributed=distributed,
            extra_args_str=extra_args_str,
            remove_args_str=remove_args_str,
        )

        self.do_checks(output_dir, do_train=do_train, do_eval=do_eval)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        do_train: bool = False,
        do_eval: bool = True,
        distributed: bool = True,
        extra_args_str: str = None,
        remove_args_str: str = None,
    ):
        max_len = 32
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_source_length {max_len}
            --max_target_length {max_len}
            --val_max_target_length {max_len}
            --warmup_steps 8
            --predict_with_generate
            --logging_steps 0
            --save_steps 0
            --eval_steps {eval_steps}
            --group_by_length
            --label_smoothing_factor 0.1
            --adafactor
            --source_lang en
            --target_lang ro
        """.split()
        args.extend(["--source_prefix", '"translate English to Romanian: "'])

        actions = 0
        if do_train:
            actions += 1
            args.extend(
                f"""
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --max_train_samples 100
            --per_device_train_batch_size 2
            --learning_rate 3e-3
            """.split()
            )

        if do_eval:
            actions += 1
            args.extend(
                """
            --do_eval
            --max_eval_samples 100
            --per_device_eval_batch_size 2
            """.split()
            )

        assert actions > 0, "need at least do_train or do_eval for the test to run"

        if extra_args_str is not None:
            args.extend(extra_args_str.split())

        # currently only works for bool args
        if remove_args_str is not None:
            remove_args = remove_args_str.split()
            args = [x for x in args if x not in remove_args]

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
        script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    @parameterized.expand(stages)
    def test_clm(self, stage):
        # this test exercises model.resize_token_embeddings() which requires param gathering outside
        # of forward - it's not used by `run_translation.py`, but it is in `run_clm.py`

        data_dir = self.tests_dir / "fixtures"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_name_or_path sshleifer/tiny-gpt2
            --train_file {data_dir}/sample_text.txt
            --validation_file {data_dir}/sample_text.txt
            --output_dir {output_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --max_train_samples 10
            --max_eval_samples 10
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 1
            --warmup_steps 8
            --block_size 128
            """.split()

        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
        script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
        launcher = self.get_launcher(distributed=True)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
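        # for illustration, assuming 2 visible gpus and distributed=True, the command assembled
        # by run_trainer() above ends up looking roughly like:
        #   deepspeed --num_nodes 1 --num_gpus 2 <script> <script args> --deepspeed ds_config_<stage>.json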
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()