# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.

"""
Testing ShardedDDP
"""

from contextlib import suppress

import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn import Linear, Sequential

from fairscale.nn.data_parallel import ShardedDataParallel
from fairscale.optim import OSS
from fairscale.utils.testing import (
    GPT2,
    SGDWithPausingCompute,
    available_devices,
    check_same_models_across_ranks,
    skip_if_less_than_four_gpu,
    skip_if_no_cuda,
    skip_if_single_gpu,
    temp_files_ctx,
)


def _get_mlp(tripwire: bool = False):
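    """Return a small MLP, or a "tripwire" model mixing trainable and non-trainable parameters."""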
    if not tripwire:
        return Sequential(Linear(2, 3), Linear(3, 3), Linear(3, 3), Linear(3, 3), Linear(3, 3), Linear(3, 3))

    class Tripwire(torch.nn.Module):
        """A model made to expose possible corner cases"""

        def __init__(self) -> None:
            super().__init__()
            self.model = Linear(2, 3, bias=False)

            # A non-trainable parameter with a mismatched dtype, which can for instance trip the gradient buckets
            self.register_parameter("tripwire", torch.nn.Parameter(torch.LongTensor((3, 3)), requires_grad=False))

        def forward(self, x):
            return self.model(x)

    return Tripwire()


class _DoubleInput(torch.nn.Module):
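    """Toy module which runs two inputs through the same MLP and concatenates the results."""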
    def __init__(self):
        super().__init__()
        self.mlp = _get_mlp()

    def forward(self, x, y):
        x1 = self.mlp(x)
        x2 = self.mlp(y)
        return torch.cat((x1, x2), dim=1)


def run_one_step(
    rank,
    world_size,
    backend,
    device,
    temp_file_name,
    broadcast_buffers,
    grad_accumulation,
    reduce_buffer_size,
    optimizer_type,
    reduce_fp16=False,
):
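    # Single training run: check that the wrapped model stays in sync across the ranks while stepping the optimizer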
    dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
    if device == torch.device("cuda"):
        torch.cuda.set_device(rank)

    torch.manual_seed(rank)
    np.random.seed(rank)

    # Any model works. Add one different buffer per rank
    model = _get_mlp()
    model.register_buffer("test_buffer", torch.ones((1)) * rank)
    model.to(device)

    next(model.parameters()).requires_grad = False  # Test non-trainable parameters

    optimizer_settings = {"lr": 1e-3, "momentum": 0.99}
    if optimizer_type == SGDWithPausingCompute:
        optimizer_settings["rank"] = rank

    optimizer = OSS(params=model.parameters(), optim=optimizer_type, **optimizer_settings)
    ddp_model = ShardedDataParallel(
        model,
        optimizer,
        broadcast_buffers=broadcast_buffers,
        reduce_buffer_size=reduce_buffer_size,
        reduce_fp16=reduce_fp16,
    )

    # The model should be synchronized across the ranks at ShardedDataParallel construction time; check that it is
    check_same_models_across_ranks(
        ddp_model, dist.group.WORLD, params_should_be_equal=True, check_broadcast_buffers=broadcast_buffers
    )

    # Optim loop
    def closure():
        ddp_model.zero_grad(set_to_none=True)

        with ddp_model.no_sync() if grad_accumulation else suppress():
            input_tensor = torch.rand((64, 2)).to(device)
            loss = ddp_model(input_tensor).abs().sum()
            loss.backward()
        return loss

    # The models should stay identical across the ranks
    for i in range(5):
        _ = optimizer.step(closure=closure)

        # For a sync of all the streams
        if device.type == torch.device("cuda").type:
            torch.cuda.synchronize(device=device)

        # when running on cpu/gloo the "nodes" are not really different
        same_params = device == torch.device("cpu") or not grad_accumulation
        check_same_models_across_ranks(
            ddp_model, dist.group.WORLD, params_should_be_equal=same_params, check_broadcast_buffers=broadcast_buffers
        )

    dist.destroy_process_group()


def run_test(backend, device, world_size, broadcast_buffers, grad_accumulation, reduce_buffer_size, optimizer_type):
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_one_step,
            args=(
                world_size,
                backend,
                device,
                temp_files[0],
                broadcast_buffers,
                grad_accumulation,
                reduce_buffer_size,
                optimizer_type,
            ),
            nprocs=world_size,
            join=True,
        )


@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("broadcast_buffers", [True, False])
@pytest.mark.parametrize("grad_accumulation", [True, False])
@pytest.mark.parametrize("reduce_buffer_size", [0, 2 ** 20])
@pytest.mark.parametrize("optimizer_type", [torch.optim.SGD, SGDWithPausingCompute])
@pytest.mark.parametrize("reduce_fp16", [False, True])
@pytest.mark.parametrize(
    "setup",
    [
        [dist.Backend.NCCL, torch.device("cuda")],
        [dist.Backend.GLOO, torch.device("cpu")],
        [dist.Backend.GLOO, torch.device("cuda")],
    ],
)
def test_step(broadcast_buffers, grad_accumulation, reduce_buffer_size, optimizer_type, reduce_fp16, setup):
    world_size = 2
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_one_step,
            args=(
                world_size,
                setup[0],
                setup[1],
                temp_files[0],
                broadcast_buffers,
                grad_accumulation,
                reduce_buffer_size,
                optimizer_type,
                reduce_fp16,
            ),
            nprocs=world_size,
            join=True,
        )


def run_test_two_inputs(rank, world_size, backend, device, temp_file_name, reduce_buffer_size):
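    # Check that the wrapped module can be fed several positional tensor inputs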
    dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
    if device == "cuda":
        torch.cuda.set_device(rank)

    torch.manual_seed(rank)
    np.random.seed(rank)

    model = _DoubleInput().to(device)
    optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
    ddp_model = ShardedDataParallel(model, optimizer, reduce_buffer_size=reduce_buffer_size)

    # Optim loop
    def closure():
        ddp_model.zero_grad(set_to_none=True)
        input_tensor = torch.rand((64, 2)).to(device)
        loss = ddp_model(input_tensor, input_tensor).abs().sum()
        loss.backward()
        return loss

    for _ in range(5):
        _ = optimizer.step(closure=closure)

    dist.destroy_process_group()


@pytest.mark.parametrize("reduce_buffer_size", [0, 2 ** 20])
@pytest.mark.parametrize("backend", ["gloo", "nccl"])
@pytest.mark.parametrize("device", available_devices)
def test_inputs(reduce_buffer_size, backend, device):
    # Check that the ShardedDDP wrapper accepts tuple(tensors) as inputs
    world_size = 2
    if backend == "nccl" and device == "cpu":
        pytest.skip("Incompatible combination, or cuda not available")
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_test_two_inputs,
            args=(world_size, backend, device, temp_files[0], reduce_buffer_size),
            nprocs=world_size,
            join=True,
        )


def test_ddp_attributes():
    # Check that ShardedDDP exposes the same attributes as PyTorch's DDP
    # - is multi_device_module
    # - device_type
    with temp_files_ctx(num=1) as temp_files:
        dist.init_process_group(init_method="file://" + temp_files[0], backend="gloo", rank=0, world_size=1)

        model = Sequential(Linear(2, 3), Linear(3, 3))
        optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
        ddp_model = ShardedDataParallel(model, optimizer)

        assert hasattr(ddp_model, "is_multi_device_module")
        assert hasattr(ddp_model, "device_type")
        assert hasattr(ddp_model, "module")
        dist.destroy_process_group()


def test_random_attributes():
    with temp_files_ctx(num=1) as temp_files:
        # Check that ShardedDDP exposes the original module's attributes
        dist.init_process_group(init_method="file://" + temp_files[0], backend="gloo", rank=0, world_size=1)

        model = Sequential(Linear(2, 3), Linear(3, 3))
        model.banana = "sweet"

        optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
        ddp_model = ShardedDataParallel(model, optimizer)

        assert hasattr(ddp_model, "banana")
        assert not hasattr(ddp_model, "orange")

        dist.destroy_process_group()


def test_catch_grad_grad():
    with temp_files_ctx(num=1) as temp_files:
        # Check that ShardedDDP refuses to run when a parameter's gradient itself requires a gradient
        dist.init_process_group(init_method="file://" + temp_files[0], backend="gloo", rank=0, world_size=1)

        model = Sequential(Linear(2, 3), Linear(3, 3))
        model.train()
        chained_grad = torch.zeros_like(next(model.parameters()))
        chained_grad.requires_grad = True
        next(model.parameters()).grad = chained_grad

        optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
        ddp_model = ShardedDataParallel(model, optimizer)

        inputs = torch.rand(100, 2)
        with pytest.raises(RuntimeError):
            _ = ddp_model(inputs)

        dist.destroy_process_group()


def test_mixed_types():
    with temp_files_ctx(num=1) as temp_files:
        # Check that ShardedDDP handles a model mixing trainable and non-trainable parameters of different dtypes
        dist.init_process_group(init_method="file://" + temp_files[0], backend="gloo", rank=0, world_size=1)

        model = _get_mlp(tripwire=True)

        optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
        model = ShardedDataParallel(model, optimizer)
        input_tensor = torch.rand((2, 2))
        _ = model(input_tensor)

        dist.destroy_process_group()


def run_test_train_eval_change(rank, world_size, file):
    # Check that ShardedDDP handles the switch from training to eval properly
    dist.init_process_group(init_method="file://" + file, backend="gloo", rank=rank, world_size=world_size)

    model = _get_mlp()
    model.train()
    optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
    model = ShardedDataParallel(model, optimizer)
    input_tensor = torch.rand((2, 2))
    loss = model(input_tensor).sum()
    loss.backward()  # make sure that the gradients are reduced

    # Wipe the gradients and switch to eval mode
    model.zero_grad()
    model.eval()
    _ = model(input_tensor)
    assert next(model.parameters()).grad is None or torch.norm(next(model.parameters()).grad) < 1e-6

    # Get back to training
    model = model.train()
    model(input_tensor).sum().backward()
    assert torch.norm(next(model.parameters()).grad) > 0.0

    dist.destroy_process_group()


def test_train_eval_change():
    world_size = 4
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_test_train_eval_change,
            args=(world_size, temp_files[0]),
            nprocs=world_size,
            join=True,
        )


def run_test_device_change(rank, world_size, backend, device, temp_file_name, reduce_buffer_size):
    # Check that the wrapped module can change devices
    dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)

    model = Sequential(Linear(2, 3), Linear(3, 3)).cpu()  # on CPU on purpose, to test changing the device afterwards
    optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
    ddp_model = ShardedDataParallel(
        model, optimizer, sync_models_at_startup=False, reduce_buffer_size=reduce_buffer_size
    )
    # Changing the device of the wrapped model should be caught and is not supported
    with pytest.raises(AssertionError):
        ddp_model.to(device)

    # Check that we can change the data type
    ddp_model.to(device=torch.device("cpu"), dtype=torch.float16)

    dist.destroy_process_group()


@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("reduce_buffer_size", [0, 2 ** 20])
def test_device_change(reduce_buffer_size):
    # Check that ShardedDDP handles a device change properly
    world_size = 2
    backend = "nccl"
    with temp_files_ctx(num=1) as temp_files:
        device = "cuda"
        mp.spawn(
            run_test_device_change,
            args=(world_size, backend, device, temp_files[0], reduce_buffer_size),
            nprocs=world_size,
            join=True,
        )


def run_test_training_change(rank, world_size, backend, device, temp_file_name, reduce_buffer_size):
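    # Check that switching the wrapped module between train() and eval() is handled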
    group = dist.init_process_group(
        init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size
    )
    torch.cuda.set_device(rank)

    model = Sequential(Linear(2, 3), Linear(3, 3)).to(device)
    optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
    ddp_model = ShardedDataParallel(model, optimizer, process_group=group, reduce_buffer_size=reduce_buffer_size)

    inputs = torch.rand((10, 2), device=device)
    outputs = ddp_model(inputs)  # assert if the module has not been changed properly
    _ = outputs.norm().backward()

    ddp_model.eval()
    ddp_model(inputs)  # This will assert if eval() is not properly taken into account
    ddp_model(inputs)

    dist.destroy_process_group()


@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("reduce_buffer_size", [0, 2 ** 20])
def test_training_change(reduce_buffer_size):
    world_size = 2
    backend = "nccl"
    device = "cuda"
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_test_training_change,
            args=(world_size, backend, device, temp_files[0], reduce_buffer_size),
            nprocs=world_size,
            join=True,
        )


def run_test_ddp_sync_batch_norm(rank, world_size, backend, device, temp_file_name):
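    # Convert the BatchNorm layer to SyncBatchNorm and check that a forward pass through ShardedDDP works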
    dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)

    model = Sequential(Linear(2, 3), torch.nn.BatchNorm1d(3), Linear(3, 3)).to(device)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.to(device)  # in pytorch 1.5 syncBN switches to the default device/cpu

    optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
    ddp_model = ShardedDataParallel(model, optimizer)

    assert isinstance(model[1], torch.nn.SyncBatchNorm)
    # Ensures sync batch norm handles have been added
    ddp_model(torch.randn(2, 2).to(device))
    dist.destroy_process_group()


@skip_if_no_cuda
@skip_if_single_gpu
def test_ddp_sync_batch_norm():
    # Check that ShardedDDP is compatible with sync batch norm across multiple GPUs
    world_size = 2
    backend = "gloo"
    device = "cuda"
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_test_ddp_sync_batch_norm,
            args=(world_size, backend, device, temp_files[0]),
            nprocs=world_size,
            join=True,
        )


def run_test_two_optimizers(rank, world_size, backend, device, temp_file_name):
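    # Check that ShardedDDP handles the model parameters being sharded across two optimizers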
    dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
    if device == torch.device("cuda"):
        torch.cuda.set_device(rank)

    torch.manual_seed(rank)
    np.random.seed(rank)
    model = _DoubleInput().to(device)

    parameters = list(model.parameters())
    optimizer_1 = OSS(params=parameters[:-10], optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
    optimizer_2 = OSS(params=parameters[-10:], optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
    ddp_model = ShardedDataParallel(model, [optimizer_1, optimizer_2])

    # Optim loop
    def closure():
        input_tensor = torch.rand((64, 2)).to(device)
        loss = ddp_model(input_tensor, input_tensor).abs().sum()
        loss.backward()
        return loss

    for i in range(5):
        optimizer_1.zero_grad()
        optimizer_2.zero_grad()

        _ = optimizer_1.step(closure=closure)
        _ = optimizer_2.step(closure=closure)

    dist.destroy_process_group()


def test_two_optimizers():
    # Check that ShardedDDP supports sharding the model parameters across two optimizers
    world_size = 2
    backend = "gloo"
    device = "cpu"
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_test_two_optimizers, args=(world_size, backend, device, temp_files[0]), nprocs=world_size, join=True
        )


def run_test_gpt2(rank, world_size, backend, device, temp_file_name, reduce_buffer_size):
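    # Train a small GPT2 model and stress test moving/casting the wrapped model with .to()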
    INPUT_DIM = 16
    BATCH_SIZE = 10
    STEPS = 10

    url = "file://" + temp_file_name
    dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)

    torch.manual_seed(rank)
    np.random.seed(rank)
    model = GPT2(
        embed_dim=256, num_heads=2, num_layers=12, num_positions=INPUT_DIM * INPUT_DIM, num_vocab=512, num_classes=2
    )
    optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
    ddp_model = ShardedDataParallel(model, optimizer, reduce_buffer_size=reduce_buffer_size)

    # Move the model to another device post-construction
    model = model.to(device)

    # Optim loop
    set_to_none = True

    def closure():
        nonlocal set_to_none
        ddp_model.zero_grad(set_to_none=set_to_none)
        set_to_none = not set_to_none

        # Force int inputs to prevent the first grad from firing
        input_tensor = torch.randint(10, (BATCH_SIZE, INPUT_DIM)).to(device)
        loss = ddp_model(input_tensor).abs().sum()
        loss.backward()
        return loss

    # Check for bucketing overflows
    for i in range(STEPS):
        _ = optimizer.step(closure=closure)

        # Stress test the .to() method
        ddp_model.to(device=device, dtype=torch.float16)
        ddp_model.to(device=device, dtype=torch.float32)

    dist.destroy_process_group()


@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("world_size", [1, 2])
@pytest.mark.parametrize("reduce_buffer", [2 ** 23, 2 ** 40])
def test_gpt2(world_size, reduce_buffer):
    # Check that having trainable unused params is fine
    backend = "gloo"
    device = "cuda"
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_test_gpt2,
            args=(world_size, backend, device, temp_files[0], reduce_buffer),
            nprocs=world_size,
            join=True,
        )


def run_test_multiple_groups(rank, world_size, tempfile_name, backend, reduce_buffer_size):
    # Only use the even ranks, to check that the global_rank indexing is used properly
    dist.init_process_group(init_method="file://" + tempfile_name, backend=backend, rank=rank, world_size=world_size)

    sub_group_ranks = [0, 2]
    process_group = torch.distributed.new_group(ranks=sub_group_ranks, backend=backend)

    # Make sure that all the ranks get different training data
    # so that the sync check between their models is meaningful
    torch.manual_seed(rank)
    np.random.seed(rank)

    # Standard deep learning setup
    device = "cuda"
    torch.cuda.set_device(rank)

    epochs, batch, input_width, hidden, target_width = 5, 3, 20, 10, 5
    loss_fn = torch.nn.L1Loss().to(device)

    def check(optimizer, model):
        # Just run a couple of epochs, check that the model is properly updated
        for _ in range(epochs):
            target = torch.rand((batch, target_width), device=device)
            inputs = torch.rand((batch, input_width), device=device)

            def closure():
                optimizer.zero_grad()
                output = model(inputs)
                loss = loss_fn(output, target)
                loss.backward()
                return loss

            _ = optimizer.step(closure=closure)

            # Check that all the params are the same on all ranks
            check_same_models_across_ranks(
                model, process_group, params_should_be_equal=True, check_broadcast_buffers=True
            )

    if rank in sub_group_ranks:
        # Model that does not fit in the broadcast bucket
        model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
            device
        )

        # With SGD, Momentum is required to get a state to shard
        optimizer = OSS(model.parameters(), group=process_group, lr=1e-3, momentum=0.99)
        model = ShardedDataParallel(
            model, optimizer, process_group=process_group, reduce_buffer_size=reduce_buffer_size
        )
        check(optimizer, model)

    dist.destroy_process_group(process_group)


@skip_if_less_than_four_gpu
@pytest.mark.parametrize("reduce_buffer_size", [0, 2 ** 20])
@pytest.mark.parametrize("backend", ["gloo", "nccl"])
def test_multiple_groups(reduce_buffer_size, backend):
    world_size = 4
    with temp_files_ctx(num=1) as temp_files:
        mp.spawn(
            run_test_multiple_groups,
            args=(world_size, temp_files[0], backend, reduce_buffer_size),
            nprocs=world_size,
            join=True,
        )