# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.

# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring

import copy
from math import inf
import tempfile
from typing import Any, Dict, Type, cast
import unittest

import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

import fairscale.optim as optim
from fairscale.utils.testing import (
    check_same_model_params,
    skip_if_no_cuda,
    skip_if_py39_no_cuda,
    skip_if_single_gpu,
    torch_version,
)

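# Prefer NCCL whenever CUDA is available, fall back to Gloo on CPU-only hosts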
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO  # type: ignore
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")
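# Consolidate on a non-default rank, which helps catch any hardcoded rank-0 assumption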
RECIPIENT_RANK = 1

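# Recent torch releases ship broadcast_object_list natively; fall back to the fairscale helper otherwise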
try:
    from torch.distributed import broadcast_object_list  # noqa

    _torch_broadcast_object = True
except ImportError:
    from fairscale.optim.utils import broadcast_object  # noqa

    _torch_broadcast_object = False


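# Rendezvous through a shared temp file, so that the tests do not depend on a free TCP port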
def dist_init(rank, world_size, tempfile_name, backend=BACKEND):
    url = "file://" + tempfile_name
    dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)


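# Broadcast a picklable object from reference_rank to all the other ranks, using the best available primitive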
def sync_object_ranks(something_to_sync: Any, reference_rank: int, device: torch.device) -> Any:
    if _torch_broadcast_object:
        package = [something_to_sync]
        dist.broadcast_object_list(package, src=reference_rank, group=dist.group.WORLD)
        package_sync = package[0]
    else:
        package_sync = optim.utils.broadcast_object(
            something_to_sync, src_rank=reference_rank, group=dist.group.WORLD, dist_device=device
        )

    return package_sync


class TestSingleRank(unittest.TestCase):
    """
    All the following tests do not check for inter-process communication
    """

    def setUp(self):
        dist_init(0, 1, tempfile.mkstemp()[1])

    def tearDown(self):
        torch.distributed.destroy_process_group()

    def test_create(self):
        params = [torch.rand(1)]
        o = optim.OSS(params, lr=0.01)

    def test_state_dict(self):
        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], lr=0.1, momentum=0.9)
        x.backward()
        o.step()
        assert x == torch.tensor([0.9], device=DEVICE)
        assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
        o.zero_grad()
        o.consolidate_state_dict()  # Sync state dict in between replicas - even if there are none
        state_dict = o.state_dict()

        # Check that the state dict is pytorch-compliant, key-wise
        assert "param_groups" in state_dict.keys()
        assert "state" in state_dict.keys()

        # Check that the pulled state is what we expect, and that we have all the expected keys
        assert state_dict["param_groups"][0]["lr"] == 0.1
        assert state_dict["param_groups"][0]["momentum"] == 0.9
        assert not state_dict["param_groups"][0]["nesterov"]
        assert state_dict["param_groups"][0]["weight_decay"] == 0.0
        assert state_dict["param_groups"][0]["dampening"] == 0.0

        # Check that the pulled state and the .param_groups attribute are in sync
        for k in state_dict["param_groups"][0].keys():
            if k != "params":
                assert state_dict["param_groups"][0][k] == o.param_groups[0][k]

        # Check that it's correctly loaded
        o = optim.OSS([x], lr=0.01)
        o.load_state_dict(state_dict)
        # Check that state is correct and on proper device
        assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)

        # We should now be using a lr of 0.1, both within the optimizer
        # and as exposed by the .param_groups attribute
        assert o.param_groups[0]["lr"] == 0.1
        x.backward()
        o.step()
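        # Expected after reload + one step: grad is 1.0, so the momentum buffer
        # becomes 0.9 * 1.0 + 1.0 = 1.9 and x = 0.9 - 0.1 * 1.9 = 0.71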
        assert x == torch.tensor([0.71], device=DEVICE)
        assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.9], device=DEVICE)

        # Check that the exposed param_groups are on the proper device
        assert o.param_groups[0]["params"][0].device == x.device

    def test_lr_scheduler(self):
        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], lr=0.01)
        o2 = torch.optim.SGD([x2], lr=0.01)
        s = torch.optim.lr_scheduler.StepLR(o, 1)
        s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
        for _ in range(5):
            x.backward()
            o.zero_grad()
            o.step()
            s.step()
            x2.backward()
            o2.zero_grad()
            o2.step()
            s2.step()
            assert x == x2

    def test_step_with_kwargs(self):
        class SGDWithStepKWArg(torch.optim.SGD):
            def step(self, closure=None, kwarg=[]):
                super().step()
                kwarg.append(5)

        kwarg = []
        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)
        x.backward()
        o.step(0, kwarg=kwarg)
        assert kwarg == [5]
        assert x == torch.tensor([0.9], device=DEVICE)

    def test_step_with_extra_inner_key(self):
        class SGDWithNewKey(torch.optim.SGD):
            # Dummy optimizer which adds a new key to the param groups
            def step(self, closure=None):
                super().step()
                self.param_groups[0]["new_key"] = 0.1

        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], SGDWithNewKey, lr=0.1)
        x.backward()
        o.step()
        assert o.param_groups[0]["new_key"] == 0.1
        assert x == torch.tensor([0.9], device=DEVICE)

    def test_step_without_closure(self):
        class SGDWithoutClosure(torch.optim.SGD):
            def step(self):
                return super().step()

        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], SGDWithoutClosure, lr=0.1)
        x.backward()
        o.step()
        assert x == torch.tensor([0.9], device=DEVICE)

    def test_implicit_local_state_dict(self):
        x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
        o = optim.OSS([x], lr=0.1)
        with pytest.raises(RuntimeError):
            _ = o.state_dict()


def run_test_add_param_group(rank, world_size, tempfile_name):
    dist_init(rank, world_size, tempfile_name)

    # Test with all parameters trainable to begin with
    def all_trainable():
        params = []
        sizes = [9, 7, 5, 3]
        sizes_world = sizes * world_size
        for size in sizes_world[:-1]:
            params.append(torch.rand(size, 1))

        # Make sure that the params are trainable, enforces size-based partitioning
        for p in params:
            p.requires_grad = True

        o = optim.OSS(params, lr=0.1)

        assert len(o.param_groups) == 1
        o.add_param_group({"params": [torch.rand(3, 1)]})

        assert len(o.param_groups) == 2

        # Verify that the added group lands in the correct partition, so that all ranks hold the same number of elements
        assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == sum(sizes)
        assert len(o.optim.param_groups) == 2

    # Test a pathological config with a first big non-trainable param
    def some_trainable():
        params = []
        for size in [100, 3, 5, 2, 6, 4]:
            params.append(torch.rand(size, 1))

        # Make sure that the params are trainable, enforces size-based partitioning
        for p in params[1:]:
            p.requires_grad = True

        o = optim.OSS(params, lr=0.1)

        assert len(o.param_groups) == 1
        o.add_param_group({"params": [torch.rand(3, 1)]})

        assert len(o.param_groups) == 2
        assert len(o.optim.param_groups) == 2

    all_trainable()
    some_trainable()

    dist.destroy_process_group()


def test_add_param_group():
    world_size = 4
    if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
        world_size = min(world_size, torch.cuda.device_count())

    mp.spawn(run_test_add_param_group, args=(world_size, tempfile.mkstemp()[1]), nprocs=world_size, join=True)


def run_test_zero_grad(rank, world_size, tempfile_name):
    dist_init(rank, world_size, tempfile_name)
    x = torch.rand(1)
    m = torch.nn.Linear(1, 1)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    assert m.weight.grad
    assert m.bias.grad
    o.zero_grad()
    assert not m.weight.grad
    assert not m.bias.grad

    dist.destroy_process_group()


def test_zero_grad():
    world_size = 2
    if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
        world_size = min(world_size, torch.cuda.device_count())

    temp_file_name = tempfile.mkstemp()[1]
    mp.spawn(run_test_zero_grad, args=(world_size, temp_file_name), nprocs=world_size, join=True)


def run_test_empty_shard(rank, world_size, tempfile_name, backend):
    dist_init(rank, world_size, tempfile_name, backend=backend)
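    # Linear(1, 1) only holds two parameter tensors, so with enough ranks some shards are guaranteed to be empty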
    m = torch.nn.Linear(1, 1)
    x = torch.rand(20, 1)

    if torch.cuda.is_available():
        m = m.to(rank)
        x = x.to(rank)

    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x).sum()
    y.backward()
    o.step()

    dist.destroy_process_group()


@pytest.mark.parametrize("backend", ["gloo", "nccl"])
def test_empty_shard(backend):
    world_size = 4
    if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
        world_size = min(world_size, torch.cuda.device_count())
    if world_size == 1 or (backend == "nccl" and not torch.cuda.is_available()):
        pytest.skip("Not enough GPUs to test with NCCL, or CUDA not present")
    mp.spawn(run_test_empty_shard, args=(world_size, tempfile.mkstemp()[1], backend), nprocs=world_size, join=True)


def run_test_step(rank, world_size, tempfile_name):
    dist_init(rank, world_size, tempfile_name, backend="gloo")
    x = torch.tensor([float(rank + 1)], device=rank)
    m = torch.nn.Linear(1, 1)
    m.weight.data = torch.tensor([[1.0]])
    m.bias.data = torch.tensor([2.0])
    m.to(rank)
    o = optim.OSS(m.parameters(), lr=0.1)
    y = m(x)
    y.backward(x)
    for p in m.parameters():
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data /= world_size
    o.step()
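    # With the expected world size of 2, the ranks feed x=1 and x=2; y.backward(x) gives grad_w = x * x and
    # grad_b = x, so the averaged gradients are (1 + 4) / 2 = 2.5 and (1 + 2) / 2 = 1.5,
    # and one step at lr=0.1 gives w = 1 - 0.25 = 0.75 and b = 2 - 0.15 = 1.85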
    assert m.weight == torch.tensor([[0.75]], device=rank)
    assert m.bias == torch.tensor([1.85], device=rank)

    dist.destroy_process_group()


@skip_if_single_gpu
def test_step():
    world_size = 2
    temp_file_name = tempfile.mkstemp()[1]

    mp.spawn(run_test_step, args=(world_size, temp_file_name), nprocs=world_size, join=True)


def run_test_step_with_closure(rank, world_size, tempfile_name, optimizer=None):
    dist_init(rank, world_size, tempfile_name)

    x_val = rank + 1
    weight = 1.0
    bias = 2.0
    error = 1.0
    target = torch.tensor([x_val * weight + bias + error], device=rank)
    loss_fn = torch.nn.L1Loss()

    x = torch.tensor([float(x_val)], device=rank)
    m = torch.nn.Linear(1, 1)
    m.weight.data = torch.tensor([[weight]])
    m.bias.data = torch.tensor([bias])
    m.to(rank)

    o = optim.OSS(m.parameters(), lr=0.1)

    y = m(x)
    y.backward(x)
    for p in m.parameters():
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data /= world_size

    def closure():
        o.zero_grad()
        output = m(x)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    loss = o.step(closure=closure)

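    # The model prediction is off by exactly `error`, so the closure loss must equal it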
    assert loss == torch.tensor(error, device=rank)
    assert m.weight == torch.tensor([[1.1]], device=rank)
    assert m.bias == torch.tensor([2.1], device=rank)

    dist.destroy_process_group()


@skip_if_no_cuda
def test_step_with_closure():
    world_size = min(2, torch.cuda.device_count())
    temp_file_name = tempfile.mkstemp()[1]

    mp.spawn(run_test_step_with_closure, args=(world_size, temp_file_name), nprocs=world_size, join=True)


def run_test_sharding(rank, world_size, tempfile_name):
    dist_init(rank, world_size, tempfile_name)
    params = []
    sizes = [9, 7, 5, 3]
    sizes_world = sizes * world_size

    for size in sizes_world:
        params.append(torch.rand(size, 1))

    # Make sure that the params are trainable, enforces size-based partitioning
    for p in params:
        p.requires_grad = True

    o = optim.OSS(params, lr=0.1)
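    # The sizes repeat world_size times, so a balanced partition should leave each rank with sum(sizes) elements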
    assert sum([x.numel() for x in o.optim.param_groups[0]["params"]]) == sum(sizes)

    dist.destroy_process_group()


def test_sharding():
    world_size = 4
    if torch.cuda.is_available():
        world_size = min(world_size, torch.cuda.device_count())

    _, temp_file_name = tempfile.mkstemp()
    mp.spawn(run_test_sharding, args=(world_size, temp_file_name), nprocs=world_size, join=True)


def run_test_collect_shards(rank, world_size, reference_rank, tempfile_name):
    dist_init(rank, world_size, tempfile_name)
    device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE

    # Run a dummy step so that the optimizer state dict exists
    batch, input_width, hidden, target_width = 3, 3, 3, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)

    model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
    model.to(device)

    loss_fn = torch.nn.L1Loss()
    loss_fn.to(device)

    # With SGD, Momentum is required to get a state to shard
    optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)

    def closure():
        optimizer.zero_grad()
        output = model(inputs)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    _ = optimizer.step(closure=closure)

    # Update the optimizer state on the reference rank
    optimizer.consolidate_state_dict(recipient_rank=reference_rank)

    # Fetch the state on the reference rank
    # - check that it has the correct size
    # - load it again
    if rank == reference_rank:
        optimizer_state_dict = optimizer.state_dict()
        assert len(optimizer_state_dict["state"]) == len(list(model.parameters()))
    else:
        optimizer_state_dict = {}

    # distribute to the other ranks
    optimizer_state_dict = sync_object_ranks(optimizer_state_dict, reference_rank, device)

    # Load the optimizer state dict
    optimizer.load_state_dict(optimizer_state_dict)

    # Check that the states are not None but (possibly empty) dicts: iterating over them must not fail
    for state in optimizer.state.values():
        for _, _ in state.items():
            pass
    dist.destroy_process_group()


# TODO(blefaudeux) Fix for torch v1.8.0
@pytest.mark.skipif(torch.__version__.split("+")[0].split(".") == ["1", "8", "0"], reason="disabled for torch 1.8.0")
def test_collect_shards():
    world_size = 3
    temp_file_name = tempfile.mkstemp()[1]

    if torch.cuda.is_available():
        world_size = min(world_size, torch.cuda.device_count())
    reference_rank = 0

    mp.spawn(
        run_test_collect_shards, args=(world_size, reference_rank, temp_file_name), nprocs=world_size, join=True,
    )


def run_test_reproducibility(rank, world_size, reference_rank, tempfile_name):
    dist_init(rank, world_size, tempfile_name)
    device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE

    # Run a dummy step so that the optimizer state dict exists
    batch, input_width, hidden, target_width = 3, 3, 3, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)

    model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
    model.to(device)

    loss_fn = torch.nn.L1Loss()
    loss_fn.to(device)

    optimizer = optim.OSS(model.parameters(), optim=torch.optim.RMSprop, lr=0.1)

    def closure():
        optimizer.zero_grad()
        output = model(inputs)
        loss = loss_fn(output, target)
        loss.backward()
        return loss

    _ = optimizer.step(closure=closure)

    # Update the optimizer state on the reference rank
    optimizer.consolidate_state_dict(recipient_rank=reference_rank)

    # Fetch the state on the reference rank, broadcast to the other ones
    if rank == reference_rank:
        optimizer_state_dict = optimizer.state_dict()
    else:
        optimizer_state_dict = {}

    # Make sure that every rank holds a copy of the reference state before moving on
    optimizer_state_dict = sync_object_ranks(optimizer_state_dict, reference_rank, device)

    # Run two steps, log the loss
    _ = optimizer.step(closure=closure)
    reference_loss = optimizer.step(closure=closure)

    # Load the optimizer state dict, rewind the state two steps back
    optimizer.load_state_dict(optimizer_state_dict)

    # Run two new steps, log the loss again and check that we get the same
    _ = optimizer.step(closure=closure)
    test_loss = optimizer.step(closure=closure)

    assert torch.allclose(reference_loss, test_loss)

    dist.destroy_process_group()


# TODO(blefaudeux) Fix for torch v1.8.0
@pytest.mark.skipif(torch.__version__.split("+")[0].split(".") == ["1", "8", "0"], reason="disabled for torch 1.8.0")
def test_reproducibility():
    world_size = 2
    temp_file_name = tempfile.mkstemp()[1]

    if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
        # Bail out if not enough devices
        return

    reference_rank = 0

    mp.spawn(
        run_test_reproducibility, args=(world_size, reference_rank, temp_file_name), nprocs=world_size, join=True,
    )


def run_test_multiple_groups(rank, world_size, tempfile_name):
    # Only work with the even ranks, to check that the global_rank indexing is properly used
    dist_init(rank=rank, world_size=world_size, tempfile_name=tempfile_name, backend="gloo")
    sub_group_ranks = [0, 2, 4]
    process_group = torch.distributed.new_group(ranks=sub_group_ranks, backend="gloo")

    # Make sure that all the ranks get different training data,
    # so that the sync check between their models is meaningful
    torch.manual_seed(rank)
    np.random.seed(rank)

    # Standard deep learning setup
    device = "cpu"
    epochs, batch, input_width, hidden, target_width = 5, 3, 20, 10, 5
    loss_fn = torch.nn.L1Loss().to(device)

    def check(optimizer):
        # Just run a couple of epochs, check that the model is properly updated
        for _ in range(epochs):
            target = torch.rand((batch, target_width), device=device)
            inputs = torch.rand((batch, input_width), device=device)

            def closure():
                optimizer.zero_grad()
                output = model(inputs)
                loss = loss_fn(output, target)
                loss /= world_size
                loss.backward()
                dist.all_reduce(loss, group=process_group)  # Not strictly needed for the test below

                return loss

            _ = optimizer.step(closure=closure)

            # Check that all the params are the same on all ranks
            for pg in optimizer.param_groups:
                for p in pg["params"]:
                    receptacle = [p.clone() for _ in sub_group_ranks] if rank == 0 else []
                    dist.gather(p, receptacle, dst=0, group=process_group)
                    if rank == 0:
                        for sync_p in receptacle[1:]:
                            assert torch.all(
                                torch.eq(receptacle[0], sync_p)
                            ), "Models differ in between ranks {} - {}".format(
                                torch.norm(receptacle[0]), torch.norm(sync_p)
                            )

    if rank in sub_group_ranks:
        # Model fitting in the broadcast bucket
        model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
            device
        )

        # With SGD, Momentum is required to get a state to shard
        optimizer = optim.OSS(
            model.parameters(), lr=0.1, momentum=0.99, group=process_group, broadcast_buffer_size=2 ** 20
        )
        check(optimizer)

        # Model not fitting in the broadcast bucket
        model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
            device
        )

        # With SGD, Momentum is required to get a state to shard
        optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99, group=process_group, broadcast_buffer_size=0)
        check(optimizer)

    dist.destroy_process_group(process_group)


@skip_if_py39_no_cuda
def test_multiple_groups():
    world_size = 6
    temp_file_name = tempfile.mkstemp()[1]

    mp.spawn(
        run_test_multiple_groups, args=(world_size, temp_file_name), nprocs=world_size, join=True,
    )


def run_gradient_clipping(rank, world_size, tempfile_name):
    dist_init(rank, world_size, tempfile_name, backend="gloo")
    device = torch.device(rank)
    torch.manual_seed(rank)  # make sure that the different ranks get different data

    # Run a dummy step so that the optimizer state dict exists
    batch, input_width, hidden, target_width = 3, 20, 10, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)
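    # Exercise float and int norm specifications as well as the infinity norm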
    NORMS = [1.0, 2.0, 1, 2, inf]
    CLIP_NORM = 0.3

    def check(norm):
        model_oss = torch.nn.Sequential(
            torch.nn.Linear(input_width, hidden),
            torch.nn.Linear(hidden, hidden),
            torch.nn.Linear(hidden, target_width),
        ).to(device)
        model = copy.deepcopy(model_oss)

        # For this test the gradients are (all-)reduced in the same way for the torch reference and for fairscale.
        # Normally OSS would be paired with ShardedDDP, which reduces each gradient only to its owning rank, but that
        # would not change the gradient norm computation done by OSS and would add a dependency.
        # To keep the comparison apples-to-apples, DDP is used in both cases.
        model_oss = DDP(module=model_oss, device_ids=[rank],)
        sharded_optimizer = optim.OSS(model_oss.parameters(), lr=0.1, momentum=0.99)

        model = DDP(model, device_ids=[rank],)

        loss_fn = torch.nn.L1Loss()
        loss_fn.to(device)

        model.zero_grad()
        model_oss.zero_grad()

        outputs = model(inputs)
        outputs_oss = model_oss(inputs)

        loss = loss_fn(outputs, target)
        loss.backward()

        loss_oss = loss_fn(outputs_oss, target)
        loss_oss.backward()
        torch.testing.assert_allclose(loss_oss, loss)

        # Check the equivalence with the non-sharded optim
        oss_total_norm = sharded_optimizer.clip_grad_norm(CLIP_NORM, norm_type=norm)
        total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_NORM, norm_type=norm)
        assert torch.allclose(oss_total_norm, total_norm), "torch and fairscale should return the same grad norm"

        # Check that the params have indeed been clipped
        for params in sharded_optimizer.per_device_params.values():
            for param in filter(lambda x: x.grad is not None, params[rank]):
                assert torch.norm(param.grad, p=norm) < CLIP_NORM, f"param grad norm above clip : {param.grad}"

    for norm in NORMS:
        print(f"Checking norm {norm}")
        check(norm)

        # Check twice, to catch a hypothetical iterator mistake
        check(norm)

    dist.destroy_process_group()


@skip_if_no_cuda
def test_gradient_clipping():
    world_size = 3
    temp_file_name = tempfile.mkstemp()[1]

    if torch.cuda.is_available():
        world_size = min(world_size, torch.cuda.device_count())
    reference_rank = 0

    mp.spawn(
        run_gradient_clipping, args=(world_size, temp_file_name), nprocs=world_size, join=True,
    )


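# Check that saving/loading the sharded optimizer state keeps two identical model tracks in sync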
def run_state_dict_distributed(rank, world_size, tempfile_name):
    dist_init(rank, world_size, tempfile_name, backend="gloo")

    device = torch.device(rank)
    torch.manual_seed(rank)  # make sure that the different ranks get different data

    # Set up two problems in parallel: the second track (with save/load) must keep following the first one (untouched).
    # The model is split in two to test support for multiple param groups
    batch, input_width, hidden, target_width = 3, 20, 10, 5
    target = torch.rand((batch, target_width), device=device)
    inputs = torch.rand((batch, input_width), device=device)

    model_oss1 = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, hidden)).to(device)
    head_oss1 = torch.nn.Linear(hidden, target_width).to(device)

    model_oss2 = copy.deepcopy(model_oss1)
    head_oss2 = copy.deepcopy(head_oss1)

    # For this test the gradients are (all-)reduced in the same way for the torch reference and for fairscale.
    # Normally OSS would be paired with ShardedDDP, which reduces each gradient only to its owning rank, but that
    # would not change the gradient norm computation done by OSS and would add a dependency.
    # To keep the comparison apples-to-apples, DDP is used in both cases.
    model_oss1 = DDP(module=model_oss1, device_ids=[rank],)
    sharded_optimizer1 = optim.OSS(model_oss1.parameters(), lr=0.1, momentum=0.99)
    sharded_optimizer1.add_param_group({"params": head_oss1.parameters()})

    model_oss2 = DDP(module=model_oss2, device_ids=[rank],)
    sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=0.1, momentum=0.99)
    sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})

    loss_fn = torch.nn.L1Loss().to(device)

    def run_grad_step(model, head, optimizer):
        model.zero_grad()
        outputs = head(model(inputs))
        loss = loss_fn(outputs, target)
        loss.backward()
        optimizer.step()
    # pull the current state, broadcast it to all ranks
    sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)  # all ranks
    state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
    state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)

    # re-create a new optimizer from scratch with absurd values, load the previous state
    sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=1e6, momentum=0.0001)
    sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
    sharded_optimizer2.load_state_dict(state_dict2)
    check_same_model_params(
        model_oss1, model_oss2, "parameters of the two identical models have diverged (before any steps)"
    )

    # now take a step and check that parameters are equal
    run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
    run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
    check_same_model_params(
        model_oss1, model_oss2, "parameters of the two identical models have diverged (after stepping)"
    )

    # save the state dict for one model only, then distribute to the other ranks
    sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)  # all ranks
    state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
    state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)

    # Check that the pulled state and the .param_groups attribute are in sync
    for replica in range(len(state_dict2["param_groups"])):
        for k in state_dict2["param_groups"][replica].keys():
            if k != "params":
                assert state_dict2["param_groups"][replica][k] == sharded_optimizer2.param_groups[replica][k]

    # take a step
    run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
    run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
    check_same_model_params(
        model_oss1, model_oss2, "parameters of the two identical models have diverged (after consolidating)"
    )

    # save again for one rank, then distribute to the others
    sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)  # all ranks
    state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
    state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)

    # reload the state_dict
    sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=0.1, momentum=0.99)
    sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
    sharded_optimizer2.load_state_dict(state_dict2)

    # take a step
    run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
    run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
    check_same_model_params(
        model_oss1, model_oss2, "parameters of the two identical models have diverged (after reloading)"
    )

    dist.destroy_process_group()


@skip_if_no_cuda
def test_state_dict_distributed():
    world_size = 2
    temp_file_name = tempfile.mkstemp()[1]

    if torch.cuda.is_available():
        world_size = max(world_size, torch.cuda.device_count())

    mp.spawn(
        run_state_dict_distributed, args=(world_size, temp_file_name), nprocs=world_size, join=True,
    )


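# Check that OSS matches an equivalent vanilla optimizer when both run under DDP, step for step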
def run_ddp_parity(rank, world_size, backend, temp_file_name, change_train_graph):
    url = "file://" + temp_file_name
    dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)

    device = torch.device("cuda")
    torch.cuda.set_device(rank)
    torch.manual_seed(rank)
    np.random.seed(rank)
    hidden = 5
    in_channels = 3
    out_channels = 3
    batch = 64

    def check_optimizer_equivalence(optimizer: Type[torch.optim.Optimizer], change_train_graph: bool = False):
        # Any model works. Add one different buffer per rank
        trunk = torch.nn.Sequential(
            torch.nn.Linear(in_channels, hidden), torch.nn.Linear(hidden, hidden), torch.nn.Linear(hidden, hidden)
        )
        trunk.register_buffer("test_buffer", torch.ones((1)) * rank)
        trunk.to(device)

        head = torch.nn.Linear(hidden, out_channels).to(device)

        # Define a model to be trained by OSS
        oss_module = torch.nn.Sequential(trunk, head)

        # Make sure that the param groups are interleaved, to catch an ordering bug in the state dict
        oss_trainable_params = [
            {"params": list(trunk.parameters())[:-1] + list(head.parameters()), "lr": 1e-5},
            {"params": list(trunk.parameters())[-1], "lr": 1e-4},
        ]

        optimizer_settings: Dict[Any, Any] = {}
        if optimizer is torch.optim.SGD:  # `optimizer` is the class itself here, not an instance
            optimizer_settings["momentum"] = 0.9

        sharded_optimizer = optim.OSS(
            params=oss_trainable_params,
            optim=optimizer,
            group=None,
            broadcast_buffer_size=2 ** 10,
            **optimizer_settings,
        )

        oss_ddp_model = DDP(module=oss_module, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)

        # Define a model to be trained by normal pytorch + DDP
        ddp_trunk = copy.deepcopy(trunk)
        ddp_head = copy.deepcopy(head)
        ddp_module = torch.nn.Sequential(ddp_trunk, ddp_head)

        ddp_trainable_params = [
            {"params": list(ddp_trunk.parameters())[:-1] + list(ddp_head.parameters()), "lr": 1e-5},
            {"params": list(ddp_trunk.parameters())[-1], "lr": 1e-4},
        ]
        ddp_optimizer = optimizer(ddp_trainable_params, **optimizer_settings)  # type: ignore
        ddp_model = DDP(module=ddp_module, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)

        def check_step():
            input_tensor = torch.rand((batch, in_channels)).to(device)

            def closure_ddp(input_tensor=input_tensor):
                ddp_optimizer.zero_grad()
                ddp_loss = ddp_model(input_tensor).abs().sum()
                ddp_loss.backward()
                return ddp_loss

            def closure_sharded(input_tensor=input_tensor):
                sharded_optimizer.zero_grad()
                sharded_loss = oss_ddp_model(input_tensor).abs().sum()
                sharded_loss.backward()
                return sharded_loss

            loss_ddp = cast(torch.Tensor, ddp_optimizer.step(closure=closure_ddp))
            loss_sharded_optim = cast(torch.Tensor, sharded_optimizer.step(closure=closure_sharded))

            assert torch.allclose(
                loss_ddp, loss_sharded_optim, rtol=1e-3
            ), f"Losses differ in between Pytorch optim and OSS\n {loss_ddp.item()} - {loss_sharded_optim.item()} - world size {world_size}"

            check_same_model_params(oss_ddp_model, ddp_model)

        # The models should be synchronized across ranks at construction time; check that
        check_same_model_params(oss_ddp_model, ddp_model)

        # The models should stay identical between DDP and the sharded optimizer
        for i in range(5):
            check_step()

            # Check that altering the trainable parameters does not cause DDP and OSS to diverge
            if change_train_graph:
                # Flip the first parameter from trainable to non-trainable and vice-versa
                next(ddp_module.parameters()).requires_grad = not next(ddp_module.parameters()).requires_grad
                next(oss_module.parameters()).requires_grad = not next(oss_module.parameters()).requires_grad
                # sharded_optimizer.refresh_trainable()

        # Check that the checkpoints are compatible (post pytorch 1.5)
        if torch_version()[1] > 5:
            # - get states
            ddp_state_dict = ddp_optimizer.state_dict()
            sharded_optimizer.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)
            sharded_optim_state_dict = sharded_optimizer.state_dict() if rank == RECIPIENT_RANK else {}
            sharded_optim_state_dict = sync_object_ranks(sharded_optim_state_dict, RECIPIENT_RANK, device)

            # - cross load the states
            # run one step and check that the models are still the same
            ddp_state_dict_ref = copy.deepcopy(ddp_state_dict)  # OSS will remove some states
            ddp_optimizer.load_state_dict(sharded_optim_state_dict)  # mixup on purpose !
            sharded_optimizer.load_state_dict(ddp_state_dict)
            check_step()

            #  - self load, rewind, check no problem
            # run one step and check that the models are still the same
            ddp_optimizer.load_state_dict(ddp_state_dict_ref)
            sharded_optimizer.load_state_dict(sharded_optim_state_dict)
            check_step()

    for opt in [torch.optim.Adam, torch.optim.SGD]:
        check_optimizer_equivalence(opt, change_train_graph=change_train_graph)

    dist.destroy_process_group()


@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("change_train_graph", [True, False])
@pytest.mark.parametrize("backend", [dist.Backend.NCCL, dist.Backend.GLOO])
def test_ddp_parity(change_train_graph: bool, backend: dist.Backend):
    temp_file_name = tempfile.mkstemp()[1]
    world_size = torch.cuda.device_count()
    mp.spawn(
        run_ddp_parity, args=(world_size, backend, temp_file_name, change_train_graph), nprocs=world_size, join=True
    )