"sgl-kernel/vscode:/vscode.git/clone" did not exist on "b5dcfd4154262307cbaf595522acf445bd4c482c"
test_pipe.py 24.6 KB
Newer Older
Tom Birch's avatar
Tom Birch committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.

# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
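
"""Tests for MultiProcessPipe and AsyncPipe: constructor validation,
checkpointing modes, inter-rank communication, and lazy construction."""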

from collections import OrderedDict
from copy import deepcopy
import os
import time

import pytest
import torch
from torch import nn

from fairscale.nn.model_parallel.initialize import (
    destroy_model_parallel,
    get_pipeline_parallel_group,
    initialize_model_parallel,
)
from fairscale.nn.pipe import AsyncPipe, LazyModule, MultiProcessPipe
from fairscale.utils.testing import get_worker_map, torch_spawn, torch_version


@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def parameters(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))
    pipe = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=1)
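    # balance=[1] places the whole model on rank 0, so only rank 0 should
    # expose any parameters.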
    if torch.distributed.get_rank() == 0:
        assert list(pipe.parameters()) != []
    else:
        assert list(pipe.parameters()) == []


@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def infiniband():
    if torch.distributed.get_rank() == 0:
        t = torch.Tensor(range(100)).cuda()
        torch.distributed.broadcast(t, 0)
    else:
        t = torch.empty(100).cuda()
        torch.distributed.broadcast(t, 0)

    assert torch.equal(t, torch.Tensor(range(100)).cuda())
    print(f"t on {torch.distributed.get_rank()} is {t}")


@torch_spawn([2])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def infiniband2():
    if torch.distributed.get_rank() == 0:
        t = torch.Tensor(range(100)).cuda()
        torch.distributed.send(t, 1, group=get_pipeline_parallel_group())
    else:
        t = torch.empty(100).cuda()
        torch.distributed.recv(t, 0, group=get_pipeline_parallel_group())

    assert torch.equal(t, torch.Tensor(range(100)).cuda())
    print(f"t on {torch.distributed.get_rank()} is {t}")


@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def infiniband3():
    t = torch.Tensor(range(100)).cuda()
    torch.distributed.all_reduce(t, op=torch.distributed.ReduceOp.SUM)
    assert torch.equal(t, torch.Tensor(range(0, 200, 2)).cuda())


@torch_spawn([2])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
def mpi():
    seed = 1234
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    torch.distributed.barrier()
    tensor_size = (1024, 1024, 10)
    torch.cuda.set_device(torch.distributed.get_rank())  # need to pin device or ucx gets unhappy

    if torch.distributed.get_rank() == 0:
        # t = torch.Tensor(range(10)).cuda(0)
        t = torch.rand(*tensor_size).cuda(0)
        torch.distributed.send(t, 1, tag=1234)
    else:
        t = torch.empty(*tensor_size).cuda(1)
        torch.distributed.recv(t, 0, tag=1234)
        t2 = torch.rand(*tensor_size).cuda(1)

        assert torch.equal(t, t2)


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def public_attrs(pipe_class):
    class MyString:
        def __init__(self, value):
            self.value = value

        def __str__(self):
            return self.value

    model = nn.Sequential(nn.Linear(1, 1))

    pipe = pipe_class(model, balance=(1,), worker_map=get_worker_map(), chunks=42.000, checkpoint=MyString("always"),)
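    # balance, chunks and checkpoint are deliberately passed as a tuple, a
    # float and a str-like object; the pipe should normalize them below.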

    assert pipe.balance == [1]
    assert pipe.chunks == 42
    assert isinstance(pipe.chunks, int)
    assert pipe.checkpoint == "always"
    assert isinstance(pipe.checkpoint, str)


@torch_spawn([2])
@pytest.mark.parametrize("balance", [[2], [1, 1]])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def sequential_like(balance, pipe_class):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)

    model = nn.Sequential(a, b)
    model = pipe_class(model, balance, worker_map=get_worker_map())
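    # The pipe exposes only the partition local to this rank, so __len__ and
    # __getitem__ act like a per-rank slice of the original nn.Sequential.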

    if balance == [2]:
        if torch.distributed.get_rank() == 0:
            assert len(model) == 2
            assert list(model) == [a, b]

            assert model[0] is a
            assert model[1] is b
            with pytest.raises(IndexError):
                _ = model[2]

            assert model[-1] is b
            assert model[-2] is a
        else:
            assert len(model) == 0
            assert list(model) == []
    else:
        assert len(model) == 1
        if torch.distributed.get_rank() == 0:
            assert list(model) == [a]
            assert model[0] is a
            assert model[-1] is a
        else:
            assert list(model) == [b]
            assert model[0] is b
            assert model[-1] is b

        with pytest.raises(IndexError):
            _ = model[1]


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def balance_wrong_length(pipe_class):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)

    model = nn.Sequential(a, b)

    with pytest.raises(ValueError):
        pipe_class(model, balance=[1], worker_map=get_worker_map())

    with pytest.raises(ValueError):
        pipe_class(model, balance=[3], worker_map=get_worker_map())


@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def balance_less_than_1(pipe_class):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)

    model = nn.Sequential(a, b)

    with pytest.raises(ValueError):
        pipe_class(model, balance=[0, 2], worker_map=get_worker_map())

    with pytest.raises(ValueError):
        pipe_class(model, balance=[-1, 3], worker_map=get_worker_map())


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def chunks_less_than_1(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))

    with pytest.raises(ValueError):
        pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=0)

    with pytest.raises(ValueError):
        pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=-1)


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def too_few_devices(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1))

    with pytest.raises(IndexError):
        # len(balance) > group.size()
        model = pipe_class(model, balance=[1, 1, 1, 1], worker_map=get_worker_map())


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def batch_size_indivisible(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))
    model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=4)

    with pytest.warns(None) as record:
        model(torch.rand(7, 1))

    # Indivisible batch size is legal.
    assert not record


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def batch_size_small(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))
    model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=4)

    with pytest.warns(None) as record:
        model(torch.rand(2, 1))

    # Batch size smaller than chunks is legal.
    assert not record


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def checkpoint_mode(pipe_class):
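    # Depth-first count of autograd-graph nodes whose class name matches
    # `name`; `visited` prevents double-counting shared subgraphs.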
    def count_grad_fn(grad_fn, name, visited=None):
        if visited is None:
            visited = set()
        if grad_fn in visited:
            return 0
        visited.add(grad_fn)

        if grad_fn is None:
            return 0
        if grad_fn.__class__.__name__ == name:
            return 1

        counter = 0
        for next_grad_fn, _ in grad_fn.next_functions:
            counter += count_grad_fn(next_grad_fn, name, visited=visited)
        return counter

    model = nn.Sequential(nn.Linear(1, 1))
    input = torch.rand(2, 1)

    always = pipe_class(
        model, balance=[1], worker_map=get_worker_map(), chunks=2, checkpoint="always", pipelined_backward=False,
    )
    except_last = pipe_class(
        model, balance=[1], worker_map=get_worker_map(), chunks=2, checkpoint="except_last", pipelined_backward=False,
    )
    never = pipe_class(
        model, balance=[1], worker_map=get_worker_map(), chunks=2, checkpoint="never", pipelined_backward=False,
    )

    always_output = always(input)
    except_last_output = except_last(input)
    never_output = never(input)

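    # With chunks=2: "always" checkpoints both microbatches, "except_last"
    # checkpoints only the first, and "never" checkpoints none.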
    assert count_grad_fn(always_output.grad_fn, "CheckpointBackward") == 2
    assert count_grad_fn(except_last_output.grad_fn, "CheckpointBackward") == 1
    assert count_grad_fn(never_output.grad_fn, "CheckpointBackward") == 0


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def checkpoint_mode_invalid(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))

    with pytest.raises(ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"):
        pipe_class(
            model, balance=[1], worker_map=get_worker_map(), chunks=2, checkpoint="INVALID_CHECKPOINT",
        )


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def checkpoint_mode_when_chunks_1(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))

    # All checkpoint modes are fine.
    pipe_class(
        model, balance=[1], worker_map=get_worker_map(), chunks=1, checkpoint="except_last",
    )
    pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=1, checkpoint="always")
    pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=1, checkpoint="never")


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def checkpoint_eval(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))
    model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=2, pipelined_backward=False,)
    input = torch.rand(2, 1)

    def find_grad_fn(grad_fn, name):
        if grad_fn is None:
            return False
        if grad_fn.__class__.__name__ == name:
            return True
        for next_grad_fn, _ in grad_fn.next_functions:
            if find_grad_fn(next_grad_fn, name):
                return True
        return False

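    # In train mode, checkpointing should leave Checkpoint/Recompute nodes in
    # the autograd graph; in eval mode it should be disabled entirely.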
    model.train()
    train_output = model(input)
    assert find_grad_fn(train_output.grad_fn, "CheckpointBackward")
    assert find_grad_fn(train_output.grad_fn, "RecomputeBackward")

    model.eval()
    eval_output = model(input)
    assert not find_grad_fn(eval_output.grad_fn, "CheckpointBackward")
    assert not find_grad_fn(eval_output.grad_fn, "RecomputeBackward")


@torch_spawn([2])
@pytest.mark.xfail(torch_version() < (1, 6, 0), reason="Doesn't work on torch < 1.6.0", strict=True)
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def checkpoint_non_float_input(pipe_class):
    class ForkNonFloat(nn.Module):
        def forward(self, input):
            return (input * 2, torch.tensor([False]))

    class JoinNonFloat(nn.Module):
        def forward(self, input):
            return input[0] * 2

    model = nn.Sequential(ForkNonFloat(), JoinNonFloat())
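    # The bool tensor from ForkNonFloat cannot require grad, so checkpointing
    # must tolerate non-float tensors crossing partition boundaries.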
    model = pipe_class(
        model, balance=[1, 1], worker_map=get_worker_map(), chunks=1, checkpoint="always", pipelined_backward=False,
    )

    input = torch.rand(1, requires_grad=True)
    output = model(input)
    if model.group.rank() == 1:
        # with torch.autograd.detect_anomaly():
        output.backward()
    elif pipe_class == MultiProcessPipe:
        model.back_helper(output)

    torch.distributed.barrier()


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def no_grad(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))
    model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=2)
    input = torch.rand(2, 1)

    latent = None

    def hook(module, input, output):
        _ = module
        _ = input

        nonlocal latent
        latent = output

    partition = model.partitions[0]
    partition.module.register_forward_hook(hook)

    with torch.no_grad():
        model(input)

    assert latent.grad_fn is None


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def exception(pipe_class):
    class ExpectedException(Exception):
        pass

    class Raise(nn.Module):
        def forward(self, *_):
            raise ExpectedException()

    model = nn.Sequential(Raise())
    model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=1)

    with pytest.raises(ExpectedException):
        model(torch.rand(1))


# FIXME(tom) should probably signal to all hosts in group to stop
@torch_spawn([4])
@pytest.mark.skipif(torch.cuda.is_available() and torch.cuda.device_count() < 4, reason="Not enough GPUs")
@pytest.mark.xfail(strict=True)
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def exception_early_stop_asap(pipe_class):
    """Even the first partitions have finished to process, the partition before
    the failed partition hould be killed as soon as possible.
    """

    class ExpectedExceptio(Exception):
        pass

    class Pass(nn.Module):
        def forward(self, x):
            return x

    counter = 0

    class Counter(nn.Module):
        def forward(self, x):
            time.sleep(0.1)

            nonlocal counter
            counter += 1

            return x

    class Raise(nn.Module):
        def forward(self, x):
            raise ExpectedException()

    model = nn.Sequential(Pass(), Pass(), Counter(), Raise())
    model = pipe_class(model, [1, 1, 1, 1], worker_map=get_worker_map(), chunks=3)

    with pytest.raises(ExpectedException):
        model(torch.rand(3))

    # If the early stop doesn't work, it would be 3 instead.
    assert counter == 2


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def input_pair(pipe_class):
    class Two(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc_a = nn.Linear(1, 1)
            self.fc_b = nn.Linear(1, 1)

        def forward(self, a_and_b):
            a, b = a_and_b
            return (self.fc_a(a), self.fc_b(b))

    model = nn.Sequential(Two())
    model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=2, pipelined_backward=False,)

    a = torch.rand(10, 1, requires_grad=True)
    b = torch.rand(10, 1, requires_grad=True)

    a_out, b_out = model((a, b))
    loss = (a_out + b_out).mean()
    loss.backward()

    assert a.grad is not None
    assert b.grad is not None


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def input_singleton(pipe_class):
    class One(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(1, 1)

        def forward(self, only_a):
            (a,) = only_a
            return (self.fc(a),)

    model = nn.Sequential(One())
    model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=2, pipelined_backward=False,)

    a = torch.rand(10, 1, requires_grad=True)

    (a_out,) = model((a,))
    loss = a_out.mean()
    loss.backward()

    assert all(p.grad is not None for p in model.parameters())
    assert a.grad is not None


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def input_varargs(pipe_class):
    model = nn.Sequential(nn.Linear(1, 1))
    model = pipe_class(model, balance=[1], worker_map=get_worker_map())

    a = torch.rand(1)
    b = torch.rand(1)

    # TypeError: forward() takes 2 positional arguments but 3 were given
    with pytest.raises(TypeError):
        model(a, b)


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def non_tensor(pipe_class):
    class NonTensor(nn.Module):
        def forward(self, _):
            return "hello"

    model = nn.Sequential(NonTensor())
    model = pipe_class(model, balance=[1], worker_map=get_worker_map())
    x = torch.rand(1)

    # TypeError: expected Tensor as element 0 in argument 0, but got str
    with pytest.raises(TypeError):
        model(x)

    # TypeError: expected Tensor to scatter, but got str
    with pytest.raises(TypeError):
        model("hello")


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def non_tensor_tuple(pipe_class):
    class NonTensorTuple(nn.Module):
        def forward(self, x):
            return (x, "hello")

    model = nn.Sequential(NonTensorTuple())
    model = pipe_class(model, balance=[1], worker_map=get_worker_map())
    x = torch.rand(1)

    # TypeError: CheckpointBackward.forward: expected Variable (got str) for return value 1
    with pytest.raises(TypeError):
        model(x)

    # TypeError: expected Tensor to scatter, but got str
    with pytest.raises(TypeError):
        model((x, "hello"))


@torch_spawn([1])
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
@pytest.mark.parametrize("lazy", [True, False])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def deferred_batch_norm(checkpoint, lazy, pipe_class):
    bn = nn.BatchNorm2d(3)
    pipe_bn = deepcopy(bn)
    pipe_fn = lambda: pipe_bn  # noqa: E731
    if lazy:
        model = [LazyModule(pipe_fn)]
    else:
        model = nn.Sequential(pipe_bn)
    pipe = pipe_class(
        model, balance=[1], worker_map=get_worker_map(), chunks=2, checkpoint=checkpoint, deferred_batch_norm=True,
    )

    x = torch.rand(4, 3, 10, 10)
    pipe(x).mean().backward()
    bn(x).mean().backward()

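    # With deferred_batch_norm=True, the pipelined BN should end up with the
    # same running statistics as the reference BN run on the full batch.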
    assert torch.allclose(pipe[0].running_mean, bn.running_mean, atol=1e-4)
    assert torch.allclose(pipe[0].running_var, bn.running_var, atol=1e-4)


@torch_spawn([1])
@pytest.mark.parametrize("checkpoint", ["never", "always"])
@pytest.mark.parametrize("lazy", [True, False])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def deferred_batch_norm_params(checkpoint, lazy, pipe_class):
    bn = nn.BatchNorm2d(3)
    pipe_bn = deepcopy(bn)
    pipe_fn = lambda: pipe_bn  # noqa: E731
    if lazy:
        model = [LazyModule(pipe_fn)]
    else:
        model = nn.Sequential(pipe_bn)
    pipe = pipe_class(
        model, balance=[1], worker_map=get_worker_map(), chunks=1, checkpoint=checkpoint, deferred_batch_norm=True,
    )

    x = torch.rand(4, 3, 10, 10)
    pipe(x).mean().backward()
    bn(x).mean().backward()

    assert pipe[0].weight.grad is not None
    assert pipe[0].bias.grad is not None

    assert torch.allclose(pipe[0].weight.grad, bn.weight.grad, atol=1e-4)
    assert torch.allclose(pipe[0].bias.grad, bn.bias.grad, atol=1e-4)


@torch_spawn([4])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def devices(pipe_class):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)
    c = nn.Linear(1, 1)

    # There is one more rank than there are partitions.
    model = nn.Sequential(a, b, c)
    model = pipe_class(model, [1, 1, 1], worker_map=get_worker_map())

    # Extra devices must be discarded.
    if model.group.rank() == 3:
        assert model.pipeline is None


@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def partitions(pipe_class):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)

    model = nn.Sequential(a, b)
    model = pipe_class(model, [1, 1], worker_map=get_worker_map())

    assert isinstance(model.partitions, list)
    assert len(model) == 1
    assert isinstance(model.partitions[0].module, nn.Sequential)

    if model.group.rank() == 0:
        assert "0.0.weight" in model.state_dict()
    else:
        assert "0.1.weight" in model.state_dict()


@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def deny_moving(pipe_class):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)

    model = nn.Sequential(a, b)
    model = pipe_class(model, [1, 1], worker_map=get_worker_map())

    model.cuda()
    model.cpu()
    model.to(torch.device("cuda"))
    model.to(0)
    model.to("cuda")
    model.to(device=0)
    model.to(torch.rand(1))
    model.to(tensor=torch.rand(1))

    # Casting is allowed.
    model.half()
    model.to(torch.double)
    model.to(dtype=torch.float)


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def empty_module(pipe_class):
    # Empty sequential module is not illegal.
    model = nn.Sequential()
    model = pipe_class(model, [], worker_map=get_worker_map())

    assert model(torch.tensor([42])) == torch.tensor([42])
    assert model((torch.tensor([42]),)) == (torch.tensor([42]),)

    # But only a tensor or a tuple of tensors is legal in MultiProcessPipe.

    with pytest.raises(TypeError):
        model(42)


@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def named_children(pipe_class):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)

    model = nn.Sequential(OrderedDict([("a", a), ("b", b)]))
    model = pipe_class(model, [1, 1], worker_map=get_worker_map())

    names = set(n for n, _ in model.named_modules())
    if model.group.rank() == 0:
        assert "0.a" in names
    else:
        assert "0.b" in names

    # MultiProcessPipe doesn't support __getattr__. Unlike nn.Sequential, MultiProcessPipe requires
    # several methods in its namespace.
    with pytest.raises(AttributeError):
        model.a


@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def recommend_auto_balance(pipe_class):
    with pytest.raises(ValueError):
        # module and sum of balance have different length (module: 0, sum of balance: 1)
        pipe_class(nn.Sequential(), [1])

    with pytest.raises(ValueError):
        # module and sum of balance have different length (module: 2, sum of balance: 1)
        pipe_class(nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1)), [1])


@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def lazy_construction(pipe_class):
    init_count = 0

    class Custom(nn.Module):
        def __init__(self):
            super(Custom, self).__init__()
            nonlocal init_count
            init_count += 1

        def forward(self, x):
            return x

    model = [
        LazyModule(lambda: Custom()),
        LazyModule(lambda: Custom()),
        LazyModule(lambda: Custom()),
        LazyModule(lambda: Custom()),
    ]

    pipe = pipe_class(model, balance=[2, 2], worker_map=get_worker_map())
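    # Each rank should build only its own balance slice of the LazyModules,
    # so exactly two Custom instances are constructed per rank.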

    assert isinstance(pipe[0], Custom)
    assert isinstance(pipe[1], Custom)
    assert len(pipe) == 2
    assert init_count == 2


@torch_spawn([2])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" in os.environ, reason="doesn't apply to mpi")
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def missing_worker_map(pipe_class):
    model = nn.Sequential(nn.ReLU(), nn.ReLU())

    with pytest.raises(ValueError, match="'RpcTransport' requires 'worker_map' to be set"):
        pipe_class(model, [1, 1])


@torch_spawn([2])
@pytest.mark.skip(reason="currently broken")
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def verify_module_duplicate_parameters_on_distinct_partitions(pipe_class):
    class Surrogate(nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

    conv = nn.Conv2d(3, 3, 1)
    model = nn.Sequential(Surrogate(conv), Surrogate(conv))

    # FIXME(tom) can't have duplicate params with separate processes
    with pytest.raises(ValueError, match="module with duplicate parameters on distinct devices is not supported"):
        pipe_class(model, [1, 1], worker_map=get_worker_map())


@torch_spawn([4])
@pytest.mark.parametrize("pipe_class", [MultiProcessPipe, AsyncPipe])
def pipelined_backward(pipe_class):
    model = nn.Sequential(nn.ReLU(), nn.ReLU())

    destroy_model_parallel()
    initialize_model_parallel(1, 4)
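    # The pipelined_backward default depends on the process-group layout:
    # False for the (1, 4) layout here, True for the (2, 2) layout below.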
    pipe = pipe_class(model, [1, 1], worker_map=get_worker_map())

    assert pipe.pipelined_backward is False

    destroy_model_parallel()
    initialize_model_parallel(2, 2)
    pipe = pipe_class(model, [1, 1], worker_map=get_worker_map())

    assert pipe.pipelined_backward is True


@torch_spawn([4])
def async_event_loop():

    model = nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 10), nn.ReLU())
    pipe = AsyncPipe(model, [1, 1, 1, 1], worker_map=get_worker_map(), chunks=10)

    inputs = torch.rand(100, 10)

    output = pipe(inputs)
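    # Only the final pipeline stage sees the real output, so only that rank
    # computes the loss and runs backward.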
    if pipe.final_stage:
        loss = output.mean()
        loss.backward()