test_activation_checkpointing.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import pytest
import torch
import torch.nn.functional as F

from colossalai.context.parallel_mode import ParallelMode
from colossalai.context.random import add_seed, seed, set_mode, reset_seeds
from colossalai.utils import checkpoint
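# Note: judging from its use below, colossalai.utils.checkpoint expects an
# activation-offload flag right after the function argument, i.e.
# checkpoint(fn, activation_offload, *args), unlike
# torch.utils.checkpoint.checkpoint(fn, *args).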


def forward(x, weight):
    out = torch.matmul(x, weight)
    # draw the dropout mask from the DATA-mode RNG stream
    with seed(ParallelMode.DATA):
        out_ = F.dropout(out, p=0.4, training=True)
    return out_
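
# Note: as used here, `seed(mode)` appears to act as a context manager that
# swaps in the CUDA RNG state registered for `mode` (via add_seed) and restores
# the previous state on exit, which is what makes the dropout mask reproducible.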


@pytest.mark.gpu
@pytest.mark.parametrize("cpu_offload", [True, False])
def test_activation_checkpointing(cpu_offload):
    # register one CUDA RNG seed per parallel mode
    add_seed(ParallelMode.GLOBAL, 1024)
    add_seed(ParallelMode.DATA, 1026)
    # snapshot the CUDA RNG state of each mode so the checkpointed run can be
    # replayed from an identical starting point
    set_mode(ParallelMode.GLOBAL)
    global_cuda_rng_state = torch.cuda.get_rng_state()
    set_mode(ParallelMode.DATA)
    data_parallel_cuda_rng_state = torch.cuda.get_rng_state()
    set_mode(ParallelMode.GLOBAL)

    # reference run: forward/backward without activation checkpointing
    data = torch.rand(2, 2, requires_grad=True).cuda()
    data.retain_grad()
    weight = torch.rand(2, 4, requires_grad=True).cuda()

    data_ = data.clone().detach()
    data_.requires_grad = True
    data_.retain_grad()
    weight_ = weight.clone().detach()
    weight_.requires_grad = True

    out = forward(data, weight)
    loss = out.sum()
    loss.backward()

    # checkpointed run: restore the saved RNG states so the recomputed
    # dropout mask matches the reference run exactly
    set_mode(ParallelMode.GLOBAL)
    torch.cuda.set_rng_state(global_cuda_rng_state)
    set_mode(ParallelMode.DATA)
    torch.cuda.set_rng_state(data_parallel_cuda_rng_state)
    set_mode(ParallelMode.GLOBAL)
    # recompute forward() during backward; cpu_offload moves the saved
    # input activations to CPU in the meantime
    out = checkpoint(forward, cpu_offload, data_, weight_)
    loss = out.sum()
    loss.backward()

    assert torch.all(data.grad == data_.grad), 'Gradient of the input does not match'
    torch.cuda.empty_cache()
    # The seed manager is a singleton: without resetting here, other tests run
    # in the same session would fail, since they cannot overwrite the seeds
    # registered by this test.
    reset_seeds()
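

# A minimal standalone entry point (an assumption; in the repository the test
# would normally be collected and run by pytest). Requires a CUDA device.
if __name__ == '__main__':
    test_activation_checkpointing(cpu_offload=True)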