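# Check that the fused MoE dispatch/combine kernel used by SparseMLP produces the
# same forward outputs and gradients as the plain matrix-multiplication fallback.
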
import pytest
import torch
import torch.distributed as dist

import colossalai
from colossalai.moe import SparseMLP
from colossalai.moe.manager import MOE_MANAGER
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.utils import get_current_device

BATCH_SIZE = 4
NUM_EXPERTS = 4


def check_equal(tensor_a, tensor_b, atol=1e-06):
    assert torch.allclose(tensor_a, tensor_b, rtol=0, atol=atol)


def run_routing(rank, world_size, port, rs=2, hidden_size=128, data_type=torch.float32, topk=1):
    # Disable TF32 here, since it introduces absolute error in the results
    torch.backends.cuda.matmul.allow_tf32 = False

    colossalai.launch(config=dict(), rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    local_rank = dist.get_rank()

    MOE_MANAGER.setup(42, parallel="EP")    # MOE environment initialization
    MOE_MANAGER.reset_loss()
    torch.manual_seed(rs + local_rank)    # give each process a different random seed

    # get randomized data
    tokens = torch.randn(BATCH_SIZE, hidden_size, dtype=data_type, device=get_current_device(), requires_grad=True)

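    # build the sparse MoE layer under test with a top-k router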
    layer = SparseMLP(hidden_size=hidden_size,
                      intermediate_size=hidden_size * 2,
                      num_experts=NUM_EXPERTS,
                      router_top_k=topk,
                      router_capacity_factor_train=1.0)
    layer = layer.to(get_current_device())
    if data_type == torch.float16:
        layer = layer.half()

    # use matrix multiplication instead of COL_MOE_KERNEL in MOE dispatch and combine
    layer.enable_kernel = False
    old_out = layer(tokens)
    out_shape = old_out.shape
    grad = torch.randn(out_shape, device=get_current_device())
    old_out.backward(grad)  # get gradient

    # save all results
    o_tk_grad = tokens.grad.data.clone()
    o_gt_grad = layer.gate_weight.grad.data.clone()

    # reset all gradients
    tokens.grad.zero_()
    layer.gate_weight.grad.zero_()

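    # repeat the forward/backward pass with the fused MoE kernel enabled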
    layer.enable_kernel = True
    new_out = layer(tokens)    # get outputs through colossal kernel

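    # the kernel forward output should match the matmul reference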
    if data_type == torch.float32:
        check_equal(old_out, new_out)
    else:
        check_equal(old_out, new_out, 1e-2)
    # forward function passed

    new_out.backward(grad)  # get gradients through the kernel path
    n_tk_grad = tokens.grad.data.clone()
    n_gt_grad = layer.gate_weight.grad.data.clone()

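    # gradients from the kernel path should match the matmul reference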
    if data_type == torch.float32:
        check_equal(o_tk_grad, n_tk_grad)
    else:
        check_equal(o_tk_grad, n_tk_grad, 1e-2)
    # tokens gradient is correct

    if data_type == torch.float32:
        check_equal(o_gt_grad, n_gt_grad, 5e-05)
    else:
        check_equal(o_gt_grad, n_gt_grad, 2e-01)
    # gate weight gradient is correct


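# spawn 4 processes; with EP and 4 experts, each rank holds one expert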
@pytest.mark.dist
@pytest.mark.parametrize("rs", [131])
@pytest.mark.parametrize("hidden_size", [32, 144])
@pytest.mark.parametrize("data_type", [torch.float32, torch.float16])
@pytest.mark.parametrize("topk", [1, 2])
@rerun_if_address_is_in_use()
def test_moe_kernel(rs, hidden_size, data_type, topk):
    spawn(run_routing, 4, rs=rs, hidden_size=hidden_size, data_type=data_type, topk=topk)


if __name__ == '__main__':
    test_moe_kernel(2, 256, torch.float16, 2)