import torch
from torch.autograd import Function
import fmoe_cuda


def moe_prepare_forward(gate, num_expert, world_size):
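    r"""
    Prepare the routing information needed by the MoE layer from the gate's
    expert assignment: sort tokens by expert, count how many tokens go to
    each expert locally, exchange the counts across workers when
    ``world_size > 1``, and derive the per-expert and total batch sizes seen
    by this worker's experts.
    """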
    fmoe_cuda.ensure_nccl(torch.distributed.distributed_c10d._default_pg, gate)

    with torch.no_grad():
        _, pos = torch.sort(gate)
        gate_idx, gate_count = torch.unique(gate, return_counts=True)
        local_expert_count = torch.zeros(num_expert * world_size, 
                device=gate.device, dtype=torch.long)
        local_expert_count.index_put_((gate_idx.long(), ), gate_count)

        if world_size > 1:
            global_expert_count, = fmoe_cuda.expert_exchange(
                    local_expert_count, num_expert, world_size)
        else:
            global_expert_count = local_expert_count
        fwd_expert_count = global_expert_count.view(world_size,
                num_expert).sum(dim=0)
        fwd_batch_size = int(fwd_expert_count.sum().item())
    return (pos, local_expert_count.cpu(), global_expert_count.cpu(), 
            fwd_expert_count.cpu(), fwd_batch_size)


class MOEScatter(Function):
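    r"""
    Reorder the input tokens by their target expert (``local_gather``) and,
    when ``world_size > 1``, exchange them across workers (``global_scatter``)
    so that every expert receives its tokens in one contiguous buffer. The
    backward pass performs the inverse exchange.
    """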
    @staticmethod
    def forward(ctx, inp, pos, local_expert_count, global_expert_count,
            fwd_batch_size, world_size):
        local_input_buf, = fmoe_cuda.local_gather(inp, pos)
        if world_size > 1:
            global_input_buf, = fmoe_cuda.global_scatter(local_input_buf, 
                    local_expert_count, global_expert_count,
                    fwd_batch_size, world_size)
        else:
            global_input_buf = local_input_buf
        ctx.moe_args = fwd_batch_size, inp.shape[0], world_size
        variables = (pos, local_expert_count, global_expert_count)
        ctx.save_for_backward(*variables)
        return global_input_buf

    @staticmethod
    def backward(ctx, global_grad_in):
        (pos, local_expert_count, global_expert_count) = ctx.saved_tensors
        (fwd_batch_size, local_batch_size, world_size) = ctx.moe_args

        if world_size > 1:
            # Route the gradient back with the same expert counts as forward.
            local_grad_in, = fmoe_cuda.global_gather(global_grad_in,
                    local_expert_count, global_expert_count,
                    local_batch_size, world_size)
        else:
            local_grad_in = global_grad_in
        grad_in, = fmoe_cuda.local_scatter(local_grad_in, pos)
        return grad_in, None, None, None, None, None


class MOELinear(Function):
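    r"""
    Apply the expert weights to the scattered input buffer, where
    ``fwd_expert_count`` gives the number of consecutive rows that belong to
    each expert.
    """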
    @staticmethod
    def forward(ctx, global_input_buf, weight, fwd_expert_count):
        global_output_buf, = fmoe_cuda.forward(global_input_buf, weight,
                fwd_expert_count)
        variables = (global_input_buf, weight, fwd_expert_count)
        ctx.save_for_backward(*variables)
        return global_output_buf

    @staticmethod
    def backward(ctx, grad_out):
        (input_buf, weight, fwd_expert_count) = ctx.saved_tensors
        grad_inp_buf, grad_weight = fmoe_cuda.backward(
                grad_out, input_buf, weight, fwd_expert_count)
        return grad_inp_buf, grad_weight, None


class MOEGather(Function):
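    r"""
    The inverse of MOEScatter: collect expert outputs from all workers
    (``global_gather``) when ``world_size > 1`` and restore the original
    token order (``local_scatter``).
    """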
    @staticmethod
    def forward(ctx, global_output_buf, pos, local_expert_count, 
            global_expert_count, local_batch_size, world_size):
        if world_size > 1:
            local_output_buf, = fmoe_cuda.global_gather(global_output_buf, 
                    local_expert_count, global_expert_count, 
                    local_batch_size, world_size)
        else:
            local_output_buf = global_output_buf
        output, = fmoe_cuda.local_scatter(local_output_buf, pos)

        ctx.moe_args = local_batch_size, global_output_buf.shape[0], world_size
        variables = (pos, local_expert_count, global_expert_count)
        ctx.save_for_backward(*variables)
        return output

    @staticmethod
    def backward(ctx, grad_out):
        pos, local_expert_count, global_expert_count = ctx.saved_tensors
        local_batch_size, fwd_batch_size, world_size = ctx.moe_args
        grad_out_buf, = fmoe_cuda.local_gather(grad_out.contiguous(), pos)
        if world_size > 1:
            global_grad_out_buf, = fmoe_cuda.global_scatter(grad_out_buf,
                    local_expert_count, global_expert_count,
                    fwd_batch_size, world_size)
        else:
            global_grad_out_buf = grad_out_buf
        return global_grad_out_buf, None, None, None, None, None
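

# A minimal sketch (not part of the original module) of how these primitives
# are expected to compose into one MoE forward pass. The names `inp`, `gate`,
# `weight`, `num_expert` and `world_size` are illustrative assumptions.
#
#   pos, lec, gec, fec, fwd_batch_size = moe_prepare_forward(
#           gate, num_expert, world_size)
#   x = MOEScatter.apply(inp, pos, lec, gec, fwd_batch_size, world_size)
#   x = MOELinear.apply(x, weight, fec)
#   out = MOEGather.apply(x, pos, lec, gec, inp.shape[0], world_size)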