r'''
Layers that FMoE provides to users
'''
import math
import torch
import torch.nn as nn
import numpy as np

from .functions import moe_prepare_forward
from .functions import MOEScatter, MOEGather, MOELinear
from .functions import AllGather, Slice
from .gates import NaiveGate


class FMoELinear(nn.Module):
    r'''
    A linear layer that contains multiple experts.
    As multiple experts can be placed on the same worker, their computation can
    be performed in parallel to increase performance. The FMoELinear module
    provides such a function; see the illustrative sketch after this class.
    '''
    def __init__(self, num_expert: int, in_feat: int, out_feat: int,
            bias: bool = True, rank: int = 0):
        super().__init__()
        self.num_expert = num_expert
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rank = rank
        self.weight = nn.Parameter(torch.Tensor(num_expert, out_feat, in_feat))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(num_expert, out_feat))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        r'''
        Initialize the weights in the same way as standard linear layers, using
        a rank-dependent random seed so that experts on different ranks differ.
        '''
        rng = np.random.default_rng(np.random.randint(2048) + self.rank)

        # copied from torch.nn.init.kaiming_uniform_
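        # With a = sqrt(5), this matches nn.Linear's default initialization:
        # gain = sqrt(2 / (1 + a^2)) = sqrt(1/3), hence
        # bound = sqrt(3) * gain / sqrt(fan_in) = 1 / sqrt(fan_in).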
        fan = nn.init._calculate_correct_fan(self.weight[0], 'fan_in')
        gain = nn.init.calculate_gain('leaky_relu', math.sqrt(5))
        std = gain / math.sqrt(fan)
        bound = math.sqrt(3.0) * std
        device = self.weight.device
        dtype = self.weight.dtype
        weight = rng.uniform(-bound, bound, size=tuple(self.weight.size()))
        self.weight.data = torch.tensor(weight, dtype=dtype, device=device)

        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[0])
            bound = 1 / math.sqrt(fan_in)
            bias = rng.uniform(-bound, bound, size=tuple(self.bias.size()))
            self.bias.data = torch.tensor(bias, dtype=dtype, device=device)

    def forward(self, inp, fwd_expert_count):
        r'''
        Call the MoE linear function, where `fwd_expert_count` gives the
        number of input rows assigned to each expert.
        '''
        x = MOELinear.apply(inp, self.weight, fwd_expert_count)
        if self.bias is not None:
            # TODO: torch.repeat_interleave seems to have wrong behavior in
            # backward, leading to an incorrect gradient for the bias.
            # Thus we use a for-loop to manually expand the bias term.
            # This part should eventually move into MOELinear.apply.
            # bias = torch.repeat_interleave(self.bias,
            #         fwd_expert_count.to(self.bias.device), dim=0)
            bias = []
            for i in range(self.num_expert):
                if fwd_expert_count[i] > 0:
                    bias.append(self.bias[i].unsqueeze(0).expand(fwd_expert_count[i], -1))
            bias = torch.cat(bias, dim=0)
            x = x + bias
        return x

    def extra_repr(self) -> str:
        return 'num_expert={}, in_features={}, ' \
               'out_features={}, bias={}, rank={}'.format(
                   self.num_expert, self.in_feat,
                   self.out_feat, self.bias is not None, self.rank)
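

# Illustrative sketch, not part of the original API: feeding FMoELinear
# directly. Rows of `inp` must already be grouped by expert, which MOEScatter
# normally guarantees, and the CUDA extension behind MOELinear must be built
# for the call to actually run. Shapes below are hypothetical.
def _example_fmoe_linear():
    layer = FMoELinear(num_expert=2, in_feat=8, out_feat=16)
    # 3 tokens are routed to expert 0 and 1 token to expert 1
    fwd_expert_count = torch.tensor([3, 1])
    inp = torch.randn(4, 8)
    return layer(inp, fwd_expert_count)  # expected shape: (4, 16)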


def mark_module_parallel_comm(module, comm):
    r'''
    Mark all parameters in `module` as performing data parallelism over
    `comm`, where `comm` may be one of `'world'`, `'dp'` or `'none'`.
    '''
    for p in module.parameters():
        setattr(p, 'dp_comm', comm)
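

# Example: after `mark_module_parallel_comm(expert, 'dp')`, every parameter of
# `expert` carries `p.dp_comm == 'dp'`, which a distributed wrapper outside
# this file can read to choose the matching group for gradient synchronization.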


def _fmoe_general_global_forward(inp, gate, expert_fn, num_expert, world_size):
    r'''
    A private function that performs the following steps to complete the MoE
    computation.
    * Count the number of tokens from each worker to each expert.
    * Send the features to their target position so that the input features of
    each expert are contiguous in memory.
    * Perform the forward computation of the experts using `expert_fn`.
    * Gather the output features of the experts back, and reorder them to the
    original order of the input tokens.
    Intermediate results like expert counts are hidden from users by this
    function.
    '''
    (
        pos, local_expert_count, global_expert_count, fwd_expert_count,
        fwd_batch_size
    ) = moe_prepare_forward(gate, num_expert, world_size)
    x = MOEScatter.apply(
        inp, pos, local_expert_count, global_expert_count, fwd_batch_size,
        world_size
    )
    x = expert_fn(x, fwd_expert_count)
    x = MOEGather.apply(
        x, pos, local_expert_count, global_expert_count, inp.shape[0], world_size
    )
    return x
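

# A worked sketch of the flow above, under the semantics described in the
# docstring (single worker, world_size == 1, two experts): if `gate` assigns
# four tokens as [1, 0, 1, 1], counting yields local_expert_count == [1, 3]
# (identical to global_expert_count when world_size == 1) and
# fwd_batch_size == 4. MOEScatter then permutes the tokens so that expert 0's
# single row comes first, expert_fn sees fwd_expert_count == [1, 3], and
# MOEGather restores the original token order.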


class FMoE(nn.Module):
    r'''
    A general MoE implementation that supports an arbitrary module as the
    expert.
    * `num_expert` stands for the number of experts on **each** worker.
    * `world_size` stands for the total number of workers that contain
    different experts.
    * `mp_group` can be a torch communication group, indicating that model
    parallelism is applied across the group, which means that workers in the
    group hold the same copy of the input feature, and demand the same copy of
    the output. FMoE saves computation by slicing the input in the mp group and
    performing an all-gather after the MLP computation.
    * `top_k` stands for the number of experts each token goes to.
    * `gate` is a gate class, which can be found in `fmoe.gates`.
    * `expert` can be specified as a module class; it is used to generate
    `num_expert` expert modules. See the illustrative usage at the end of this
    file.
    '''
    def __init__(self, num_expert=32, d_model=1024, world_size=1, mp_group=None,
            top_k=2, gate=NaiveGate, expert=None):
        super().__init__()
        self.num_expert = num_expert
        self.d_model = d_model
        self.world_size = world_size
        self.mp_group = mp_group
        if mp_group is None:
            self.mp_size = 1
            self.mp_rank = 0
        else:
            self.mp_size = mp_group.size()
            self.mp_rank = mp_group.rank()
        self.top_k = top_k
        self.gate = gate(d_model, num_expert, world_size, top_k)
        if expert is not None:
            self.experts = nn.ModuleList([expert(d_model)
                    for _ in range(num_expert)])
            self.experts_fused = False
        else:
            # a fused expert module is expected to be assigned to
            # `self.experts` by the child class before use
            self.experts = None
            self.experts_fused = True

    def expert_fn(self, inp, fwd_expert_count):
        if self.experts_fused:
            return self.experts(inp, fwd_expert_count)
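        # unfused path: rows of `inp` arrive grouped by expert, so slice off
        # each expert's contiguous batch and run its module separately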
        outputs = []
        base_idx = 0
        for i in range(self.num_expert):
            batch_size = fwd_expert_count[i].item()
            inp_slice = inp[base_idx:base_idx + batch_size]
            outputs.append(self.experts[i](inp_slice))
            base_idx += batch_size
        return torch.cat(outputs, dim=0)

    def mark_parallel_comm(self, expert_dp_comm='none'):
        r'''
        Automatically mark the data-parallel comms of the parameters within the
        module. This can typically be called at the end of the `__init__`
        function in child classes.
        '''
        if self.experts is not None:
            comm = expert_dp_comm
            if isinstance(self.experts, list):
                for e in self.experts:
                    mark_module_parallel_comm(e, comm)
            else:
                mark_module_parallel_comm(self.experts, comm)
        mark_module_parallel_comm(self.gate, 'world')

    def forward(self, inp):
        r'''
        The FMoE module first computes the gate output, and then conducts the
        MoE forward pass according to the gate. The score of each selected
        expert, given by the gate, is multiplied with the expert's output
        tensor as a weight.
        '''
        if self.mp_size > 1:
            inp = Slice.apply(inp,
                    self.mp_rank, self.mp_size, self.mp_group)

        gate_top_k_idx, gate_score = self.gate(inp)
        # to: (BxLxtop_k) x d_model
        inp = inp.repeat_interleave(repeats=self.top_k, dim=0)
        x = _fmoe_general_global_forward(inp, gate_top_k_idx, self.expert_fn,
                self.num_expert, self.world_size)
        # to: (BxL) x top_k x d_model
        x = x.view(-1, self.top_k, self.d_model)
        # to: (BxL) x d_model
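        # gate_score is expected here as (BxL) x 1 x top_k, so the bmm yields
        # (BxL) x 1 x d_model, which the reshape then flattens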
        x = torch.bmm(gate_score, x).reshape(-1, self.d_model)

        if self.mp_size > 1:
            x = AllGather.apply(x,
                    self.mp_rank, self.mp_size, self.mp_group)
        return x
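

# Illustrative end-to-end usage, a sketch only: `_ExampleExpert` is
# hypothetical, and the CUDA extension behind the MoE functions must be
# available for the forward pass to actually run.
def _example_fmoe():
    class _ExampleExpert(nn.Module):
        # any module built as `expert(d_model)` that maps
        # (tokens, d_model) -> (tokens, d_model) fits the `expert` argument
        def __init__(self, d_model):
            super().__init__()
            self.fc = nn.Linear(d_model, d_model)

        def forward(self, x):
            return self.fc(x)

    moe = FMoE(num_expert=4, d_model=16, top_k=2, expert=_ExampleExpert)
    return moe(torch.randn(10, 16))  # expected shape: (10, 16)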