"vscode:/vscode.git/clone" did not exist on "4eecbabb78c96941d011aecc44adcddc8a672736"
layers.py 7.07 KB
Newer Older
Rick Ho's avatar
Rick Ho committed
1
2
3
4
r'''
Layers that FMoE provides to users
'''
import math

import numpy as np
import torch
import torch.nn as nn

from .functions import moe_prepare_forward
from .functions import MOEScatter, MOEGather, MOELinear
from .functions import AllGather
from .gates import NaiveGate


class FMoELinear(nn.Module):
    r'''
    A linear layer that contains multiple experts.
    As multiple experts can be placed on the same worker, their computation
    can be performed in parallel to improve performance.
    The FMoELinear module provides such a function.
    '''
    def __init__(self, num_expert=32, in_feat=1024, out_feat=1024, rank=0):
        super().__init__()
        self.num_expert = num_expert
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rank = rank
        self.weight = nn.Parameter(torch.Tensor(num_expert, out_feat, in_feat))
        self.reset_parameters()

    def reset_parameters(self):
        r'''
        Initialize each expert's weight in the same way as a linear layer,
        using an RNG seed offset by `self.rank` so that experts on different
        workers are initialized differently.
        '''
        rng = np.random.default_rng(np.random.randint(2048) + self.rank)
        fan = nn.init._calculate_correct_fan(self.weight[0], 'fan_in')
        gain = nn.init.calculate_gain('leaky_relu', math.sqrt(5))
        std = gain / math.sqrt(fan)
        bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
        for i in range(self.num_expert):
            weight = rng.uniform(-bound, bound, size=tuple(self.weight[i].size()))
            self.weight.data[i] = torch.from_numpy(weight)

    def forward(self, inp, fwd_expert_count):
        r'''
        Call the custom MoE linear function, which applies each expert's
        weight to its contiguous slice of the input.
        '''
        return MOELinear.apply(inp, self.weight, fwd_expert_count)
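
# Usage (a minimal sketch; assumes the fused MOELinear CUDA kernel from the
# fmoe extension is built, and that tokens arrive grouped contiguously by
# expert, as produced by MOEScatter):
#
#     fc = FMoELinear(num_expert=2, in_feat=4, out_feat=4).cuda()
#     inp = torch.randn(6, 4).cuda()        # 6 tokens, 4 features each
#     counts = torch.tensor([4, 2])         # 4 tokens -> expert 0, 2 -> expert 1
#     out = fc(inp, counts)                 # shape: (6, 4)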


def mark_module_parallel_comm(module, comm):
    r'''
    Mark all parameters in `module` as doing data parallel in `comm`, where
    `comm` may be one of `'world', 'dp', 'none'`.
    '''
    for p in module.parameters():
        setattr(p, 'dp_comm', comm)
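
# Usage (a minimal sketch): tag a module's parameters so that a data-parallel
# gradient synchronizer reading the `dp_comm` attribute can reduce them
# across the right communicator.
#
#     gate = NaiveGate(1024, 32, 1, 2)   # (d_model, num_expert, world_size, top_k)
#     mark_module_parallel_comm(gate, 'world')
#     assert all(p.dp_comm == 'world' for p in gate.parameters())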


def _fmoe_general_global_forward(inp, gate, expert_fn, num_expert, world_size):
    r'''
    A private function that performs the following steps to complete the MoE
    computation.
    * Count the number of tokens from each worker to each expert.
    * Send the features to their target position so that the input features of
    each expert are contiguous in memory.
    * Perform the forward computation of the experts using `expert_fn`.
    * Gather the output features of the experts back, and reorder them to the
    original order of the input tokens.
    Intermediate results like expert counts are hidden from users by this
    function.
    '''
    (
        pos, local_expert_count, global_expert_count, fwd_expert_count,
        fwd_batch_size
    ) = moe_prepare_forward(gate, num_expert, world_size)
    x = MOEScatter.apply(
        inp, pos, local_expert_count, global_expert_count, fwd_batch_size,
        world_size
    )
    x = expert_fn(x, fwd_expert_count)
    x = MOEGather.apply(
        x, pos, local_expert_count, global_expert_count, inp.shape[0], world_size
    )
    return x
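
# Reordering reference (an illustrative single-worker, top-1 sketch of what
# MOEScatter/MOEGather do, not the actual CUDA path):
#
#     gate = torch.tensor([1, 0, 1])              # expert index per token
#     pos = torch.argsort(gate)                   # [1, 0, 2]: group tokens by expert
#     counts = torch.bincount(gate, minlength=2)  # [1, 2] tokens per expert
#     x = inp[pos]                                # scatter: contiguous per expert
#     x = expert_fn(x, counts)                    # expert forward on each slice
#     out = torch.empty_like(x)
#     out[pos] = x                                # gather: restore original order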


class FMoE(nn.Module):
    r'''
    A general MoE implementation that supports an arbitrary module as the
    expert. Either `expert` or `expert_fn` is required.
    * `num_expert` stands for the number of experts on **each** worker.
    * `world_size` stands for the total number of workers that contain
    different experts.
    * `mp_group` can be a torch communication group, indicating that model
    parallel is applied across the group, which means that workers in the group
    hold the same copy of the input feature, and demand the same copy of the
    output. FMoE saves computation by slicing the input in the mp group and
    performing all-gather after the MLP computation.
    * `top_k` stands for the number of experts each token goes to.
    * `gate` is a gate class which can be found in `fmoe.gates`.
    * `expert` can be specified as a module class; it is used to generate
    `num_expert` expert modules.
    * `expert_fn` can be specified as a callable; it is called during forward
    with the (contiguous) input tensor and an array holding the number of
    input tokens routed to each expert.
    '''
    def __init__(self, num_expert=32, d_model=1024, world_size=1, mp_group=None,
            top_k=2, gate=NaiveGate, expert=None, expert_fn=None):
        super().__init__()
        self.num_expert = num_expert
        self.d_model = d_model
        self.world_size = world_size
        self.mp_group = mp_group
        if mp_group is None:
            self.mp_size = 1
            self.mp_rank = 0
        else:
            self.mp_size = mp_group.size()
            self.mp_rank = mp_group.rank()
        self.top_k = top_k
        self.gate = gate(d_model, num_expert, world_size, top_k)
        self.experts = None
        if expert_fn is None:
            assert expert is not None, 'Either expert or expert_fn should be set'
            # Use nn.ModuleList so that the expert parameters are registered
            # with this module; a plain Python list would hide them from
            # `parameters()` and from device placement.
            self.experts = nn.ModuleList(
                    [expert(d_model) for _ in range(num_expert)])
            def expert_fn(inp, fwd_expert_count):
                # Slice the contiguous input by per-expert token counts,
                # run each expert on its slice, and re-concatenate.
                outputs = []
                base_idx = 0
                for i in range(self.num_expert):
                    batch_size = fwd_expert_count[i].item()
                    inp_slice = inp[base_idx:base_idx + batch_size]
                    outputs.append(self.experts[i](inp_slice))
                    base_idx += batch_size
                return torch.cat(outputs, dim=0)
        self.expert_fn = expert_fn

    def mark_parallel_comm(self):
        r'''
        Automatically mark the data parallel comms of the parameters within the
        module. This can typically be called at the end of the __init__ function
        in child classes.
        '''
        if self.experts is not None:
            if self.world_size > self.mp_size:
                comm = 'none'
            else:
                comm = 'dp'
            if isinstance(self.experts, list):
                for e in self.experts:
                    mark_module_parallel_comm(e, comm)
            else:
                mark_module_parallel_comm(self.experts, comm)
        mark_module_parallel_comm(self.gate, 'world')

    def forward(self, inp):
        r'''
        The FMoE module first computes the gate output, and then conducts the
        MoE forward pass according to the gate. The score that the gate
        assigns to each selected expert is multiplied to that expert's output
        tensor as a weight.
        '''
        if self.mp_size > 1:
            B: int = inp.shape[0]
            local_batch_size = B // self.mp_size
            batch_start = local_batch_size * self.mp_rank
            batch_end = min(batch_start + local_batch_size, B)
            inp = inp[batch_start:batch_end]

        gate_top_k_idx, gate_score = self.gate(inp)
        # to: (BxLxtop_k) x d_model
        inp = inp.repeat_interleave(repeats=self.top_k, dim=0)
        x = _fmoe_general_global_forward(inp, gate_top_k_idx, self.expert_fn,
                self.num_expert, self.world_size)
        # to: (BxL) x top_k x d_model
        x = x.view(-1, self.top_k, self.d_model)
        # to: (BxL) x d_model
        x = torch.bmm(gate_score, x).reshape(-1, self.d_model)

        if self.mp_size > 1:
            x = AllGather.apply(x, self.mp_rank, self.mp_size, self.mp_group)
        return x
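

# Usage (a minimal sketch; assumes the fmoe CUDA extension is built, and a
# single worker, i.e. world_size=1 and no mp_group):
#
#     moe = FMoE(num_expert=4, d_model=16, top_k=2,
#                expert=lambda d: nn.Linear(d, d)).cuda()
#     x = torch.randn(8, 16).cuda()   # (batch, d_model)
#     y = moe(x)                      # (batch, d_model), gate-weighted mixture
#                                     # of each token's top-2 expert outputs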