# Copyright (c) 2022, Tri Dao.

import torch
import torch.nn as nn
from einops import rearrange
from torch import Tensor

from flash_attn.utils.distributed import all_reduce, reduce_scatter


class GPT2Embeddings(nn.Module):
    def __init__(
        self,
        embed_dim,
        vocab_size,
        max_position_embeddings,
        padding_idx=None,
        word_embed_proj_dim=None,
        device=None,
        dtype=None,
    ):
        """
        If max_position_embeddings <= 0, there are no position embeddings.
        If word_embed_proj_dim is not None (e.g., OPT-350m), we embed to that dimension,
            then project up to embed_dim.
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        if word_embed_proj_dim is None:
            self.word_embeddings = nn.Embedding(
                vocab_size, embed_dim, padding_idx=padding_idx, **factory_kwargs
            )
            self.project_in = None
        else:
            self.word_embeddings = nn.Embedding(
                vocab_size, word_embed_proj_dim, padding_idx=padding_idx, **factory_kwargs
            )
            self.project_in = nn.Linear(
                word_embed_proj_dim, embed_dim, bias=False, **factory_kwargs
            )
        self.max_position_embeddings = max_position_embeddings
        if self.max_position_embeddings > 0:
            self.position_embeddings = nn.Embedding(
                max_position_embeddings, embed_dim, **factory_kwargs
            )

    def forward(self, input_ids, position_ids=None):
        """
        input_ids: (batch, seqlen)
        position_ids: (batch, seqlen)
        """
        batch_size, seqlen = input_ids.shape
        embeddings = self.word_embeddings(input_ids)
        if self.project_in is not None:
            embeddings = self.project_in(embeddings)
        if self.max_position_embeddings > 0:
            if position_ids is None:
                position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
            position_embeddings = self.position_embeddings(position_ids)
            embeddings = embeddings + position_embeddings
        return embeddings

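# Usage sketch for GPT2Embeddings (added for illustration, not part of the original file;
# the sizes below are arbitrary):
#
#     emb = GPT2Embeddings(embed_dim=768, vocab_size=50257, max_position_embeddings=1024)
#     input_ids = torch.randint(0, 50257, (2, 16))  # (batch, seqlen)
#     hidden_states = emb(input_ids)                # (batch, seqlen, embed_dim) = (2, 16, 768)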

class BertEmbeddings(nn.Module):
    def __init__(
        self,
        embed_dim,
        vocab_size,
        max_position_embeddings,
        type_vocab_size,
        padding_idx=None,
        device=None,
        dtype=None,
    ):
        """
        If max_position_embeddings <= 0, there are no position embeddings.
        If type_vocab_size <= 0, there are no token type embeddings.
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.word_embeddings = nn.Embedding(
            vocab_size, embed_dim, padding_idx=padding_idx, **factory_kwargs
        )
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        if self.max_position_embeddings > 0:
            self.position_embeddings = nn.Embedding(
                max_position_embeddings, embed_dim, **factory_kwargs
            )
        if self.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(type_vocab_size, embed_dim, **factory_kwargs)

    def forward(self, input_ids, position_ids=None, token_type_ids=None):
        """
        input_ids: (batch, seqlen)
        position_ids: (batch, seqlen)
        token_type_ids: (batch, seqlen)
        """
        batch_size, seqlen = input_ids.shape
        embeddings = self.word_embeddings(input_ids)
        if self.max_position_embeddings > 0:
            if position_ids is None:
                position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
            position_embeddings = self.position_embeddings(position_ids)
            embeddings = embeddings + position_embeddings
        if self.type_vocab_size > 0:
            if token_type_ids is None:
                token_type_ids = torch.zeros(seqlen, dtype=torch.long, device=input_ids.device)
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings = embeddings + token_type_embeddings
        return embeddings

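# Usage sketch for BertEmbeddings (added for illustration, not part of the original file;
# the sizes below are arbitrary):
#
#     emb = BertEmbeddings(
#         embed_dim=768, vocab_size=30522, max_position_embeddings=512, type_vocab_size=2
#     )
#     input_ids = torch.randint(0, 30522, (2, 16))  # (batch, seqlen)
#     hidden_states = emb(input_ids)  # (2, 16, 768); token_type_ids default to all zeros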

class VocabParallelEmbedding(nn.Embedding):
    def __init__(self, num_embeddings, *args, process_group=None, padding_idx=None, **kwargs):
        self.process_group = process_group
        if process_group is not None:
            world_size = torch.distributed.get_world_size(process_group)
            if num_embeddings % world_size != 0:
                raise ValueError(
                    f"num_embeddings ({num_embeddings}) must be divisible by "
                    f"world_size ({world_size})"
                )
            if world_size > 1 and padding_idx is not None:
                raise RuntimeError("VocabParallelEmbedding does not support padding_idx")
        else:
            world_size = 1
        super().__init__(num_embeddings // world_size, *args, padding_idx=padding_idx, **kwargs)

    def forward(self, input: Tensor) -> Tensor:
        if self.process_group is None:
            return super().forward(input)
        else:
            rank = torch.distributed.get_rank(self.process_group)
            vocab_size = self.num_embeddings
            vocab_start_index, vocab_end_index = rank * vocab_size, (rank + 1) * vocab_size
            # Mask token ids that fall outside this rank's vocab partition (True means masked).
            input_ids_mask = (input < vocab_start_index) | (input >= vocab_end_index)
            input = input - vocab_start_index
            input[input_ids_mask] = 0
            embeddings = super().forward(input)
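            # Tokens outside this rank's partition were clamped to index 0 above, so zero
            # out their embeddings here; the caller is expected to sum the partial results
            # across ranks (e.g. the all-reduce / reduce-scatter in
            # ParallelGPT2Embeddings.forward).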
            embeddings[input_ids_mask] = 0.0
            return embeddings

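# Usage sketch for VocabParallelEmbedding (added for illustration, not part of the original
# file; assumes torch.distributed is already initialized and the vocab size is divisible by
# the world size):
#
#     pg = torch.distributed.group.WORLD
#     emb = VocabParallelEmbedding(50304, 768, process_group=pg)
#     # Each rank stores 50304 // world_size rows; out-of-partition tokens yield zero
#     # vectors, so summing the outputs across ranks recovers the full embedding.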

class ColumnParallelEmbedding(nn.Embedding):
    def __init__(self, num_embeddings, embedding_dim, *args, process_group=None, **kwargs):
        self.process_group = process_group
        if process_group is not None:
            world_size = torch.distributed.get_world_size(process_group)
            if embedding_dim % world_size != 0:
                raise ValueError(
                    f"embedding_dim ({embedding_dim}) must be divisible by "
                    f"world_size ({world_size})"
                )
        else:
            world_size = 1
        super().__init__(num_embeddings, embedding_dim // world_size, *args, **kwargs)

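# Usage sketch for ColumnParallelEmbedding (added for illustration, not part of the original
# file; assumes torch.distributed is already initialized and embedding_dim is divisible by
# the world size):
#
#     pg = torch.distributed.group.WORLD
#     pos_emb = ColumnParallelEmbedding(1024, 768, process_group=pg)
#     # Each rank stores all 1024 positions but only 768 // world_size embedding dimensions.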

class ParallelGPT2Embeddings(nn.Module):
    def __init__(
        self,
        embed_dim,
        vocab_size,
        max_position_embeddings,
        process_group,
        padding_idx=None,
        sequence_parallel=True,
        device=None,
        dtype=None,
    ):
        """
        If max_position_embeddings <= 0, there are no position embeddings.
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.process_group = process_group
        self.sequence_parallel = sequence_parallel
        self.word_embeddings = VocabParallelEmbedding(
            vocab_size,
            embed_dim,
            padding_idx=padding_idx,
            process_group=process_group,
            **factory_kwargs,
        )
        self.max_position_embeddings = max_position_embeddings
        if self.max_position_embeddings > 0:
            self.position_embeddings = ColumnParallelEmbedding(
                max_position_embeddings, embed_dim, process_group=process_group, **factory_kwargs
            )

    def forward(self, input_ids, position_ids=None, combine_batch_seqlen_dim=False):
        """
        input_ids: (batch, seqlen)
        position_ids: (batch, seqlen)
        """
        batch_size, seqlen = input_ids.shape
        world_size = torch.distributed.get_world_size(self.process_group)
        embeddings = self.word_embeddings(input_ids)
        if self.max_position_embeddings > 0:
            if position_ids is None:
                position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
            position_embeddings = self.position_embeddings(position_ids)
            if world_size <= 1:
                embeddings = embeddings + position_embeddings
            else:
                partition_dim = self.position_embeddings.embedding_dim
                rank = torch.distributed.get_rank(self.process_group)
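                # position_embeddings holds only this rank's slice of the embedding
                # dimension (ColumnParallelEmbedding), so add it into the matching slice
                # of the full-width word embeddings before the reduction below.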
                embeddings[
                    ..., rank * partition_dim : (rank + 1) * partition_dim
                ] += position_embeddings
        if combine_batch_seqlen_dim:
            embeddings = rearrange(embeddings, "b s d -> (b s) d")
        reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce
        return embeddings if world_size <= 1 else reduce_fn(embeddings, self.process_group)
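

# Usage sketch for ParallelGPT2Embeddings (added for illustration, not part of the original
# file; assumes torch.distributed is already initialized):
#
#     pg = torch.distributed.group.WORLD
#     emb = ParallelGPT2Embeddings(
#         768, 50304, 1024, process_group=pg, sequence_parallel=False, device="cuda"
#     )
#     input_ids = torch.randint(0, 50304, (2, 16), device="cuda")
#     hidden_states = emb(input_ids)  # (2, 16, 768), all-reduced over pg
#     # With sequence_parallel=True the output is reduce-scattered across ranks instead,
#     # typically together with combine_batch_seqlen_dim=True.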