import torch
import torch.nn.init as init
from torch import Tensor
from torch import nn as nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter

from colossalai.legacy.context import ParallelMode, seed
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.nn.layer.base_layer import ParallelLayer
from colossalai.legacy.nn.layer.parallel_1d._utils import gather_forward_split_backward, reduce_grad, reduce_input
from colossalai.legacy.nn.layer.parallel_1d.layers import Linear1D_Row
from colossalai.legacy.nn.layer.utils import divide
from colossalai.legacy.registry import LAYERS, LOSSES
from colossalai.utils import get_current_device


class VocabParallelEmbedding(torch.nn.Module):
    """Language model embeddings.

    Arguments:
        hidden_size: hidden size
        vocab_size: vocabulary size
        max_sequence_length: maximum size of sequence. This
                             is used for positional embedding
        embedding_dropout_prob: dropout probability for embeddings
        num_tokentypes: size of the token-type embeddings. 0 value
                        will ignore this embedding
        dtype: dtype of the embedding weights
    """

    def __init__(
        self, hidden_size, vocab_size, max_sequence_length, embedding_dropout_prob, num_tokentypes=0, dtype=torch.float
    ):
        super(VocabParallelEmbedding, self).__init__()

        self.hidden_size = hidden_size
        self.num_tokentypes = num_tokentypes

        # Word embeddings (parallel).
        self.word_embeddings = VocabParallelEmbedding1D(vocab_size, self.hidden_size, dtype=dtype)
        self._word_embeddings_key = "word_embeddings"

        # Position embedding (serial).
        self.position_embeddings = torch.nn.Embedding(max_sequence_length, self.hidden_size, dtype=dtype)
        self._position_embeddings_key = "position_embeddings"
        # Initialize the position embeddings.
        # self.init_method(self.position_embeddings.weight)

        # Token type embedding.
        # Add this as an optional field that can be added through
        # method call so we can load a pretrain model without
        # token types and add them as needed.
        self._tokentype_embeddings_key = "tokentype_embeddings"
        if self.num_tokentypes > 0:
            self.tokentype_embeddings = torch.nn.Embedding(self.num_tokentypes, self.hidden_size, dtype=dtype)
            # Initialize the token-type embeddings.
            # self.init_method(self.tokentype_embeddings.weight)
        else:
            self.tokentype_embeddings = None

        # Embeddings dropout
        self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)

    def zero_parameters(self):
        """Zero out all parameters in embedding."""
        self.word_embeddings.weight.data.fill_(0)
        self.word_embeddings.weight.shared = True
        self.position_embeddings.weight.data.fill_(0)
        self.position_embeddings.weight.shared = True
        if self.num_tokentypes > 0:
            self.tokentype_embeddings.weight.data.fill_(0)
            self.tokentype_embeddings.weight.shared = True

    def add_tokentype_embeddings(self, num_tokentypes):
        """Add token-type embedding. This function is provided so we can add
        token-type embeddings in case the pretrained model does not have it.
        This allows us to load the model normally and then add this embedding.
        """
        if self.tokentype_embeddings is not None:
            raise Exception("tokentype embeddings is already initialized")
        if torch.distributed.get_rank() == 0:
            print("adding embedding for {} tokentypes".format(num_tokentypes), flush=True)
        self.num_tokentypes = num_tokentypes
        self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
        # Initialize the token-type embeddings.
        # self.init_method(self.tokentype_embeddings.weight)

    def forward(self, input_ids, position_ids=None, tokentype_ids=None):
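        # NOTE: tokentype_ids is accepted for interface compatibility but is not
        # applied here; only word and position embeddings are summed.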
        # Embeddings.
        if input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        words_embeddings = self.word_embeddings(input_ids)

        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        else:
            position_ids = torch.arange(input_shape[-1], dtype=torch.long, device=get_current_device())
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        position_embeddings = self.position_embeddings(position_ids)

        embeddings = words_embeddings + position_embeddings

        # Dropout.
        with seed(ParallelMode.TENSOR):
            embeddings = self.embedding_dropout(embeddings)
        return embeddings

    def state_dict_for_save_checkpoint(self, destination=None, prefix="", keep_vars=False):
        """For easy load."""

        state_dict_ = {}
        state_dict_[self._word_embeddings_key] = self.word_embeddings.state_dict(destination, prefix, keep_vars)
        state_dict_[self._position_embeddings_key] = self.position_embeddings.state_dict(destination, prefix, keep_vars)
        if self.num_tokentypes > 0:
            state_dict_[self._tokentype_embeddings_key] = self.tokentype_embeddings.state_dict(
                destination, prefix, keep_vars
            )

        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        # Word embedding.
        if self._word_embeddings_key in state_dict:
            state_dict_ = state_dict[self._word_embeddings_key]
        else:
            # for backward compatibility.
            state_dict_ = {}
            for key in state_dict.keys():
                if "word_embeddings" in key:
                    state_dict_[key.split("word_embeddings.")[1]] = state_dict[key]
        self.word_embeddings.load_state_dict(state_dict_, strict=strict)

        # Position embedding.
        if self._position_embeddings_key in state_dict:
            state_dict_ = state_dict[self._position_embeddings_key]
        else:
            # for backward compatibility.
            state_dict_ = {}
            for key in state_dict.keys():
                if "position_embeddings" in key:
                    state_dict_[key.split("position_embeddings.")[1]] = state_dict[key]
        self.position_embeddings.load_state_dict(state_dict_, strict=strict)

        # Tokentype embedding.
        if self.num_tokentypes > 0:
            state_dict_ = {}
            if self._tokentype_embeddings_key in state_dict:
                state_dict_ = state_dict[self._tokentype_embeddings_key]
            else:
                # for backward compatibility.
                for key in state_dict.keys():
                    if "tokentype_embeddings" in key:
                        state_dict_[key.split("tokentype_embeddings.")[1]] = state_dict[key]
            if len(state_dict_.keys()) > 0:
                self.tokentype_embeddings.load_state_dict(state_dict_, strict=strict)
            else:
                print(
                    "***WARNING*** expected tokentype embeddings in the checkpoint but could not find it",
                    flush=True,
                )


class VocabParallelEmbedding1D(torch.nn.Module):
    """Embedding parallelized in the vocabulary dimension.

    This is mainly adapted from torch.nn.Embedding and all the default
    values are kept.
    Arguments:
        num_embeddings: vocabulary size.
        embedding_dim: size of hidden state.
        init_method: method to initialize weights.
    """

    def __init__(self, num_embeddings, embedding_dim, dtype=None, init_method=None):
        super(VocabParallelEmbedding1D, self).__init__()
        # Keep the input dimensions.
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # Set the details for compatibility.
        self.padding_idx = None
        self.max_norm = None
        self.norm_type = 2.0
        self.scale_grad_by_freq = False
        self.sparse = False
        self._weight = None
        self.tensor_model_parallel_size = gpc.tensor_parallel_size
        # Divide the weight matrix along the vocabulary dimension.
        self.vocab_start_index, self.vocab_end_index = VocabUtility.vocab_range_from_global_vocab_size(
            self.num_embeddings, gpc.get_local_rank(ParallelMode.PARALLEL_1D), self.tensor_model_parallel_size
        )
        self.num_embeddings_per_partition = self.vocab_end_index - self.vocab_start_index

        # Allocate weights and initialize.
        factory_kwargs = {"device": get_current_device(), "dtype": dtype}
        self.weight = Parameter(torch.empty(self.num_embeddings_per_partition, self.embedding_dim, **factory_kwargs))
        init.uniform_(self.weight, -1, 1)

    def forward(self, input_):
        if self.tensor_model_parallel_size > 1:
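            # Tokens outside this rank's [vocab_start_index, vocab_end_index) shard
            # are flagged, looked up at local index 0, and zeroed out after the
            # lookup, so the all-reduce below reconstructs the full embedding.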
            # Build the mask.
            input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
            # Mask the input.
            masked_input = input_.clone() - self.vocab_start_index
            masked_input[input_mask] = 0
        else:
            masked_input = input_
            # Get the embeddings.
        output_parallel = F.embedding(
            masked_input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        # Mask the output embedding.
        if self.tensor_model_parallel_size > 1:
            output_parallel[input_mask, :] = 0.0
        # Reduce across all the model parallel GPUs.
        output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
        return output


@LOSSES.register_module
class vocab_parallel_cross_entropy(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, vocab_parallel_logits, target):
        """Helper function for the cross entropy."""
        vocab_parallel_logits = vocab_parallel_logits[..., :-1, :].contiguous()
        target = target[..., 1:].contiguous()
        return _VocabParallelCrossEntropy.apply(
            vocab_parallel_logits.view(-1, vocab_parallel_logits.size(-1)), target.view(-1)
        )


class _VocabParallelCrossEntropy(torch.autograd.Function):
    @staticmethod
    def forward(ctx, vocab_parallel_logits, target):
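        # Each rank holds only its vocabulary shard of the logits. The global max,
        # the target logit and the sum of exponentials are combined via all-reduce,
        # so the result equals full-vocab cross entropy without gathering the logits.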
        # Maximum value along vocab dimension across all GPUs.
        logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
        torch.distributed.all_reduce(
            logits_max, op=torch.distributed.ReduceOp.MAX, group=gpc.get_group(ParallelMode.PARALLEL_1D)
        )
        # Subtract the maximum value.
        vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))

        # Get the partition's vocab indices
        get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
        partition_vocab_size = vocab_parallel_logits.size()[-1]
        rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
        world_size = gpc.tensor_parallel_size
        vocab_start_index, vocab_end_index = get_vocab_range(partition_vocab_size, rank, world_size)

        # Create a mask of valid vocab ids (1 means it needs to be masked).
        target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
        masked_target = target.clone() - vocab_start_index
        masked_target[target_mask] = 0

        # Get predicted-logits = logits[target].
        # For Simplicity, we convert logits to a 2-D tensor with size
        # [*, partition-vocab-size] and target to a 1-D tensor of size [*].
        logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
        masked_target_1d = masked_target.view(-1)
        arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device)
        predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
        predicted_logits_1d = predicted_logits_1d.clone().contiguous()
        predicted_logits = predicted_logits_1d.view_as(target)
        predicted_logits[target_mask] = 0.0
        # All reduce is needed to get the chunks from other GPUs.
        torch.distributed.all_reduce(
            predicted_logits, op=torch.distributed.ReduceOp.SUM, group=gpc.get_group(ParallelMode.PARALLEL_1D)
        )

        # Sum of exponential of logits along vocab dimension across all GPUs.
        exp_logits = vocab_parallel_logits
        torch.exp(vocab_parallel_logits, out=exp_logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        torch.distributed.all_reduce(
            sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=gpc.get_group(ParallelMode.PARALLEL_1D)
        )

        # Loss = log(sum(exp(logits))) - predicted-logit.
        loss = torch.log(sum_exp_logits) - predicted_logits
        loss = loss.mean()
        # Store softmax, target-mask and masked-target for backward pass.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
        return loss

    @staticmethod
    def backward(ctx, grad_output):
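        # d(loss)/d(logits) = softmax(logits) - one_hot(target); the one-hot term is
        # subtracted only on the rank that owns the target index (target_mask == 0).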
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target_1d = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)

        # Add the gradient from matching classes.
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
        grad_2d[arange_1d, masked_target_1d] -= 1.0 - target_mask.view(-1).float()

        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(grad_output.unsqueeze(dim=-1))

        return grad_input, None


class VocabUtility:
    """Split the vocabulary into `world_size` chunks amd return the
319
320
    first and last index of the vocabulary belonging to the `rank`
    partition: Note that indices in [fist, last)"""

    @staticmethod
    def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank, world_size):
        index_f = rank * per_partition_vocab_size
        index_l = index_f + per_partition_vocab_size
        return index_f, index_l

    @staticmethod
    def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
        per_partition_vocab_size = divide(global_vocab_size, world_size)
        return VocabUtility.vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank, world_size)
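    # For example, with global_vocab_size=50304 and world_size=4, each rank owns
    # 12576 ids and rank 1 is assigned the half-open range [12576, 25152).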


class VocabParallelGPTLMHead1D(ParallelLayer):
    """
    Language model head that shares the same parameters with the embedding matrix.
    """

    def __init__(self, embed=None, vocab_size=None, dtype=None, embed_dim=None):
        super().__init__()
        if embed is not None:
            self.head = embed
        else:
            self.head = VocabParallelEmbedding1D(vocab_size, embed_dim, dtype=dtype)

    def forward(self, x: Tensor) -> Tensor:
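        # reduce_grad leaves the activation unchanged in the forward pass and
        # all-reduces its gradient in the backward pass; F.linear against the
        # vocab-sharded embedding weight then yields this rank's shard of the logits.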
        x = reduce_grad(x, ParallelMode.PARALLEL_1D)
        x = F.linear(x, self.head.weight)
        return x


###################################


class HiddenParallelEmbedding(torch.nn.Module):
    """Language model embeddings.

    Arguments:
        hidden_size: hidden size
        vocab_size: vocabulary size
        max_sequence_length: maximum size of sequence. This
                             is used for positional embedding
        embedding_dropout_prob: dropout probability for embeddings
        dtype: dtype of the embedding weights
        padding_idx: index of the padding token in the vocabulary
        num_tokentypes: size of the token-type embeddings. 0 value
                        will ignore this embedding
    """

    def __init__(
        self,
        hidden_size,
        vocab_size,
        max_sequence_length,
        embedding_dropout_prob,
        dtype=torch.float,
        padding_idx: int = 0,
        num_tokentypes=0,
    ):
        super(HiddenParallelEmbedding, self).__init__()

        self.hidden_size = hidden_size
        self.num_tokentypes = num_tokentypes

        # Word embeddings (parallel).
        self.word_embeddings = HiddenParallelEmbedding1D(vocab_size, hidden_size, dtype, padding_idx)
        self._word_embeddings_key = "word_embeddings"

        # Position embedding (serial).
        self.position_embeddings = torch.nn.Embedding(max_sequence_length, self.hidden_size)
        self._position_embeddings_key = "position_embeddings"
        # Initialize the position embeddings.
        # self.init_method(self.position_embeddings.weight)

        # Token type embedding.
        # Add this as an optional field that can be added through
        # method call so we can load a pretrain model without
        # token types and add them as needed.
        self._tokentype_embeddings_key = "tokentype_embeddings"
        if self.num_tokentypes > 0:
            self.tokentype_embeddings = torch.nn.Embedding(self.num_tokentypes, self.hidden_size)
            # Initialize the token-type embeddings.
            # self.init_method(self.tokentype_embeddings.weight)
        else:
            self.tokentype_embeddings = None

        # Embeddings dropout
        self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)

    def zero_parameters(self):
        """Zero out all parameters in embedding."""
        self.word_embeddings.weight.data.fill_(0)
        self.word_embeddings.weight.shared = True
        self.position_embeddings.weight.data.fill_(0)
        self.position_embeddings.weight.shared = True
        if self.num_tokentypes > 0:
            self.tokentype_embeddings.weight.data.fill_(0)
            self.tokentype_embeddings.weight.shared = True

    def add_tokentype_embeddings(self, num_tokentypes):
        """Add token-type embedding. This function is provided so we can add
        token-type embeddings in case the pretrained model does not have it.
        This allows us to load the model normally and then add this embedding.
        """
        if self.tokentype_embeddings is not None:
            raise Exception("tokentype embeddings is already initialized")
        if torch.distributed.get_rank() == 0:
            print("adding embedding for {} tokentypes".format(num_tokentypes), flush=True)
        self.num_tokentypes = num_tokentypes
        self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
        # Initialize the token-type embeddings.
        # self.init_method(self.tokentype_embeddings.weight)

    def forward(self, input_ids, position_ids=None, tokentype_ids=None):
        if input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        words_embeddings = self.word_embeddings(input_ids)

        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        else:
            position_ids = torch.arange(input_shape[-1], dtype=torch.long, device=get_current_device())
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        position_embeddings = self.position_embeddings(position_ids)

        embeddings = words_embeddings + position_embeddings

        # Dropout.
        with seed(ParallelMode.TENSOR):
            embeddings = self.embedding_dropout(embeddings)
        return embeddings

    def state_dict_for_save_checkpoint(self, destination=None, prefix="", keep_vars=False):
        """For easy load."""

        state_dict_ = {}
        state_dict_[self._word_embeddings_key] = self.word_embeddings.state_dict(destination, prefix, keep_vars)
        state_dict_[self._position_embeddings_key] = self.position_embeddings.state_dict(destination, prefix, keep_vars)
        if self.num_tokentypes > 0:
            state_dict_[self._tokentype_embeddings_key] = self.tokentype_embeddings.state_dict(
                destination, prefix, keep_vars
            )

        return state_dict_

    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""

        # Word embedding.
        if self._word_embeddings_key in state_dict:
            state_dict_ = state_dict[self._word_embeddings_key]
        else:
            # for backward compatibility.
            state_dict_ = {}
            for key in state_dict.keys():
                if "word_embeddings" in key:
                    state_dict_[key.split("word_embeddings.")[1]] = state_dict[key]
        self.word_embeddings.load_state_dict(state_dict_, strict=strict)

        # Position embedding.
        if self._position_embeddings_key in state_dict:
            state_dict_ = state_dict[self._position_embeddings_key]
        else:
            # for backward compatibility.
            state_dict_ = {}
            for key in state_dict.keys():
                if "position_embeddings" in key:
                    state_dict_[key.split("position_embeddings.")[1]] = state_dict[key]
        self.position_embeddings.load_state_dict(state_dict_, strict=strict)

        # Tokentype embedding.
        if self.num_tokentypes > 0:
            state_dict_ = {}
            if self._tokentype_embeddings_key in state_dict:
                state_dict_ = state_dict[self._tokentype_embeddings_key]
            else:
                # for backward compatibility.
                for key in state_dict.keys():
                    if "tokentype_embeddings" in key:
                        state_dict_[key.split("tokentype_embeddings.")[1]] = state_dict[key]
            if len(state_dict_.keys()) > 0:
                self.tokentype_embeddings.load_state_dict(state_dict_, strict=strict)
            else:
                print(
                    "***WARNING*** expected tokentype embeddings in the checkpoint but could not find it",
                    flush=True,
                )


class HiddenParallelEmbedding1D(torch.nn.Module):
    """Embedding parallelized in the vocabulary dimension.

    This is mainly adapted from torch.nn.Embedding and all the default
    values are kept.
    Arguments:
        num_embeddings: vocabulary size.
        embedding_dim: size of hidden state.
        init_method: method to initialize weights.
    """

    def __init__(self, num_embeddings, embedding_dim, dtype=torch.float, padding_idx: int = None, init_method=None):
        super(HiddenParallelEmbedding1D, self).__init__()
        # Keep the input dimensions.
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        embed_dim_per_partition = divide(embedding_dim, gpc.tensor_parallel_size)
        # Set the details for compatibility.
        self.padding_idx = padding_idx
        self.max_norm = None
        self.norm_type = 2.0
        self.scale_grad_by_freq = False
        self.sparse = False
        self._weight = None

        # Allocate weights and initialize.
        factory_kwargs = {"device": get_current_device(), "dtype": dtype}
        self.weight = Parameter(torch.empty(num_embeddings, embed_dim_per_partition, **factory_kwargs))
        init.uniform_(self.weight, -1, 1)

    def forward(self, input_):
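        # Each rank embeds into its slice of the hidden dimension; the gather along
        # dim=-1 below reassembles the full hidden vector on every rank.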
        # Get the embeddings.
        output_parallel = F.embedding(
            input_, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse
        )

        # Gather the hidden-dimension shards from all the model parallel GPUs.
        output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
        return output


@LAYERS.register_module
class HiddenParallelGPTLMHead1D(ParallelLayer):
    """
    Language model head that shares the same parameters with the embedding matrix.
    """

    def __init__(
        self,
        embed=None,
        embed_dim=None,
        vocab_size=None,
        dtype=None,
    ):
        super().__init__()
        if embed is not None:
            self.head = embed
            self.synced_embed = True
        else:
            # self.embedding = HiddenParallelEmbedding1D(vocab_size, hidden_size, dtype, padding_idx)
            # (hidden_size/q, vocab_size)
            self.synced_embed = False
            self.head = Linear1D_Row(
                in_features=embed_dim, out_features=vocab_size, bias=False, dtype=dtype, parallel_input=False
            )

    def forward(self, x: Tensor) -> Tensor:
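        # If the head is tied to the embedding, project with its weight directly;
        # otherwise run the row-parallel Linear1D_Row head defined in __init__.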
        if self.synced_embed:
            x = F.linear(x, self.head.weight)
        else:
            x = self.head(x)

        return x