"tests/test_class_sh_module_local.py" did not exist on "ce626e09f55678278a8e359621f8d97b3bc5991e"
module.py 56.2 KB
Newer Older
1
# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
3
4
5
6
7
8
#
# See LICENSE for license information.
"""
Wrapper module for Transformer related layers with FP8 support.
"""
import functools
import operator
9
import warnings
10
11
12
13
14
15
16
17
18
from typing import Any, Callable, Iterable, List, Sequence, Tuple, Union

import jax.numpy as jnp
import numpy as np
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
from jax import lax
from jax import nn as jax_nn
from jax import random as jax_random
19
from jax.ad_checkpoint import checkpoint_name
20

21
22
from ..dot import type_safe_dot_general
from ..fp8 import FP8Helper, FP8MetaPackage
23
24
from ..layernorm import canonicalize_layernorm_type
from ..layernorm import layernorm, layernorm_fp8_dot
25
from ..mlp import fused_layernorm_fp8_mlp, activation_lu
26
27
from ..softmax import is_softmax_kernel_available
from ..softmax import softmax, SoftmaxType
28
from ..sharding import with_sharding_constraint_by_logical_axes
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49

PRNGKey = Any
Shape = Tuple[int, ...]
DType = jnp.dtype
Array = jnp.ndarray
PrecisionLike = Union[None, str, lax.Precision, Tuple[str, str], Tuple[lax.Precision,
                                                                       lax.Precision]]
Initializer = Callable[[PRNGKey, Shape, DType], Array]


def _normalize_axes(axes: Iterable[int], ndim: int) -> Tuple[int]:
    # A tuple by convention. len(axes_tuple) then also gives the rank efficiently.
    return tuple(ax if ax >= 0 else ndim + ax for ax in axes)


def _canonicalize_tuple(x):
    if isinstance(x, Iterable):
        return tuple(x)
    return (x,)


def _obtain_default_layernorm_scale_init_if_need(original_init, zero_centered_gamma):
    if original_init is None:
        if not zero_centered_gamma:
            return nn.initializers.ones
        return nn.initializers.zeros
    return original_init


def _create_layernorm_parameters(layernorm_type, shape, scale_init, scale_axes, bias_init,
                                 bias_axes, dtype):
    scale = nn_partitioning.param_with_axes('scale',
                                            scale_init,
                                            shape,
                                            jnp.float32,
                                            axes=scale_axes)
    scale = jnp.asarray(scale, dtype)

    layernorm_type = canonicalize_layernorm_type(layernorm_type)
    if layernorm_type == 'layernorm':
        bias = nn_partitioning.param_with_axes('ln_bias',
                                               bias_init,
                                               shape,
                                               jnp.float32,
                                               axes=bias_axes)
        bias = jnp.asarray(bias, dtype)
    else:
        assert layernorm_type == 'rmsnorm'
        bias = None

    return scale, bias


def _convert_to_activation_function(fn_or_string: Union[str, Callable]) -> Callable:
    """Convert a string to an activation function."""
    if fn_or_string == 'linear':
        return lambda x: x
    if isinstance(fn_or_string, str):
        return getattr(nn, fn_or_string)
    if callable(fn_or_string):
        return fn_or_string

    raise ValueError(f"don't know how to convert {fn_or_string} to an activation function")


def _combine_biases(*masks: List[Array]):
    """Combine attention biases."""
    masks = [m for m in masks if m is not None]
    if not masks:
        return None
    assert all(map(lambda x: x.ndim == masks[0].ndim,
                   masks)), (f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks))}')
    mask, *other_masks = masks
    for other_mask in other_masks:
        mask = mask + other_mask
    return mask


def _apply_low_rank_adaptation(x, axis, features, lora_a_kernel, lora_b_kernel, alpha):
    """Low Rank Adaptation Implementation"""

    assert len(axis) <= 5
    hidden_in_names = 'ijklm'[:len(axis)]
    assert len(features) <= 5
    hidden_out_names = 'nopqr'[:len(features)]
    rank_name = 's'

    assert lora_a_kernel.shape[-1] == lora_b_kernel.shape[-2]
    rank = lora_a_kernel.shape[-1]
    scaling = alpha / rank if alpha is not None else 1.0

    x_einsum_express = f"...{hidden_in_names}"
    lora_a_einsum_express = f"{hidden_in_names}{hidden_out_names[:-1]}{rank_name}"
    lora_b_einsum_express = f"{hidden_out_names[:-1]}{rank_name}{hidden_out_names[-1]}"
    output_einsum_express = f"...{hidden_out_names}"
    final_einsum_express = f"{x_einsum_express},{lora_a_einsum_express},{lora_b_einsum_express}" \
                           f"->{output_einsum_express}"

    output = jnp.einsum(final_einsum_express, x, lora_a_kernel, lora_b_kernel)
    output = output * scaling
    return output
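
# A minimal, illustrative sketch (kept as a comment, not executed) of what
# _apply_low_rank_adaptation computes in the common single-axis case: with one
# contracting axis and one output feature axis the einsum reduces to
# x @ lora_a @ lora_b scaled by alpha / rank. The shapes below are assumptions
# chosen only for this example.
#
#     x = jnp.ones((4, 16, 128))          # (batch, seqlen, hidden_in)
#     lora_a = jnp.ones((128, 8))         # (hidden_in, rank)
#     lora_b = jnp.ones((8, 256))         # (rank, hidden_out)
#     out = _apply_low_rank_adaptation(x, (-1,), (256,), lora_a, lora_b, alpha=16)
#     # out.shape == (4, 16, 256); equivalent to (x @ lora_a @ lora_b) * (16 / 8)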


class Softmax(nn.Module):    # pylint: disable=too-few-public-methods
    r"""
    Applies softmax over a mini-batch of inputs.
    The input's shape should be [batch, heads, q_seqlen, k_seqlen].

    .. code-block:: python
        shifted_input = input + bias
        masked_scaled = (1 - mask)*(shifted_input * scale_factor)
        softmax_mask = mask * -1e-10
        output = softmax(masked_scaled + softmax_mask)

    Parameters
    ----------
    scale_factor : float, default = 1.0
        Scalar for the input to softmax.
    softmax_type : SoftmaxType, default = SoftmaxType.SCALED
        Indicate the type of softmax.
    """

    scale_factor: float = 1.0
    softmax_type: SoftmaxType = SoftmaxType.SCALED

    @nn.compact
    def __call__(self, inputs: Array, mask: Array = None, bias: Array = None) -> jnp.ndarray:
        batch = inputs.shape[0]
        heads = inputs.shape[1]
        q_seqlen = inputs.shape[2]
        k_seqlen = inputs.shape[3]
        dtype = inputs.dtype
        logits = inputs

        if (self.softmax_type is not SoftmaxType.SCALED and is_softmax_kernel_available(
                self.softmax_type, batch, heads, q_seqlen, k_seqlen, inputs.dtype)):

            if bias is not None:
                logits = logits + bias.astype(dtype)

            mask_ = mask
            if self.softmax_type is not SoftmaxType.SCALED_MASKED:
                mask_ = None

            outputs = softmax(logits, mask_, self.scale_factor, self.softmax_type)
        else:
            attention_bias = None
            if mask is not None:
                attention_bias = lax.select(mask > 0,
                                            jnp.full(mask.shape, -1e10).astype(dtype),
                                            jnp.full(mask.shape, 0.).astype(dtype))

            if bias is not None:
                attention_bias = _combine_biases(attention_bias, bias)

            if attention_bias is not None:
                logits = logits + attention_bias.astype(dtype)

            # If the kernel for self.softmax_type (e.g. SCALED_UPPER_TRIANG_MASKED) is
            # unavailable, fall back to the pure scaled softmax custom call.
            if is_softmax_kernel_available(SoftmaxType.SCALED, batch, heads, q_seqlen, k_seqlen,
                                           dtype):
                outputs = softmax(logits, None, self.scale_factor, SoftmaxType.SCALED)
            else:
                outputs = jax_nn.softmax(logits * self.scale_factor)

        return outputs
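
# A minimal usage sketch for Softmax (comment only); the shapes and scale factor
# are illustrative assumptions. The module has no learnable parameters, so the
# variables returned by init are empty apart from bookkeeping collections.
#
#     logits = jnp.zeros((2, 8, 128, 128))    # [batch, heads, q_seqlen, k_seqlen]
#     softmax_module = Softmax(scale_factor=0.125, softmax_type=SoftmaxType.SCALED)
#     variables = softmax_module.init(jax_random.PRNGKey(0), logits)
#     probs = softmax_module.apply(variables, logits)    # same shape as logits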


class LayerNorm(nn.Module):    # pylint: disable=too-few-public-methods
    r"""
    Applies layer normalization over a mini-batch of inputs.
    There are two types of normalization supported by this module,
    regular and root mean square layer normalization.

    The regular layer normalization is as described in
    the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__

    .. math::
        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    :math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
    size of each input sample.

    The root mean square layer normalization (RMSNorm) is as described in
    the paper `Root Mean Square Layer Normalization <https://arxiv.org/abs/1910.07467>`__

    .. math::
        y = \frac{x}{ \mathrm{RMS}[x] + \epsilon} * \gamma

    .. math::
        RMS = \sqrt{\mathrm{E}[x^2]}

    :math:`\gamma` is a learnable affine transform parameter of
    size of each input sample.

    Parameters
    ----------
    epsilon : float, default = 1e-6
        A value added to the denominator of layer normalization for numerical stability.
    layernorm_type : {'layernorm', 'rmsnorm'}, default = 'layernorm'
        Indicate the type of layer normalization.
    zero_centered_gamma : bool, default = False
        If set to `True`, the LayerNorm formula changes to

        .. math::
            y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} *
            (1 + \gamma) + \beta

        This parameter is only applicable for 'layernorm'.
        The default of `scale_init` will also be changed. See `scale_init`.
    scale_init : Initializer, default = None
        Used for initializing scale factors :math:`\gamma`.
        If `None` is provided, scale_init is set according to the value of zero_centered_gamma.
        If zero_centered_gamma is set to `True`, then scale_init is `flax.linen.initializers.zeros`.
        Otherwise, scale_init is `flax.linen.initializers.ones`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    scale_axes : Tuple[str, ...], default = ('embed', )
        The name of axes used to shard the scale factors :math:`\gamma` with a corresponding mesh.
    bias_init : Initializer, default = flax.linen.initializers.zeros
        Used for initializing shift factors :math:`\beta`,
        only used when :attr:`layernorm_type='layernorm'`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    bias_axes : Tuple[str, ...], default = ('embed', )
        The name of axes used to shard the shift factors :math:`\beta` with a corresponding mesh,
        only used when :attr:`layernorm_type='layernorm'`.

    Optimization parameters
    -----------------------
    dtype : jax.numpy.dtype, default  = jax.numpy.float32
        the data type used to allocate the initial parameters.
    transpose_batch_sequence : bool, default = False
        Indicate whether the input tensors were switched axis of batch
        and sequence length dimension. If set to True, the input tensors
        should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
    """
    epsilon: float = 1e-6
    layernorm_type: str = 'layernorm'
    zero_centered_gamma: bool = False
    scale_init: Initializer = None
    scale_axes: Tuple[str, ...] = ('embed',)
    bias_init: Initializer = nn.initializers.zeros
    bias_axes: Tuple[str, ...] = ('embed',)
    dtype: DType = jnp.float32
    transpose_batch_sequence: bool = False
    sharding_type = None

    def __post_init__(self):
        self.scale_init = _obtain_default_layernorm_scale_init_if_need(
            self.scale_init, self.zero_centered_gamma)
        super().__post_init__()

    @nn.compact
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        """
        Applies layer normalization to the input :attr:`inputs`.

        Parameters
        ----------
        inputs : jax.numpy.ndarray
            Input tensors.

        Returns
        -------
        outputs : jax.numpy.ndarray
            Output tensors.
        """
        warnings.warn("sharding_type of LayerNorm would be removed in the near feature",
                      DeprecationWarning)

        features = x.shape[-1]
        scale, ln_bias = _create_layernorm_parameters(self.layernorm_type, (features,),
                                                      self.scale_init, self.scale_axes,
                                                      self.bias_init, self.bias_axes, self.dtype)
        return layernorm(x,
                         scale,
                         ln_bias,
                         layernorm_type=self.layernorm_type,
                         zero_centered_gamma=self.zero_centered_gamma,
                         epsilon=self.epsilon)
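
# A minimal usage sketch for LayerNorm (comment only); shapes are illustrative
# assumptions. init creates the 'scale' parameter (and 'ln_bias' when
# layernorm_type='layernorm') together with its logical-axis annotation.
#
#     x = jnp.ones((8, 128, 512))             # (batch, seqlen, hidden)
#     ln = LayerNorm(layernorm_type='rmsnorm', epsilon=1e-6)
#     variables = ln.init(jax_random.PRNGKey(0), x)
#     y = ln.apply(variables, x)              # same shape as x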


class TransformerEngineBase(nn.Module):
    """
    Base class of transformer engine
    """

    @staticmethod
    def get_fp8_metas(num_of_gemm: int) -> List[jnp.ndarray]:
        """
        Get the FP8 metas
        """
        num_of_meta = num_of_gemm * FP8Helper.NUM_META_PER_GEMM
        axes = ('fp8_meta_axis', 'fp8_meta_history')

        fp8_max = nn_partitioning.variable_with_axes(FP8Helper.FP8_COLLECTION_NAME,
                                                     FP8Helper.FP8_MAX_NAME,
                                                     FP8Helper.generate_fp8_max_array,
                                                     num_of_meta,
                                                     axes=axes)
        fp8_metas_amax = nn_partitioning.variable_with_axes(
            FP8Helper.FP8_COLLECTION_NAME,
            FP8Helper.FP8_AMAX_NAME,
            jnp.zeros, (num_of_meta, FP8Helper.AMAX_HISTORY_LEN),
            jnp.float32,
            axes=axes)
        fp8_metas_scale = nn_partitioning.variable_with_axes(FP8Helper.FP8_COLLECTION_NAME,
                                                             FP8Helper.FP8_SCALE_NAME,
                                                             jnp.ones, (num_of_meta, 1),
                                                             jnp.float32,
                                                             axes=axes)
        fp8_metas_scale_inv = nn_partitioning.variable_with_axes(FP8Helper.FP8_COLLECTION_NAME,
                                                                 FP8Helper.FP8_SCALE_INV_NAME,
                                                                 jnp.ones, (num_of_meta, 1),
                                                                 jnp.float32,
                                                                 axes=axes)

        return fp8_max.value, fp8_metas_amax.value, fp8_metas_scale.value, fp8_metas_scale_inv.value

    @staticmethod
    def get_fp8_meta_package(num_of_gemm: int) -> FP8MetaPackage:
        """
        Get the FP8 meta package
        """
        fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv = \
            TransformerEngineBase.get_fp8_metas(num_of_gemm)

        return FP8MetaPackage(num_of_gemm, fp8_max, fp8_metas_amax, fp8_metas_scale,
                              fp8_metas_scale_inv)


class DenseGeneral(TransformerEngineBase):
    """
    Applies a linear transformation to the incoming data :math:`y = xA^T + b`

    Parameters
    ----------
    features : Union[Iterable[int], int]
        The hidden size of each output sample.
    kernel_init : Initializer, default =
        flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
        Used for initializing weights.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    kernel_axes : Tuple[str, ...], default = ()
        The name of axes used to shard the weights with a corresponding mesh.
    use_bias: bool, default = True
        Indicate whether to enable bias shifting.
        If set to False, the layer will not learn an additive bias.
    bias_init: Initializer, default = flax.linen.initializers.zeros
        Used for initializing bias, only used when :attr:`use_bias=True`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    bias_axes: Tuple[str, ...], default = ()
        The name of axes used to shard bias with a corresponding mesh,
        only used when :attr:`use_bias=True`.
    enable_low_rank_adaptation: bool, default = False
        Indicate whether to enable low rank adaptation for each linear layer.
    low_rank_adaptation_dim: int, default = 32
        The dimension for low rank adaptation, only used when
        :attr:`enable_low_rank_adaptation=True`
    low_rank_adaptation_alpha: float, default = None
        The alpha for computing the scaling factor of LoRA output.
        :math:`\frac{alpha}{rank} * lora_output`. None means no scaling.
    axis:  Union[Iterable[int], int], default = -1
        An integer tuple with axes to apply the transformation on.

    Optimization parameters
    -----------------------
    dtype : jax.numpy.dtype, default  = jax.numpy.float32
        The data type used to allocate the initial parameters.
    transpose_batch_sequence : bool, default = False
        Indicate whether the input tensors were switched axis of batch
        and sequence length dimension. If set to True, the input tensors
        should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
    """

    features: Union[Iterable[int], int]
    kernel_init: Initializer = None
    kernel_axes: Tuple[str, ...] = ()
    use_bias: bool = True
    bias_init: Initializer = nn.initializers.zeros
    bias_axes: Tuple[str, ...] = ()
    enable_low_rank_adaptation: bool = False
    low_rank_adaptation_dim: int = 32
    low_rank_adaptation_alpha: float = None
    axis: Union[Iterable[int], int] = -1
    dtype: DType = jnp.float32
    transpose_batch_sequence: bool = False
    sharding_type = None

    def __post_init__(self):
        if self.kernel_init is None:
            self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
        super().__post_init__()

    @nn.compact
    def __call__(self, inputs: Array) -> Array:
        """
        Apply the linear transformation to the input.

        Parameters
        ----------
        inputs : jax.numpy.ndarray
            Input tensors.

        Returns
        -------
        outputs : jax.numpy.ndarray
            Output tensors.
        """
        warnings.warn("sharding_type of DenseGeneral would be removed in the near feature",
                      DeprecationWarning)

        features = _canonicalize_tuple(self.features)
        axis = _canonicalize_tuple(self.axis)

        inputs = jnp.asarray(inputs, self.dtype)
        axis = _normalize_axes(axis, inputs.ndim)

        kernel_shape = tuple(inputs.shape[ax] for ax in axis) + features
        kernel_param_shape = (np.prod([inputs.shape[ax] for ax in axis]),) + features
        kernel = nn_partitioning.param_with_axes('kernel',
                                                 self.kernel_init,
                                                 kernel_param_shape,
                                                 jnp.float32,
                                                 axes=self.kernel_axes)

        kernel = jnp.reshape(kernel, kernel_shape)

        if self.use_bias:
            bias = nn_partitioning.param_with_axes('bias',
                                                   self.bias_init,
                                                   features,
                                                   jnp.float32,
                                                   axes=self.bias_axes)
            bias = bias.astype(self.dtype)
        else:
            bias = None

        contract_ind = tuple(range(0, len(axis)))
        fp8_gemm_pkg = None
        if FP8Helper.is_fp8_enabled():
            fp8_gemm_pkg = \
                    TransformerEngineBase.get_fp8_meta_package(1)

        y = type_safe_dot_general(inputs,
                                  kernel,
                                  fp8_meta_pkg=fp8_gemm_pkg,
                                  contracting_dims=(axis, contract_ind))

        if self.enable_low_rank_adaptation:
            lora_a_kernel_shape = (*kernel_shape[:len(axis)], *features[:-1],
                                   self.low_rank_adaptation_dim)
            lora_a_kernel_init_shape = (kernel_param_shape[0], *features[:-1],
                                        self.low_rank_adaptation_dim)
            lora_a_kernel_axes = (None,) * len(lora_a_kernel_init_shape)
            lora_a_kernel = nn_partitioning.param_with_axes('lora_a_kernel',
                                                            self.kernel_init,
                                                            lora_a_kernel_init_shape,
                                                            jnp.float32,
                                                            axes=lora_a_kernel_axes)
            lora_a_kernel = jnp.reshape(lora_a_kernel, lora_a_kernel_shape)
            lora_a_kernel = lora_a_kernel.astype(self.dtype)

            lora_b_kernel_shape = (*features[:-1], self.low_rank_adaptation_dim, features[-1])
            lora_b_kernel_axes = (None,) * len(lora_b_kernel_shape)
            lora_b_kernel = nn_partitioning.param_with_axes('lora_b_kernel',
                                                            nn.initializers.zeros,
                                                            lora_b_kernel_shape,
                                                            jnp.float32,
                                                            axes=lora_b_kernel_axes)
            lora_b_kernel = lora_b_kernel.astype(self.dtype)

            y += _apply_low_rank_adaptation(inputs, axis, features, lora_a_kernel, lora_b_kernel,
                                            self.low_rank_adaptation_alpha)

        if bias is not None:
            bias_shape = (1,) * (y.ndim - bias.ndim) + bias.shape
            y += jnp.reshape(bias, bias_shape)
        return y
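
# A minimal usage sketch for DenseGeneral (comment only); shapes and axis names
# are illustrative assumptions. When FP8Helper.is_fp8_enabled() is true (e.g.
# inside this package's FP8 autocast context), init additionally creates the
# FP8 meta collection used by type_safe_dot_general.
#
#     x = jnp.ones((8, 128, 512))             # (batch, seqlen, hidden)
#     dense = DenseGeneral(features=1024, use_bias=True, kernel_axes=('embed', 'mlp'))
#     variables = dense.init(jax_random.PRNGKey(0), x)
#     y = dense.apply(variables, x)           # (8, 128, 1024)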


class LayerNormDenseGeneral(TransformerEngineBase):
    r"""
    Applies layer normalization followed by linear transformation to the incoming data.

    Parameters
    ----------
    features : Union[Iterable[int], int]
        The hidden size of each output sample.
    enable_layernorm: bool, default = True
        Indicate whether to enable layer normalization before linear transformation.
    layernorm_type : {'layernorm', 'rmsnorm'}, default = 'layernorm'
        Indicate the type of layer normalization.
    epsilon : float, default = 1e-6
        A value added to the denominator of layer normalization for numerical stability.
    zero_centered_gamma : bool, default = False
        If set to `True`, the LayerNorm formula changes to

        .. math::
            y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} *
            (1 + \gamma) + \beta

        This parameter is only applicable for 'layernorm'.
        The default of `scale_init` will also be changed. See `scale_init`
    scale_init : Initializer, default = None
        Used for initializing scale factors :math:`\gamma`.
        If `None` is provided, scale_init is set according to the value of zero_centered_gamma.
        If zero_centered_gamma is set to `True`, then scale_init is `flax.linen.initializers.zeros`.
        Otherwise, scale_init is `flax.linen.initializers.ones`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    scale_axes : Tuple[str, ...], default = ('embed', )
        The name of axes used to shard the scale factors :math:`\gamma` with a corresponding mesh,
        only used when :attr:`enable_layernorm=True`.
    ln_bias_init: Initializer, default = flax.linen.initializers.zeros
        Used for initializing shift factors :math:`\beta`,
        only used when :attr:`enable_layernorm=True` and :attr:`layernorm_type='layernorm'`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    ln_bias_axes: Tuple[str, ...], default = ('embed', )
        The name of axes used to shard the shift factors :math:`\beta` with a corresponding mesh.
        It is only used when :attr:`enable_layernorm=True` and :attr:`layernorm_type='layernorm'`.
    kernel_init : Initializer, default =
        flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
        Used for initializing weights.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    kernel_axes : Tuple[str, ...], default = ()
        The name of axes used to shard the weights with a corresponding mesh.
    use_bias: bool, default = False
        Indicate whether to enable bias shifting.
        If set to False, the layer will not learn an additive bias.
    bias_init: Initializer, default = flax.linen.initializers.zeros
        Used for initializing bias, only used when :attr:`use_bias=True`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    bias_axes: Tuple[str, ...], default = ()
        The name of axes used to shard bias with a corresponding mesh,
        only used when :attr:`use_bias=True`.
    return_layernorm_output: bool, default = True
        Indicate whether to return the output of layer normalization.
        If set False, return None as the second tensor in outputs.
    enable_low_rank_adaptation: bool, default = False
        Indicate whether to enable low rank adaptation for each linear layer.
    low_rank_adaptation_dim: int, default = 32
        The dimension for low rank adaptation, only used when
        :attr:`enable_low_rank_adaptation=True`
    low_rank_adaptation_alpha: float, default = None
        The alpha for computing the scaling factor of LoRA output.
        :math:`\frac{alpha}{rank} * lora_output`. None means no scaling.
    axis:  Union[Iterable[int], int], default = -1
        An integer tuple with axes to apply the transformation on.
    layernorm_input_axes: Tuple[str, ...], default = None
        Indicate the logical axes of sharding constraint to the input of layernorm, like
        (BATCH_AXES, SEQLEN_AXES, HIDDEN_AXES). Default is None, which means not to insert
        sharding constraint.
    dot_input_axes: Tuple[str, ...], default = None
        Indicate the logical axes of sharding constraint to the input of dot, like
        (BATCH_AXES, SEQLEN_AXES, HIDDEN_AXES). Default is None, which means not to insert
        sharding constraint.

    Optimization parameters
    -----------------------
    dtype : jax.numpy.dtype, default  = jax.numpy.float32
        The data type used to allocate the initial parameters.
    transpose_batch_sequence : bool, default = True
        Indicate whether the input tensors were switched axis of batch
        and sequence length dimension. If set to True, the input tensors
        should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
    depth_scaling: float, default = None
        The factor to scale the output from `DenseGeneral`. It should be a float
        value or None. When None is set, then no scaling is applied.
    """

    features: Union[Iterable[int], int]
    enable_layernorm: bool = True
    layernorm_type: str = 'layernorm'
    epsilon: float = 1e-6
    zero_centered_gamma: bool = False
    scale_init: Initializer = None
    scale_axes: Tuple[str, ...] = ('embed',)
    ln_bias_init: Initializer = nn.initializers.zeros
    ln_bias_axes: Tuple[str, ...] = ('embed',)
    kernel_init: Initializer = None
    kernel_axes: Tuple[str, ...] = ()
    use_bias: bool = False
    bias_init: Initializer = nn.initializers.zeros
    bias_axes: Tuple[str, ...] = ()
    return_layernorm_output: bool = True
    enable_low_rank_adaptation: bool = False
    low_rank_adaptation_dim: int = 32
    low_rank_adaptation_alpha: float = None
    axis: Union[Iterable[int], int] = -1
    dtype: DType = jnp.float32
    transpose_batch_sequence: bool = True
    layernorm_input_axes: Tuple[str, ...] = None
    dot_input_axes: Tuple[str, ...] = None
    depth_scaling: float = None
    sharding_type = None

    def __post_init__(self):
        if self.kernel_init is None:
            self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
        self.scale_init = _obtain_default_layernorm_scale_init_if_need(
            self.scale_init, self.zero_centered_gamma)
        super().__post_init__()

    @nn.compact
    def __call__(self, inputs: Array) -> Array:
        """
        Apply layer normalization to the input followed by a linear transformation.

        Parameters
        ----------
        inputs: jax.numpy.ndarray
            Input tensor.

        Returns
        -------
        outputs : jax.numpy.ndarray
            Output tensors.
        ln_outputs: jax.numpy.ndarray
            The output tensors of layer normalization.
            If :attr:`return_layernorm_output=False`, then this would be None.
        """
        warnings.warn("sharding_type of LayerNormDenseGeneral would be removed in the near feature",
                      DeprecationWarning)

        ln_output = None

        fuse_layernorm = FP8Helper.is_fp8_enabled(
        ) and not self.return_layernorm_output and self.enable_layernorm

        if self.enable_layernorm:
            inputs = with_sharding_constraint_by_logical_axes(inputs, self.layernorm_input_axes)

            assert self.axis == -1    # Only support axis == -1 at this moment
            features = inputs.shape[-1]

            scale, ln_bias = _create_layernorm_parameters(self.layernorm_type, (features,),
                                                          self.scale_init, self.scale_axes,
                                                          self.ln_bias_init, self.ln_bias_axes,
                                                          self.dtype)

            if not fuse_layernorm:
                y = layernorm(inputs,
                              scale,
                              ln_bias,
                              layernorm_type=self.layernorm_type,
                              zero_centered_gamma=self.zero_centered_gamma,
                              epsilon=self.epsilon)
            else:
                assert not self.return_layernorm_output
                y = inputs
        else:
            y = inputs

        if self.return_layernorm_output:
            ln_output = y

        # DenseGeneral
        features = _canonicalize_tuple(self.features)
        axis = _canonicalize_tuple(self.axis)

        axis = _normalize_axes(axis, y.ndim)

        kernel_shape = tuple(y.shape[ax] for ax in axis) + features
        kernel_param_shape = (np.prod([inputs.shape[ax] for ax in axis]),) + features
        kernel = nn_partitioning.param_with_axes('kernel',
                                                 self.kernel_init,
                                                 kernel_param_shape,
                                                 jnp.float32,
                                                 axes=self.kernel_axes)

        kernel = jnp.reshape(kernel, kernel_shape)

        contract_ind = tuple(range(0, len(axis)))

        fp8_meta_package = None
        if FP8Helper.is_fp8_enabled():
            fp8_meta_package = \
                    TransformerEngineBase.get_fp8_meta_package(1)

        if fuse_layernorm:
            z = layernorm_fp8_dot(y,
                                  kernel,
                                  scale,
                                  ln_bias,
                                  fp8_meta_package,
                                  self.layernorm_type,
                                  zero_centered_gamma=self.zero_centered_gamma,
                                  epsilon=self.epsilon,
                                  layernorm_input_axes=self.layernorm_input_axes,
                                  dot_input_axes=self.dot_input_axes)
        else:
            y = with_sharding_constraint_by_logical_axes(y, self.dot_input_axes)
            z = type_safe_dot_general(y,
                                      kernel,
                                      fp8_meta_pkg=fp8_meta_package,
                                      contracting_dims=(axis, contract_ind))

        if self.enable_low_rank_adaptation:
            lora_a_kernel_shape = (*kernel_shape[:len(axis)], *features[:-1],
                                   self.low_rank_adaptation_dim)
            lora_a_kernel_init_shape = (kernel_param_shape[0], *features[:-1],
                                        self.low_rank_adaptation_dim)
            lora_a_kernel_axes = (None,) * len(lora_a_kernel_init_shape)
            lora_a_kernel = nn_partitioning.param_with_axes('lora_a_kernel',
                                                            self.kernel_init,
                                                            lora_a_kernel_init_shape,
                                                            jnp.float32,
                                                            axes=lora_a_kernel_axes)
            lora_a_kernel = jnp.reshape(lora_a_kernel, lora_a_kernel_shape)
            lora_a_kernel = lora_a_kernel.astype(self.dtype)

            lora_b_kernel_shape = (*features[:-1], self.low_rank_adaptation_dim, features[-1])
            lora_b_kernel_axes = (None,) * len(lora_b_kernel_shape)
            lora_b_kernel = nn_partitioning.param_with_axes('lora_b_kernel',
                                                            nn.initializers.zeros,
                                                            lora_b_kernel_shape,
                                                            jnp.float32,
                                                            axes=lora_b_kernel_axes)
            lora_b_kernel = lora_b_kernel.astype(self.dtype)

            z += _apply_low_rank_adaptation(y, axis, features, lora_a_kernel, lora_b_kernel,
                                            self.low_rank_adaptation_alpha)

        bias = None
        if self.use_bias:
            bias = nn_partitioning.param_with_axes('bias',
                                                   self.bias_init,
                                                   features,
                                                   jnp.float32,
                                                   axes=self.bias_axes)
            bias = bias.astype(self.dtype)

        if bias is not None:
            bias_shape = (1,) * (z.ndim - bias.ndim) + bias.shape
            z += jnp.reshape(bias, bias_shape)

        if self.depth_scaling is not None:
            z = z / self.depth_scaling

        return z, ln_output    # dense_output, layer_norm_output
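
# A minimal usage sketch for LayerNormDenseGeneral (comment only); shapes are
# illustrative assumptions. The module returns a pair: the dense output and,
# when return_layernorm_output=True, the normalized input (otherwise None).
#
#     x = jnp.ones((128, 8, 512))             # (seqlen, batch, hidden) since transpose_batch_sequence=True
#     ln_dense = LayerNormDenseGeneral(features=1024, return_layernorm_output=True)
#     variables = ln_dense.init(jax_random.PRNGKey(0), x)
#     y, ln_out = ln_dense.apply(variables, x)    # y: (128, 8, 1024), ln_out: (128, 8, 512)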


class LayerNormMLP(TransformerEngineBase):
    r"""
    Applies layer normalization on the input followed by the MLP module,
    consisting of 2 successive linear transformations, separated by given activations.

    Parameters
    ----------
    intermediate_dim: int, default = 2048
        Intermediate size to which input samples are projected.
    enable_layernorm: bool, default = True
        Indicate whether to enable layer normalization before linear transformation.
    layernorm_type : {'layernorm', 'rmsnorm'}, default = 'layernorm'
        Indicate the type of layer normalization.
    epsilon : float, default = 1e-6
        A value added to the denominator of layer normalization for numerical stability.
    zero_centered_gamma : bool, default = False
        If set to `True`, the LayerNorm formula changes to

        .. math::
            y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} *
            (1 + \gamma) + \beta

        This parameter is only applicable for 'layernorm'.
        The default of `scale_init` will also be changed. See `scale_init`.
    scale_init : Initializer, default = None
        Used for initializing scale factors :math:`\gamma`.
        If `None` is provided, scale_init is set according to the value of zero_centered_gamma.
        If zero_centered_gamma is set to `True`, then scale_init is `flax.linen.initializers.zeros`.
        Otherwise, scale_init is `flax.linen.initializers.ones`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    scale_axes : Tuple[str, ...], default = ('embed', )
        The name of axes used to shard the scale factors :math:`\gamma` with a corresponding mesh,
        only used when :attr:`enable_layernorm=True`.
    ln_bias_init: Initializer, default = flax.linen.initializers.zeros
        Used for initializing shift factors :math:`\beta`,
        only used when :attr:`enable_layernorm=True` and :attr:`layernorm_type='layernorm'`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    ln_bias_axes: Tuple[str, ...], default = ('embed', )
        The name of axes used to shard the shift factors :math:`\beta` with a corresponding mesh.
        Only used when :attr:`enable_layernorm=True` and :attr:`layernorm_type='layernorm'`.
    kernel_init : Initializer, default =
        flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
        Used for initializing the weights of both linear transformations.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    kernel_axes_1 : Tuple[str, ...], default = ('embed', 'act', 'mlp')
        The name of axes used to shard the weights with a corresponding mesh for
        the weight of the first linear transformation.
    kernel_axes_2 : Tuple[str, ...], default = ('mlp', 'embed')
        The name of axes used to shard the weights with a corresponding mesh for
        the weight of the second linear transformation.
    use_bias: bool, default = False
        Indicate whether to enable bias shifting.
        If set to False, the layer will not learn an additive bias.
    bias_init: Initializer, default = flax.linen.initializers.zeros
        Used for initializing bias, only used when :attr:`use_bias=True`.
        It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    bias_axes_1: Tuple[str, ...], default = ('act', 'mlp')
        The name of axes used to shard bias with a corresponding mesh for
        the weight of the first linear transformation.
        Only used when :attr:`use_bias=True`.
    bias_axes_2: Tuple[str, ...], default = ('embed',)
        The name of axes used to shard bias with a corresponding mesh for
        the weight of the second linear transformation.
        Only used when :attr:`use_bias=True`.
    return_layernorm_output: bool, default = True
        Indicate whether to return the output of layer normalization.
        If set False, return None as the second tensor in outputs.
    activations: Sequence[Union[str, Callable]], default = ('relu',)
        The sequence of activation functions to apply after the first linear transformation.
        Each activation has its own transformation layer.
    intermediate_dropout_rng_name: str, default = 'dropout'
        The key in given RNGs via flax.linen.Module.apply that is used for generating Dropout masks.
    intermediate_dropout_rate: float, default = 0.1
        Dropout probability for the dropout op after the :attr:`activations`.
    intermediate_hidden_dropout_dims: Sequence[int], default = ()
        Dimensions that will share the same dropout mask for the intermediate dropout
        after the :attr:`activations`.
    enable_low_rank_adaptation: bool, default = False
        Indicate whether to enable low rank adaptation for each linear layer.
    low_rank_adaptation_dim: int, default = 32
        The dimension for low rank adaptation, only used when
        :attr:`enable_low_rank_adaptation=True`.
    low_rank_adaptation_alpha: float, default = None
        The alpha for computing the scaling factor of LoRA output.
        :math:`\frac{alpha}{rank} * lora_output`. None means no scaling.
    axis:  Union[Iterable[int], int], default = -1
        An integer tuple with axes to apply the transformation on.
    layernorm_input_axes: Tuple[str, ...], default = None
        Indicate the logical axes of sharding constraint to the input of layernorm, like
        (BATCH_AXES, SEQLEN_AXES, HIDDEN_AXES). Default is None, which means not to insert
        sharding constraint.
    dot_1_input_axes: Tuple[str, ...], default = None
        Indicate the logical axes of sharding constraint to the input of 1st dot, like
        (BATCH_AXES, SEQLEN_AXES, HIDDEN_AXES). Default is None, which means not to insert
        sharding constraint.
    dot_2_input_axes: Tuple[str, ...], default = None
        Indicate the logical axes of sharding constraint to the input of 2nd dot, like
        (BATCH_AXES, SEQLEN_AXES, HIDDEN_AXES). Default is None, which means not to insert
        sharding constraint.

    Optimization parameters
    -----------------------
    dtype : jax.numpy.dtype, default  = jax.numpy.float32
        The data type used to allocate the initial parameters.
    transpose_batch_sequence : bool, default = True
        Indicate whether the input tensors were switched axis of batch
        and sequence length dimension. If set to True, the input tensors
        should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
    """

    intermediate_dim: int = 2048
    enable_layernorm: bool = True
    layernorm_type: str = 'layernorm'
    epsilon: float = 1e-6
    zero_centered_gamma: bool = False
    scale_init: Initializer = None
    scale_axes: Tuple[str, ...] = ('embed',)
    ln_bias_init: Initializer = nn.initializers.zeros
    ln_bias_axes: Tuple[str, ...] = ('embed',)
    kernel_init: Initializer = None
    kernel_axes_1: Tuple[str, ...] = ('embed', 'act', 'mlp')
    kernel_axes_2: Tuple[str, ...] = ('mlp', 'embed')
    use_bias: bool = False
    bias_init: Initializer = nn.initializers.zeros
    bias_axes_1: Tuple[str, ...] = ('act', 'mlp')
    bias_axes_2: Tuple[str, ...] = ('embed',)
    return_layernorm_output: bool = True
    activations: Sequence[Union[str, Callable]] = ('relu',)
    intermediate_dropout_rng_name: str = 'dropout'
    intermediate_dropout_rate: float = 0.1
    intermediate_hidden_dropout_dims: Sequence[int] = ()
    enable_low_rank_adaptation: bool = False
    low_rank_adaptation_dim: int = 32
    low_rank_adaptation_alpha: float = None
    axis: Union[Iterable[int], int] = -1
    dtype: DType = jnp.float32
    transpose_batch_sequence: bool = True
    layernorm_input_axes: Tuple[str, ...] = None
    dot_1_input_axes: Tuple[str, ...] = None
    dot_2_input_axes: Tuple[str, ...] = None
    major_sharding_type = None

    def __post_init__(self):
        if self.kernel_init is None:
            self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
        self.scale_init = _obtain_default_layernorm_scale_init_if_need(
            self.scale_init, self.zero_centered_gamma)
        super().__post_init__()

    @nn.compact
    def __call__(self, inputs: Array, deterministic: bool = False) -> Array:
        """
        Apply layer normalization to the input followed by a feedforward network (MLP Block).

        Parameters
        ----------
        inputs: jax.numpy.ndarray
            Input tensor.
        deterministic: bool, default  = False
            Disable dropout ops if set to True.

        Returns
        -------
        outputs : jax.numpy.ndarray
            Output tensors.
        ln_outputs: jax.numpy.ndarray
            The output tensors of layer normalization.
            If :attr:`return_layernorm_output=False`, then this would be None.
        """
        warnings.warn("major_sharding_type of LayerNormMLP would be removed in the near feature",
                      DeprecationWarning)

        ln_output = None

        fuse_layernorm = FP8Helper.is_fp8_enabled(
        ) and not self.return_layernorm_output and self.enable_layernorm

        gated_act_pool = [('gelu', 'linear'),
                          ('silu', 'linear'),
                          ('relu', 'linear'),
                          ('quick_gelu', 'linear'),
                          ('squared_relu', 'linear')]
        act_pool = [('gelu',),
                    ('silu',),
                    ('relu',),
                    ('quick_gelu',),
                    ('squared_relu',)]
        normalized_acts = []
        for act in self.activations:
            # Callable activations are kept as-is; only strings are normalized for lookup.
            normalized_acts.append(act.lower() if isinstance(act, str) else act)
        normalized_acts = tuple(reversed(normalized_acts)
                                if normalized_acts[0] == 'linear' else normalized_acts)

        is_act_implemented = normalized_acts in (gated_act_pool + act_pool)

        use_fused_layernorm_mlp = fuse_layernorm and is_act_implemented and\
                                self.intermediate_dropout_rate < 1e-3

        # LayerNorm
        if self.enable_layernorm:
            assert self.axis == -1    # Only support axis == -1 at this moment
            inputs = with_sharding_constraint_by_logical_axes(inputs, self.layernorm_input_axes)

            features = inputs.shape[-1]

            scale, ln_bias = _create_layernorm_parameters(self.layernorm_type, (features,),
                                                          self.scale_init, self.scale_axes,
                                                          self.ln_bias_init, self.ln_bias_axes,
                                                          self.dtype)

            if not fuse_layernorm:
                y = layernorm(inputs,
                              scale,
                              ln_bias,
                              layernorm_type=self.layernorm_type,
                              zero_centered_gamma=self.zero_centered_gamma,
                              epsilon=self.epsilon)
            else:
                assert not self.return_layernorm_output
                y = inputs
        else:
            y = inputs

        if self.return_layernorm_output:
            ln_output = y

        def kernel_1_init(key, num_kernels, stack_axis, *init_args):
            kernels = []
            for _ in range(num_kernels):
                key, init_key = jax_random.split(key)
                kernels.append(self.kernel_init(init_key, *init_args))
            return jnp.stack(kernels, axis=stack_axis, dtype=jnp.float32)

        num_of_gemm = 2
        fp8_meta_package = None
        if FP8Helper.is_fp8_enabled():
            fp8_meta_package = \
                    TransformerEngineBase.get_fp8_meta_package(num_of_gemm)

        num_activations = len(normalized_acts)
        axis = _canonicalize_tuple(self.axis)
        axis = _normalize_axes(axis, y.ndim)

        intermediate_dim = _canonicalize_tuple((num_activations, self.intermediate_dim))
        kernel_1_shape = tuple(y.shape[ax] for ax in axis) + intermediate_dim
        kernel_1_each_shape = (np.prod([y.shape[ax] for ax in axis]), self.intermediate_dim)
        kernel_1 = nn_partitioning.param_with_axes('wi_kernel',
                                                   kernel_1_init,
                                                   num_activations,
                                                   -2,
                                                   kernel_1_each_shape,
                                                   jnp.float32,
                                                   axes=self.kernel_axes_1)
        kernel_1 = jnp.reshape(kernel_1, kernel_1_shape)
        hidden_size = inputs.shape[-1]
        hidden_size_tuple = _canonicalize_tuple(hidden_size)
        kernel_2_shape = (self.intermediate_dim,) + hidden_size_tuple
        kernel_2_param_shape = (self.intermediate_dim, np.prod(hidden_size_tuple))
        kernel_2 = nn_partitioning.param_with_axes('wo_kernel',
                                                   self.kernel_init,
                                                   kernel_2_param_shape,
                                                   jnp.float32,
                                                   axes=self.kernel_axes_2)
        kernel_2 = jnp.reshape(kernel_2, kernel_2_shape)
        contract_ind = tuple(range(0, len(axis)))

        ffn1_ckpt_name = 'ffn1'
        ffn2_ckpt_name = 'ffn2'

        if use_fused_layernorm_mlp:
            assert self.axis == -1    # Only support axis == -1 at this moment

            if self.use_bias:
                bias_1_shape = intermediate_dim
                bias_1 = nn_partitioning.param_with_axes('wi_bias',
                                                         self.bias_init,
                                                         bias_1_shape,
                                                         jnp.float32,
                                                         axes=self.bias_axes_1)
                bias_1 = bias_1.astype(self.dtype)

                bias_2_shape = (hidden_size,)
                bias_2 = nn_partitioning.param_with_axes('wo_bias',
                                                         self.bias_init,
                                                         bias_2_shape,
                                                         jnp.float32,
                                                         axes=self.bias_axes_2)
                bias_2 = bias_2.astype(self.dtype)
            else:
                bias_1 = None
                bias_2 = None

            out = fused_layernorm_fp8_mlp(y,
                                         scale,
                                         ln_bias, [kernel_1, kernel_2], [bias_1, bias_2],
                                         fp8_meta_package,
                                         self.layernorm_type,
                                         zero_centered_gamma=self.zero_centered_gamma,
                                         epsilon=self.epsilon,
                                         layernorm_input_axes=self.layernorm_input_axes,
                                         dot_1_input_axes=self.dot_1_input_axes,
                                         dot_2_input_axes=self.dot_2_input_axes,
                                         ffn1_ckpt_name=ffn1_ckpt_name,
                                         ffn2_ckpt_name=ffn2_ckpt_name,
                                         activation_type = normalized_acts,
                                         use_bias = self.use_bias)
        else:    # not use_fused_ln_geglu_mlp
            # DenseGeneral 1
            gemm1_fp8_meta_package = None if fp8_meta_package is None \
                                     else fp8_meta_package.get_package_by_gemm_idx(0)
            if fuse_layernorm:
                x = layernorm_fp8_dot(y,
                                      kernel_1,
                                      scale,
                                      ln_bias,
                                      gemm1_fp8_meta_package,
                                      self.layernorm_type,
                                      zero_centered_gamma=self.zero_centered_gamma,
                                      epsilon=self.epsilon,
                                      layernorm_input_axes=self.layernorm_input_axes,
                                      dot_input_axes=self.dot_1_input_axes)
            else:
                y = with_sharding_constraint_by_logical_axes(y, self.dot_1_input_axes)
                x = type_safe_dot_general(y,
                                          kernel_1,
                                          fp8_meta_pkg=gemm1_fp8_meta_package,
                                          contracting_dims=(axis, contract_ind))

            if self.enable_low_rank_adaptation:
                wi_lora_a_kernel_shape = (*kernel_1_shape[:len(axis)], num_activations,
                                          self.low_rank_adaptation_dim)
                wi_lora_a_kernel_init_shape = (kernel_1_each_shape[0], num_activations,
                                               self.low_rank_adaptation_dim)
                wi_lora_a_kernel_init_each_shape = (kernel_1_each_shape[0],
                                                    self.low_rank_adaptation_dim)
                wi_lora_a_kernel_axes = (None,) * len(wi_lora_a_kernel_init_shape)
                wi_lora_a_kernel = nn_partitioning.param_with_axes('wi_lora_a_kernel',
                                                                   kernel_1_init,
                                                                   num_activations,
                                                                   -2,
                                                                   wi_lora_a_kernel_init_each_shape,
                                                                   jnp.float32,
                                                                   axes=wi_lora_a_kernel_axes)
                wi_lora_a_kernel = jnp.reshape(wi_lora_a_kernel, wi_lora_a_kernel_shape)
                wi_lora_a_kernel = wi_lora_a_kernel.astype(self.dtype)

                wi_lora_b_kernel_shape = (num_activations, self.low_rank_adaptation_dim,
                                          self.intermediate_dim)
                wi_lora_b_kernel_axes = (None,) * len(wi_lora_b_kernel_shape)
                wi_lora_b_kernel = nn_partitioning.param_with_axes('wi_lora_b_kernel',
                                                                   nn.initializers.zeros,
                                                                   wi_lora_b_kernel_shape,
                                                                   jnp.float32,
                                                                   axes=wi_lora_b_kernel_axes)
                wi_lora_b_kernel = wi_lora_b_kernel.astype(self.dtype)

                x += _apply_low_rank_adaptation(y, axis, intermediate_dim, wi_lora_a_kernel,
                                                wi_lora_b_kernel, self.low_rank_adaptation_alpha)

            bias_1 = None
            if self.use_bias:
                bias_1 = nn_partitioning.param_with_axes('wi_bias',
                                                       self.bias_init,
                                                       intermediate_dim,
                                                       jnp.float32,
                                                       axes=self.bias_axes_1)
                bias_1 = bias_1.astype(self.dtype)
                bias_1_shape = (1,) * (x.ndim - bias_1.ndim) + bias_1.shape
                x += jnp.reshape(bias_1, bias_1_shape)

            x = checkpoint_name(x, ffn1_ckpt_name)
            if is_act_implemented:
                z = activation_lu(x, normalized_acts)
            else:
                activations = []
                x = jnp.split(x, num_activations, axis=-2)
                for idx, act_fn in enumerate(normalized_acts):
                    x_i = _convert_to_activation_function(act_fn)(x[idx])
                    activations.append(x_i)
                z = functools.reduce(operator.mul, activations)
                # Remove act axis
                z = jnp.reshape(z, (*z.shape[:-2], -1))

            z = nn.Dropout(rate=self.intermediate_dropout_rate,
                           broadcast_dims=self.intermediate_hidden_dropout_dims,
                           rng_collection=self.intermediate_dropout_rng_name)(
                               z, deterministic=deterministic)

            z = with_sharding_constraint_by_logical_axes(z, self.dot_2_input_axes)

            # DenseGeneral 2
            gemm2_fp8_meta_package = None if fp8_meta_package is None \
                                     else fp8_meta_package.get_package_by_gemm_idx(1)

            out = type_safe_dot_general(z,
                                        kernel_2,
                                        fp8_meta_pkg=gemm2_fp8_meta_package,
                                        contracting_dims=(axis, contract_ind))

            if self.enable_low_rank_adaptation:
                wo_lora_a_kernel_shape = (self.intermediate_dim, self.low_rank_adaptation_dim)
                wo_lora_a_kernel_axes = (None,) * len(wo_lora_a_kernel_shape)
                wo_lora_a_kernel = nn_partitioning.param_with_axes('wo_lora_a_kernel',
                                                                   self.kernel_init,
                                                                   wo_lora_a_kernel_shape,
                                                                   jnp.float32,
                                                                   axes=wo_lora_a_kernel_axes)
                wo_lora_a_kernel = wo_lora_a_kernel.astype(self.dtype)

                wo_lora_b_kernel_shape = (self.low_rank_adaptation_dim, hidden_size)
                wo_lora_b_kernel_axes = (None,) * len(wo_lora_b_kernel_shape)
                wo_lora_b_kernel = nn_partitioning.param_with_axes('wo_lora_b_kernel',
                                                                   nn.initializers.zeros,
                                                                   wo_lora_b_kernel_shape,
                                                                   jnp.float32,
                                                                   axes=wo_lora_b_kernel_axes)
                wo_lora_b_kernel = wo_lora_b_kernel.astype(self.dtype)

                out += _apply_low_rank_adaptation(z, axis, hidden_size_tuple, wo_lora_a_kernel,
                                                  wo_lora_b_kernel, self.low_rank_adaptation_alpha)

            bias_2 = None
            if self.use_bias:
                bias_2 = nn_partitioning.param_with_axes('wo_bias',
                                                       self.bias_init, (hidden_size,),
                                                       jnp.float32,
                                                       axes=self.bias_axes_2)
                bias_2 = bias_2.astype(self.dtype)
                out += jnp.reshape(bias_2, (1,) * (out.ndim - 1) + (-1,))

            out = checkpoint_name(out, ffn2_ckpt_name)

        return out, ln_output    # Output, layer_norm_output
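
# A minimal usage sketch for LayerNormMLP (comment only); shapes are illustrative
# assumptions. Dropout draws from the intermediate_dropout_rng_name RNG stream
# unless deterministic=True, so an 'rngs' entry is passed at apply time.
#
#     x = jnp.ones((128, 8, 512))             # (seqlen, batch, hidden) since transpose_batch_sequence=True
#     mlp = LayerNormMLP(intermediate_dim=2048, activations=('gelu', 'linear'))
#     variables = mlp.init(jax_random.PRNGKey(0), x, deterministic=True)
#     out, ln_out = mlp.apply(variables, x, deterministic=False,
#                             rngs={'dropout': jax_random.PRNGKey(1)})
#     # out: (128, 8, 512); ln_out is the normalized input when return_layernorm_output=True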