# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Fused Layer normalization and dense layer transformation operations for Transformer Engine in JAX.

This module provides optimized implementations of layer normalization followed by
dense layer transformation (GEMM) operations, which are commonly used in transformer
architectures. It supports various normalization types, quantization, and
distributed training through sharding constraints.
"""

import warnings
from functools import partial
from typing import Tuple

import jax
import jax.numpy as jnp

from . import cpp_extensions as tex

from .quantize import (
    QuantizerSet,
    noop_quantizer_set,
    with_sharding_constraint_by_logical_axes,
    TensorUsage,
)
from .sharding import get_sequence_parallel_dim


LAYERNORM_DENSE_BATCH_FIRST_WARNING_ISSUED = False


def _issue_batch_first_warning(msg):
    global LAYERNORM_DENSE_BATCH_FIRST_WARNING_ISSUED
    if not LAYERNORM_DENSE_BATCH_FIRST_WARNING_ISSUED:
        warnings.warn(msg, UserWarning)
        LAYERNORM_DENSE_BATCH_FIRST_WARNING_ISSUED = True


def layernorm_dense(
    x: jnp.ndarray,
    kernel: jnp.ndarray,
    gamma: jnp.ndarray,
    beta: jnp.ndarray,
    bias: jnp.ndarray = None,
    norm_type: str = "layernorm",
    zero_centered_gamma: bool = False,
    epsilon: float = 1e-6,
    layernorm_input_axes: Tuple[str, ...] = None,
    dot_input_axes: Tuple[str, ...] = None,
    kernel_axes: Tuple[str, ...] = None,
    batch_first: bool = True,
    quantizer_set: QuantizerSet = noop_quantizer_set,
) -> jnp.ndarray:
    """Apply layer normalization followed by dense layer transformation.

    This function implements the following sequence of operations:
        1. Layer normalization: ln_out = (x - mean) / sqrt(var + epsilon) * gamma + beta
        2. Dense transformation: y = ln_out @ kernel + bias

    Args:
        x: Input tensor with shape [batch..., hidden_in]
        kernel: Weight matrix with shape [hidden_in, hidden_out]
        gamma: Scale parameter for normalization with shape [hidden_in]
        beta: Bias parameter for normalization with shape [hidden_in]
        bias: Optional bias term for dense layer transformation with shape [hidden_out]
        norm_type: Type of normalization ("layernorm" or "rmsnorm")
        zero_centered_gamma: Whether to use zero-centered gamma for normalization
        epsilon: Small constant for numerical stability in normalization
        layernorm_input_axes: Logical axes for sharding the layernorm input
        dot_input_axes: Logical axes for sharding the matrix multiplication input
        kernel_axes: Logical axes for sharding the weight matrix
        batch_first: Assume that x is batched in the first dimension if it has more than 2 dims.
        quantizer_set: Set of quantizers for different tensor types

    Returns:
        Output tensor with shape [batch..., hidden_out]

    Note:
        - For RMSNorm (norm_type="rmsnorm"), beta must be None and zero_centered_gamma
          must be False
        - The function supports automatic differentiation through JAX's custom VJP
        - Quantization is applied to both the normalized input and kernel
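
    Example:
        A minimal sketch with illustrative shapes, leaving quantization at the default
        no-op quantizer_set::

            import jax
            import jax.numpy as jnp

            batch, seqlen, hidden_in, hidden_out = 2, 128, 512, 1024
            kx, kk = jax.random.split(jax.random.PRNGKey(0))
            x = jax.random.normal(kx, (batch, seqlen, hidden_in))
            kernel = jax.random.normal(kk, (hidden_in, hidden_out))
            gamma = jnp.ones((hidden_in,))
            beta = jnp.zeros((hidden_in,))

            y = layernorm_dense(x, kernel, gamma, beta)
            # y.shape == (batch, seqlen, hidden_out)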
    """
    output = _layernorm_dense(
        x,
        kernel,
        gamma,
        beta,
        bias,
        norm_type,
        zero_centered_gamma,
        epsilon,
        layernorm_input_axes,
        dot_input_axes,
        kernel_axes,
        batch_first,
        quantizer_set,
    )
    return output


@partial(
    jax.custom_vjp,
    nondiff_argnums=(
        5,
        6,
        7,
        8,
        9,
        10,
        11,
    ),
)
def _layernorm_dense(
    x: jnp.ndarray,
    kernel: jnp.ndarray,
    gamma: jnp.ndarray,
    beta: jnp.ndarray,
    bias: jnp.ndarray,
    norm_type: str,
    zero_centered_gamma: bool,
    epsilon: float,
    layernorm_input_axes: Tuple[str, ...],
    dot_input_axes: Tuple[str, ...],
    kernel_axes: Tuple[str, ...],
    batch_first: bool,
    quantizer_set,
):
    """Internal implementation of layernorm_dense with custom VJP.

    This function implements the forward pass of layernorm_dense with support for
    automatic differentiation. It handles the normalization and dense layer transformation
    operations, including quantization and sharding constraints.

    Args:
        x: Input tensor
        kernel: Weight matrix
        gamma: Scale parameter for normalization
        beta: Bias parameter for normalization
        bias: Optional bias term
        norm_type: Type of normalization
        zero_centered_gamma: Whether to use zero-centered gamma
        epsilon: Small constant for numerical stability
        layernorm_input_axes: Logical axes for layernorm sharding
        dot_input_axes: Logical axes for matrix multiplication sharding
        batch_first: Assume that x is batched in the first dimension.
        quantizer_set: Set of quantizers

    Returns:
        Output tensor from the combined operations
    """
    output, _ = _layernorm_dense_fwd_rule(
        x,
        kernel,
        gamma,
        beta,
        bias,
        norm_type,
        zero_centered_gamma,
        epsilon,
        layernorm_input_axes,
        dot_input_axes,
        kernel_axes,
        batch_first,
        quantizer_set,
    )
    return output


def _layernorm_dense_fwd_rule(
    x,
    kernel,
    gamma,
    beta,
    bias,
    norm_type,
    zero_centered_gamma,
    epsilon,
    layernorm_input_axes,
    dot_input_axes,
    kernel_axes,
    batch_first,
    quantizer_set,
):
    """Forward pass rule for layernorm_dense.

    Implements the forward pass computation including:
    1. Layer normalization with quantization
    2. Matrix multiplication with quantized kernel
    3. Optional bias addition
    4. Sharding constraints

    Returns:
        Tuple of (output, context) for automatic differentiation
    """
    x_contracting_dims = (len(x.shape) - 1,)
    k_contracting_dims = (0,)
    assert x.shape[-1] == kernel.shape[0]

    x_bdim = None
    if x.ndim > 2:
        if not batch_first:
            _issue_batch_first_warning(
                "TE/JAX `layernorm_dense()` fused-layer implementation does not officially "
                "support sequence-first inputs and may produce incorrect results when "
                "`batch_first=False` or `transpose_batch_sequence=True`. Use sequence-first "
                "inputs at your own discretion."
            )
        x_bdim = 0 if batch_first else x.ndim - 2
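        # e.g. x of shape (batch, seq, hidden) with batch_first=True gives x_bdim == 0;
        # sequence-first x of shape (seq, batch, hidden) gives x_bdim == x.ndim - 2 == 1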

    x = with_sharding_constraint_by_logical_axes(x, layernorm_input_axes)

    casted_ln_out, mu, rsigma = tex.normalization_fwd(
        x,
        gamma,
        beta,
        zero_centered_gamma,
        epsilon,
        norm_type,
        quantizer=quantizer_set.x,
        noop_scaled_tensor=True,
    )
    casted_ln_out = with_sharding_constraint_by_logical_axes(casted_ln_out, dot_input_axes)

    # Kernel in (hidden_in, hidden_out...)
    flatten_axis = 1 - len(kernel.shape)
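    # e.g. a 2D kernel (hidden_in, hidden_out) gives flatten_axis == -1 and a 3D kernel
    # (hidden_in, h1, h2) gives flatten_axis == -2, i.e. the axis where the trailing
    # "hidden_out..." dims begin (assumed interpretation of the quantizer's flatten_axis)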
    casted_kernel = tex.quantize(
        kernel, flatten_axis=flatten_axis, quantizer=quantizer_set.kernel, noop_scaled_tensor=True
    )
    casted_kernel = with_sharding_constraint_by_logical_axes(casted_kernel, kernel_axes)

    # NN GEMM
    # (batch..., hidden_in) x (hidden_in, hidden_out...)
    use_bias = bias is not None
    output = tex.gemm(
        casted_ln_out.get_tensor(TensorUsage.LHS),
        casted_kernel.get_tensor(TensorUsage.RHS),
        contracting_dims=(x_contracting_dims, k_contracting_dims),
        batched_dims=((x_bdim,), ()),
        bias=bias if not tex.gemm_uses_jax_dot() else None,
        fuse_bias=use_bias if not tex.gemm_uses_jax_dot() else False,
    )

    if use_bias and tex.gemm_uses_jax_dot():
        bias_new_shape = (1,) * (output.ndim - bias.ndim) + bias.shape
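        # e.g. a (hidden_out,) bias becomes (1, 1, hidden_out) so the add below broadcasts
        # against a (batch, seq, hidden_out) output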
        output += jnp.reshape(bias, bias_new_shape)

    ctx = (
        casted_ln_out.get_tensor(TensorUsage.LHS_TRANS),
        casted_kernel.get_tensor(TensorUsage.RHS_TRANS),
        x.shape,
        kernel.shape,
        mu,
        rsigma,
        x,
        gamma,
        beta,
        x_contracting_dims,
        k_contracting_dims,
        use_bias,
        quantizer_set,
        flatten_axis,
        x_bdim,
    )

    return output, ctx


def _layernorm_dense_bwd_rule(
    norm_type,
    zero_centered_gamma,
    epsilon,
    layernorm_input_axes,
    dot_input_axes,  # pylint: disable=unused-argument
    kernel_axes,
    batch_first,  # pylint: disable=unused-argument
    ctx,
    grad,
):
    """Backward pass rule for layernorm_dense.

    Implements the backward pass computation including:
    1. Gradient computation for matrix multiplication
    2. Gradient computation for layer normalization
    3. Gradient computation for bias terms
    4. Proper handling of quantization

    Returns:
        Tuple of gradients for all input parameters
    """
    (
        casted_ln_out,
        casted_kernel,
        x_shape,
        kernel_shape,
        mu,
        rsigma,
        x,
        gamma,
        beta,
        x_contracting_dims_in_fwd,
        k_contracting_dims_in_fwd,
        use_bias,
        quantizer_set,
        flatten_axis,
        x_bdim,
    ) = ctx

    casted_grad, dbias = tex.quantize_dbias(
        grad,
        is_dbias=use_bias,
        flatten_axis=flatten_axis,
        quantizer=quantizer_set.dgrad,
        noop_scaled_tensor=True,
    )

    # Contracting dims of grad: its trailing dims, which line up with the kernel's
    # non-contracting (output) dims; computed from the rank difference between grad and kernel
    g_contracting_dim = tuple(
        range(grad.ndim - len(kernel_shape) + len(k_contracting_dims_in_fwd), grad.ndim)
    )
    # Contracting dims of kernel: its non-contracting (output) dims from the forward GEMM
    k_contracting_dim = tuple(
        dim for dim in range(len(kernel_shape)) if dim not in k_contracting_dims_in_fwd
    )
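    # Worked example with illustrative shapes: for grad of shape (batch, seq, hidden_out)
    # and kernel of shape (hidden_in, hidden_out), g_contracting_dim == (2,) and
    # k_contracting_dim == (1,), so the dgrad GEMM below yields (batch, seq, hidden_in)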

    # NT GEMM
    sequence_dim = get_sequence_parallel_dim(
        layernorm_input_axes, x_contracting_dims_in_fwd, (x_bdim,)
    )
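    # If the layernorm input was sequence-parallel, ask the custom GEMM to return dgrad in
    # the same sequence-parallel layout; the jax.dot fallback path does not take these
    # arguments (interpretation of the flags passed below)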
    dgrad = tex.gemm(
        casted_grad.get_tensor(TensorUsage.LHS),
        casted_kernel,
        contracting_dims=(g_contracting_dim, k_contracting_dim),
        batched_dims=((x_bdim,), ()),
        sequence_parallel_output=sequence_dim is not None and not tex.gemm_uses_jax_dot(),
        sequence_dim=sequence_dim if not tex.gemm_uses_jax_dot() else None,
    )

    dgrad = with_sharding_constraint_by_logical_axes(dgrad, layernorm_input_axes)

    g_contracting_dim = x_contracting_dim = tuple(
        range(0, len(x_shape) - len(x_contracting_dims_in_fwd))
    )
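    # Worked example with illustrative shapes: for x_shape == (batch, seq, hidden_in) and
    # x_contracting_dims_in_fwd == (2,), both ln_out and grad contract over their leading
    # (batch, seq) dims (0, 1), so the wgrad GEMM below yields (hidden_in, hidden_out)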

    # TN GEMM
    wgrad = tex.gemm(
        casted_ln_out,
        casted_grad.get_tensor(TensorUsage.RHS),
        contracting_dims=(x_contracting_dim, g_contracting_dim),
        batched_dims=((x_bdim,), (x_bdim,)),
    )

    wgrad = with_sharding_constraint_by_logical_axes(wgrad, kernel_axes)

    dx, dgamma, dbeta = tex.normalization_bwd(
        dgrad,
        x,
        mu,
        rsigma,
        gamma,
        beta,
        zero_centered_gamma=zero_centered_gamma,
        epsilon=epsilon,
        norm_type=norm_type,
    )

    return dx, wgrad, dgamma, dbeta, dbias, quantizer_set


_layernorm_dense.defvjp(_layernorm_dense_fwd_rule, _layernorm_dense_bwd_rule)