# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Multi-layer perceptron (MLP) operations with layer normalization for Transformer Engine in JAX.

This module provides optimized implementations of MLP blocks commonly used in transformer
architectures. Each MLP block consists of:
1. Layer normalization
2. First dense layer transformation (GEMM1) with bias and activation
3. Second dense layer transformation (GEMM2) with bias

The implementation supports various normalization types, activation functions,
quantization, and distributed training through sharding constraints.
"""

from typing import List, Tuple, Sequence, Union, Callable
from functools import partial

import jax
import jax.numpy as jnp
from jax.ad_checkpoint import checkpoint_name

from . import cpp_extensions as tex
from .layernorm import canonicalize_norm_type
from .quantize import (
    with_sharding_constraint_by_logical_axes,
    QuantizerSet,
    noop_quantizer_set,
    TensorUsage,
    get_quantize_config,
)


def layernorm_mlp(
    x: jnp.ndarray,
    gamma: jnp.ndarray,
    beta: jnp.ndarray,
    kernels: List[jnp.ndarray],
    biases: List[jnp.ndarray],
    norm_type: str,
    zero_centered_gamma: bool = False,
    epsilon: float = 1e-6,
    norm_input_axes: Tuple[str, ...] = None,
    dot_1_input_axes: Tuple[str, ...] = None,
    dot_2_input_axes: Tuple[str, ...] = None,
    kernel_1_axes: Tuple[str, ...] = None,
    kernel_2_axes: Tuple[str, ...] = None,
    ffn1_ckpt_name: str = "ffn1",
    ffn2_ckpt_name: str = "ffn2",
    activation_type: Sequence[Union[str, Callable]] = ("gelu",),
    quantizer_sets: Tuple[QuantizerSet] = (noop_quantizer_set, noop_quantizer_set),
) -> jnp.ndarray:
    """Apply layer normalization followed by MLP block.

    This function implements the following sequence of operations:
        1. Layer normalization: (x - mean) / sqrt(var + epsilon) * gamma + beta
        2. First dense layer transformation: y1 = x * kernel1 + bias1
        3. Activation function: y2 = activation(y1)
        4. Second dense layer transformation: y3 = y2 * kernel2 + bias2

    Args:
        x: Input tensor with shape [batch..., hidden_in]
        gamma: Scale parameter for normalization with shape [hidden_in]
        beta: Bias parameter for normalization with shape [hidden_in]
        kernels: List of two weight matrices:
            - kernel1: [hidden_in, activation_len, intermediate]
            - kernel2: [intermediate, hidden_in]
        biases: List of two bias terms:
            - bias1: [intermediate]
            - bias2: [hidden_in]
        norm_type: Type of normalization ("layernorm" or "rmsnorm")
        zero_centered_gamma: Whether to use zero-centered gamma for normalization
        epsilon: Small constant for numerical stability in normalization
        norm_input_axes: Logical axes for sharding the layernorm input
        dot_1_input_axes: Logical axes for sharding the first matrix multiplication
        dot_2_input_axes: Logical axes for sharding the second matrix multiplication
        kernel_1_axes: Logical axes for sharding the first weight matrix
        kernel_2_axes: Logical axes for sharding the second weight matrix
        ffn1_ckpt_name: Name for checkpointing the first feed-forward network
        ffn2_ckpt_name: Name for checkpointing the second feed-forward network
        activation_type: Activation function(s) to apply after the first dense layer transformation
        quantizer_sets: Tuple of two quantizer sets for the two dense layer transformations

    Returns:
        Output tensor with shape [batch..., hidden_in]

    Note:
        - For RMSNorm (norm_type="rmsnorm"), beta must be None and zero_centered_gamma
          must be False
        - The function supports automatic differentiation through JAX's custom VJP
        - Quantization is applied to both dense layer transformations
        - Checkpointing is applied to both feed-forward networks for memory efficiency
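
    Example:
        A minimal usage sketch; the shapes, sizes, and dtypes below are illustrative only:

            x = jnp.zeros((4, 128, 1024), dtype=jnp.bfloat16)
            gamma = jnp.ones((1024,))
            beta = jnp.zeros((1024,))
            # kernel_1 carries an activation axis: (hidden_in, len(activation_type), intermediate)
            kernel_1 = jnp.zeros((1024, 1, 4096), dtype=jnp.bfloat16)
            kernel_2 = jnp.zeros((4096, 1024), dtype=jnp.bfloat16)
            bias_1 = jnp.zeros((4096,), dtype=jnp.bfloat16)
            bias_2 = jnp.zeros((1024,), dtype=jnp.bfloat16)
            out = layernorm_mlp(
                x,
                gamma,
                beta,
                kernels=[kernel_1, kernel_2],
                biases=[bias_1, bias_2],
                norm_type="layernorm",
                activation_type=("gelu",),
            )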
    """
    assert len(kernels) == 2

    kernel_1 = kernels[0]
    kernel_2 = kernels[1]
    bias_1 = biases[0]
    bias_2 = biases[1]

    norm_type = canonicalize_norm_type(norm_type)
    if norm_type == "rmsnorm":
        assert beta is None, "beta should be None if norm_type is 'rmsnorm'"
        assert (
            not zero_centered_gamma
        ), "zero_centered_gamma is not supported if norm_type is 'rmsnorm'"

    if not get_quantize_config().is_fp8_enabled():
        input_dtype = x.dtype
        kernel_1 = kernel_1.astype(input_dtype)
        kernel_2 = kernel_2.astype(input_dtype)

    output = _layernorm_mlp(
        x,
        gamma,
        beta,
        kernel_1,
        kernel_2,
        bias_1,
        bias_2,
        norm_type,
        zero_centered_gamma,
        epsilon,
        norm_input_axes,
        dot_1_input_axes,
        dot_2_input_axes,
        kernel_1_axes,
        kernel_2_axes,
        ffn1_ckpt_name,
        ffn2_ckpt_name,
        activation_type,
        quantizer_sets,
    )
    return output


@partial(jax.custom_vjp, nondiff_argnums=(7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17))
def _layernorm_mlp(
    x: jnp.ndarray,
    gamma: jnp.ndarray,
    beta: jnp.ndarray,
    kernel_1: jnp.ndarray,
    kernel_2: jnp.ndarray,
    bias_1: jnp.ndarray,
    bias_2: jnp.ndarray,
    norm_type: str,
    zero_centered_gamma: bool,
    epsilon: float,
    norm_input_axes: Tuple[str, ...],
    dot_1_input_axes: Tuple[str, ...],
    dot_2_input_axes: Tuple[str, ...],
    kernel_1_axes: Tuple[str, ...],
    kernel_2_axes: Tuple[str, ...],
    ffn1_ckpt_name: str,
    ffn2_ckpt_name: str,
    activation_type: Sequence[Union[str, Callable]],
    quantizer_sets,
):
    """Internal implementation of layernorm_mlp with custom VJP.

    This function implements the forward pass of layernorm_mlp with support for
    automatic differentiation. It handles the normalization, dense layer transformations,
    activation, and quantization operations.

    Args:
        x: Input tensor
        gamma: Scale parameter for normalization
        beta: Bias parameter for normalization
        kernel_1: First weight matrix
        kernel_2: Second weight matrix
        bias_1: First bias term
        bias_2: Second bias term
        norm_type: Type of normalization
        zero_centered_gamma: Whether to use zero-centered gamma
        epsilon: Small constant for numerical stability
        norm_input_axes: Logical axes for layernorm sharding
        dot_1_input_axes: Logical axes for first matrix multiplication sharding
        dot_2_input_axes: Logical axes for second matrix multiplication sharding
        kernel_1_axes: Logical axes for sharding the first weight matrix
        kernel_2_axes: Logical axes for sharding the second weight matrix
        ffn1_ckpt_name: Name for first feed-forward network checkpointing
        ffn2_ckpt_name: Name for second feed-forward network checkpointing
        activation_type: Activation function(s)
        quantizer_sets: Tuple of quantizer sets

    Returns:
        Output tensor from the combined operations
    """
    output, _ = _layernorm_mlp_fwd_rule(
        x,
        gamma,
        beta,
        kernel_1,
        kernel_2,
        bias_1,
        bias_2,
        norm_type,
        zero_centered_gamma,
        epsilon,
        norm_input_axes,
        dot_1_input_axes,
        dot_2_input_axes,
        kernel_1_axes,
        kernel_2_axes,
        ffn1_ckpt_name,
        ffn2_ckpt_name,
        activation_type,
        quantizer_sets,
    )
    return output


def _layernorm_mlp_fwd_rule(
    x,
    gamma,
    beta,
    kernel_1,
    kernel_2,
    bias_1,
    bias_2,
    norm_type,
    zero_centered_gamma,
    epsilon,
    norm_input_axes,
    dot_1_input_axes,
    dot_2_input_axes,
    kernel_1_axes,
    kernel_2_axes,
    ffn1_ckpt_name,
    ffn2_ckpt_name,
    activation_type,
    quantizer_sets,
):
    """Forward pass rule for layernorm_mlp.

    Implements the forward pass computation including:
    1. Layer normalization with quantization
    2. First matrix multiplication with quantized kernel
    3. Activation function application
    4. Second matrix multiplication with quantized kernel
    5. Optional bias additions
    6. Sharding constraints
    7. Checkpointing for memory efficiency

    Returns:
        Tuple of (output, context) for automatic differentiation
    """
    del kernel_1_axes, kernel_2_axes

    ffn1_quantizer_set, ffn2_quantizer_set = quantizer_sets

    # x should be in shape of (batch..., hidden)
    # Kernel_1 should be in shape of (hidden_in, activation_len, intermediate)
    # Kernel_2 should be in shape of (intermediate, hidden_in)
    assert len(kernel_1.shape) == 3
    assert len(kernel_2.shape) == 2
    assert kernel_1.shape[-2] == len(activation_type)
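    # Illustrative example (values not from this module): for a gated activation such as
    # activation_type=("gelu", "linear"), kernel_1 has shape (hidden_in, 2, intermediate)
    # and the act_lu step below combines the two activation slices (e.g. gelu(a) * b)
    # into a (batch..., intermediate) tensor.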

    x_contracting_dims = (len(x.shape) - 1,)
    k_contracting_dims = (0,)

    assert x.shape[x_contracting_dims[0]] == kernel_1.shape[k_contracting_dims[0]]

    use_bias_1 = bias_1 is not None
    use_bias_2 = bias_2 is not None

    x = with_sharding_constraint_by_logical_axes(x, norm_input_axes)

    casted_ln_out, mu, rsigma = tex.normalization_fwd(
        x,
        gamma,
        beta,
        zero_centered_gamma,
        epsilon,
        norm_type,
        quantizer=ffn1_quantizer_set.x,
    )
    casted_ln_out = with_sharding_constraint_by_logical_axes(casted_ln_out, dot_1_input_axes)

    casted_kernel_1 = tex.quantize(
        kernel_1,
        flatten_axis=-2,
        quantizer=ffn1_quantizer_set.kernel,
    )

    # NN GEMM
    # (batch..., hidden_in) x (hidden_in, hidden_out)
    dot_1_output = tex.gemm(
        casted_ln_out.get_tensor(TensorUsage.LHS),
        casted_kernel_1.get_tensor(TensorUsage.RHS),
        contracting_dims=(x_contracting_dims, k_contracting_dims),
        bias=bias_1 if not tex.gemm_uses_jax_dot() else None,
        fuse_bias=use_bias_1 if not tex.gemm_uses_jax_dot() else False,
    )

    if use_bias_1 and tex.gemm_uses_jax_dot():
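        # Illustrative broadcast (shapes not enforced here): a bias_1 of shape (intermediate,)
        # is reshaped to (1, ..., 1, intermediate) so it adds across dot_1_output's
        # (batch..., act_len, intermediate) dimensions.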
        bias_1_shape = bias_1.shape
        bias_1_new_shape = (1,) * (dot_1_output.ndim - bias_1.ndim) + bias_1_shape
        dot_1_output += jnp.reshape(bias_1, bias_1_new_shape)

    # This sharding constraint is needed to correct the Shardy sharding propagation
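    # For example (illustrative axis names only): dot_2_input_axes == ("batch", "seq", "hidden_tp")
    # becomes dot_1_output_axes == ("batch", "seq", None, "hidden_tp"), covering the extra
    # activation axis in dot_1_output.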
    if dot_2_input_axes is not None:
        dot_1_output_axes = (
            dot_2_input_axes[:-1] + (None,) + dot_2_input_axes[-1:]
        )  # add the act_num axis
        dot_1_output = with_sharding_constraint_by_logical_axes(dot_1_output, dot_1_output_axes)

    dot_1_output = checkpoint_name(dot_1_output, ffn1_ckpt_name)

    # (batch..., act_len, intermediate) -> (batch..., intermediate)
    casted_act_out = tex.act_lu(
        dot_1_output,
        activation_type,
        quantizer=ffn2_quantizer_set.x,
    )

    casted_act_out = with_sharding_constraint_by_logical_axes(casted_act_out, dot_2_input_axes)

    casted_kernel_2 = tex.quantize(
        kernel_2,
        quantizer=ffn2_quantizer_set.kernel,
    )

    # NN GEMM
    # (batch..., hidden_out) x (hidden_out, hidden_in)
    dot_2_output = tex.gemm(
        casted_act_out.get_tensor(TensorUsage.LHS),
        casted_kernel_2.get_tensor(TensorUsage.RHS),
        contracting_dims=(x_contracting_dims, k_contracting_dims),
        bias=bias_2 if not tex.gemm_uses_jax_dot() else None,
        fuse_bias=use_bias_2 if not tex.gemm_uses_jax_dot() else False,
    )

    if use_bias_2 and tex.gemm_uses_jax_dot():
        bias_2_shape = bias_2.shape
        bias_2_new_shape = (1,) * (dot_2_output.ndim - bias_2.ndim) + bias_2_shape
        dot_2_output += jnp.reshape(bias_2, bias_2_new_shape)

    dot_2_output = checkpoint_name(dot_2_output, ffn2_ckpt_name)

    ctx = (
        x,
        mu,
        rsigma,
        gamma,
        beta,
        casted_ln_out.get_tensor(TensorUsage.LHS_TRANS),
        casted_kernel_1.get_tensor(TensorUsage.RHS_TRANS),
        dot_1_output,
        casted_act_out.get_tensor(TensorUsage.LHS_TRANS),
        casted_kernel_2.get_tensor(TensorUsage.RHS_TRANS),
        x_contracting_dims,
        k_contracting_dims,
        kernel_1.shape,
        kernel_2.shape,
        use_bias_1,
        use_bias_2,
        quantizer_sets,
    )

    return dot_2_output, ctx


def _layernorm_mlp_bwd_rule(
    norm_type,
    zero_centered_gamma,
    epsilon,
    norm_input_axes,
    dot_1_input_axes,
    dot_2_input_axes,
    kernel_1_axes,
    kernel_2_axes,
    ffn1_ckpt_name,
    ffn2_ckpt_name,
    activation_type,
    ctx,
    grad,
):
    """Backward pass rule for layernorm_mlp.

    Implements the backward pass computation including:
    1. Gradient computation for second matrix multiplication
    2. Gradient computation for activation function
    3. Gradient computation for first matrix multiplication
    4. Gradient computation for layer normalization
    5. Gradient computation for bias terms
    6. Proper handling of quantization

    Returns:
        Tuple of gradients for all input parameters
    """
    del norm_input_axes, ffn1_ckpt_name, ffn2_ckpt_name
    (
        x,
        mu,
        rsigma,
        gamma,
        beta,
        casted_ln_out,
        casted_kernel_1,
        dot_1_output,
        casted_act_out,
        casted_kernel_2,
        x_contracting_dims_in_fwd,
        k_contracting_dims_in_fwd,
        kernel_1_shape,
        kernel_2_shape,
        use_bias_1,
        use_bias_2,
        quantizer_sets,
    ) = ctx

    ffn1_quantizer_set, ffn2_quantizer_set = quantizer_sets

    # The incoming gradient has the same shape as dot_1's input, so it shares that sharding
    grad = with_sharding_constraint_by_logical_axes(grad, dot_1_input_axes)

    casted_grad, dbias_2 = tex.quantize_dbias(
        grad,
        is_dbias=use_bias_2,
        quantizer=ffn1_quantizer_set.dgrad,
    )

    # g_contracting_dims calibrated with the shape difference of grad.ndim vs kernel_2.ndim
    g_contracting_dims_2 = tuple(
        range(grad.ndim - len(kernel_2_shape) + len(k_contracting_dims_in_fwd), grad.ndim)
    )
    # k_non_contracting_dims
    k_contracting_dims_2 = tuple(
        dim for dim in range(len(kernel_2_shape)) if dim not in k_contracting_dims_in_fwd
    )
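    # Illustrative example (not from this module): if grad.ndim == 3 and
    # kernel_2_shape == (intermediate, hidden_in) with k_contracting_dims_in_fwd == (0,),
    # then g_contracting_dims_2 == (2,) and k_contracting_dims_2 == (1,), i.e. both
    # sides contract over the hidden_in dimension in the NT GEMM below.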

    # NT GEMM
    # (batch..., hidden_in) x (hidden_out, hidden_in)
    dgrad_2 = tex.gemm(
        casted_grad.get_tensor(TensorUsage.LHS),
        casted_kernel_2,
        contracting_dims=(g_contracting_dims_2, k_contracting_dims_2),
    )

    dgrad_2 = with_sharding_constraint_by_logical_axes(dgrad_2, dot_2_input_axes)

    x_contracting_dims = g_contracting_dims = tuple(
        range(0, len(x.shape) - len(x_contracting_dims_in_fwd))
    )
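    # Both wgrad GEMMs below contract over every batch dimension, e.g. for x of shape
    # (batch, seq, hidden_in) this is dims (0, 1) (illustrative shapes).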

    # TN GEMM
    # (hidden, batch...) x (hidden, batch...)
    wgrad_2 = tex.gemm(
        casted_act_out,
        casted_grad.get_tensor(TensorUsage.RHS),
        contracting_dims=(x_contracting_dims, g_contracting_dims),
    )
    wgrad_2 = with_sharding_constraint_by_logical_axes(wgrad_2, kernel_2_axes)

    casted_dact_out, dbias_1 = tex.quantize_dact_dbias(
        dgrad_2,
        dot_1_output,
        activation_type=activation_type,
        is_dbias=use_bias_1,
        quantizer=ffn2_quantizer_set.dgrad,
    )

    # g_contracting_dims calibrated with the shape difference of dact_out.ndim vs kernel_1.ndim
    dact_out_ndim = casted_dact_out.get_tensor(TensorUsage.LHS).data.ndim
    g_contracting_dims_1 = tuple(
        range(dact_out_ndim - len(kernel_1_shape) + len(k_contracting_dims_in_fwd), dact_out_ndim)
    )
    # k_non_contracting_dims
    k_contracting_dims_1 = tuple(
        dim for dim in range(len(kernel_1_shape)) if dim not in k_contracting_dims_in_fwd
    )

    # NT GEMM
    dgrad_1 = tex.gemm(
        casted_dact_out.get_tensor(TensorUsage.LHS),
        casted_kernel_1,
        contracting_dims=(g_contracting_dims_1, k_contracting_dims_1),
    )

    dgrad_1 = with_sharding_constraint_by_logical_axes(dgrad_1, dot_1_input_axes)

    # TN GEMM
    # (hidden, batch...) x (hidden, batch...)
    wgrad_1 = tex.gemm(
        casted_ln_out,
        casted_dact_out.get_tensor(TensorUsage.RHS),
        contracting_dims=(x_contracting_dims, g_contracting_dims),
    )

    wgrad_1 = with_sharding_constraint_by_logical_axes(wgrad_1, kernel_1_axes)

    dx, dgamma, dbeta = tex.normalization_bwd(
        dgrad_1,
        x,
        mu,
        rsigma,
        gamma,
        beta,
        zero_centered_gamma=zero_centered_gamma,
        epsilon=epsilon,
        norm_type=norm_type,
    )
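    # The returned tuple matches the differentiable arguments of _layernorm_mlp:
    # (x, gamma, beta, kernel_1, kernel_2, bias_1, bias_2, quantizer_sets).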

    return (dx, dgamma, dbeta, wgrad_1, wgrad_2, dbias_1, dbias_2, quantizer_sets)


_layernorm_mlp.defvjp(_layernorm_mlp_fwd_rule, _layernorm_mlp_bwd_rule)