test_distributed_layernorm_mlp.py
# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from typing import Callable, Sequence, Union, Optional
import pytest

import jax
import jax.numpy as jnp
import numpy as np
from jax.sharding import Mesh, NamedSharding, PartitionSpec
from utils import (
    assert_allclose,
    assert_tree_like_allclose,
    is_devices_enough,
    pytest_parametrize_wrapper,
)

from transformer_engine.common import recipe
from transformer_engine.jax.quantize import is_fp8_available, ScalingMode
from transformer_engine.jax import fp8_autocast
from transformer_engine.jax.flax import LayerNormMLP
from transformer_engine.jax.layernorm_mlp import layernorm_mlp
from transformer_engine.jax.sharding import (
    HIDDEN_AXES,
    HIDDEN_TP_AXES,
    BATCH_AXES,
    SEQLEN_TP_AXES,
    SEQLEN_AXES,
    W_NO_SHARD_AXES,
    W_FSDP_AXES,
    W_TP_AXES,
    W_JOINED_AXES,
)
from transformer_engine.jax.sharding import MeshResource
from transformer_engine.jax.quantize import QuantizerFactory
from transformer_engine.jax.cpp_extensions.misc import get_min_device_compute_capability

is_fp8_supported, reason = is_fp8_available()
is_mxfp8_supported, mxfp8_reason = is_fp8_available(ScalingMode.MXFP8_1D_SCALING)
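
# FP8 recipes to test, gated on what the local devices actually support.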
SUPPORTED_RECIPES = []
if is_fp8_supported:
    SUPPORTED_RECIPES.append(pytest.param(recipe.DelayedScaling(), id="DelayedScaling"))
    SUPPORTED_RECIPES.append(pytest.param(recipe.Float8CurrentScaling(), id="CurrentScaling"))
if is_mxfp8_supported:
    SUPPORTED_RECIPES.append(pytest.param(recipe.MXFP8BlockScaling(), id="MXFP8BlockScaling"))

DTYPES = [jnp.bfloat16, jnp.float16]
INPUT_SHAPE = [[4, 64, 128]]  # [batch, seqlen, hidden_in]
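
# Logical sharding annotations for activations and weights; these logical axis
# names are resolved to mesh axes through the MeshResource passed to fp8_autocast.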
LAYERNORM_INPUT_AXES = (BATCH_AXES, SEQLEN_TP_AXES, HIDDEN_AXES)
DOT_1_INPUT_AXES = (BATCH_AXES, SEQLEN_AXES, HIDDEN_AXES)
DOT_2_INPUT_AXES = (BATCH_AXES, SEQLEN_AXES, HIDDEN_TP_AXES)
KERNEL_1_AXES = (W_FSDP_AXES, W_JOINED_AXES, W_TP_AXES)
KERNEL_2_AXES = (W_TP_AXES, W_FSDP_AXES)
LN_SCALE_AXES = (W_NO_SHARD_AXES,)
LN_BIAS_AXES = (W_NO_SHARD_AXES,)
BIAS_1_AXES = (W_JOINED_AXES, W_TP_AXES)
BIAS_2_AXES = (W_NO_SHARD_AXES,)
INTERMEDIATE = 64


# Only test with FSDP and TP, since DP is not used.
def generate_fsdp_and_tp_configs():
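    """Build (device_count, mesh_shape, mesh_axis_names, MeshResource) configs for FSDP+TP meshes."""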
    configs = []
    if is_devices_enough(2):
        configs.append(
            [2, (1, 2), ("fsdp", "tp"), MeshResource(fsdp_resource="fsdp", tp_resource="tp")]
        )
    if is_devices_enough(4):
        configs.append(
            [4, (2, 2), ("fsdp", "tp"), MeshResource(fsdp_resource="fsdp", tp_resource="tp")]
        )
    return configs


class TestDistributedLayernormMLP:

    def generate_inputs(self, input_shape, activation_type, use_bias, dtype):
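        """Generate random activations, LN scale, two kernels, and optional biases."""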
        batch, seqlen, hidden_in = input_shape
        hidden_out = hidden_in

        key = jax.random.PRNGKey(0)
        subkeys = jax.random.split(key, 6)

        x = jax.random.normal(subkeys[0], (batch, seqlen, hidden_in), dtype)
        gamma = jax.random.normal(subkeys[5], (hidden_in,), dtype=dtype)
        k1 = jax.random.normal(
            subkeys[1], (hidden_in, len(activation_type), INTERMEDIATE), dtype
        ) / jnp.sqrt(hidden_in)
        k2 = jax.random.normal(subkeys[2], (INTERMEDIATE, hidden_out), dtype) / jnp.sqrt(
            INTERMEDIATE
        )
        if use_bias:
            b1 = jax.random.normal(subkeys[3], (len(activation_type), INTERMEDIATE), dtype)
            b2 = jax.random.normal(subkeys[4], (hidden_out,), dtype)
        else:
            b1 = None
            b2 = None

        return (x, gamma, k1, k2, b1, b2)

    def layernorm_fp8_mlp_prim_func(
        self,
        x: jnp.ndarray,
        ln_scale: jnp.ndarray,
        kernel_1: jnp.ndarray,
        kernel_2: jnp.ndarray,
        bias_1: Optional[jnp.ndarray],
        bias_2: Optional[jnp.ndarray],
        layernorm_type: str = "rmsnorm",
        activation_type: Sequence[Union[str, Callable]] = ("gelu",),
        multi_gpus: bool = False,
    ) -> jnp.ndarray:
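        """Run layernorm_mlp and reduce the output to a scalar for jax.value_and_grad."""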

        if multi_gpus:
            layernorm_input_axes = LAYERNORM_INPUT_AXES
            dot_1_input_axes = DOT_1_INPUT_AXES
            dot_2_input_axes = DOT_2_INPUT_AXES
            kernel_1_axes = KERNEL_1_AXES
            kernel_2_axes = KERNEL_2_AXES
        else:
            layernorm_input_axes = None
            dot_1_input_axes = dot_2_input_axes = None
            kernel_1_axes = kernel_2_axes = None
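
        # One quantizer set per GEMM: one for the FC1 dot, one for the FC2 dot.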
        quantizer_sets = QuantizerFactory.create_set(n_quantizer_sets=2)

        # out = mean(activation(layernorm(x) @ kernel_1 + bias_1) @ kernel_2 + bias_2)
        return jnp.mean(
            layernorm_mlp(
                x,
                ln_scale,
                None,
                [kernel_1, kernel_2],
                [bias_1, bias_2],
                layernorm_type,
                norm_input_axes=layernorm_input_axes,
                dot_1_input_axes=dot_1_input_axes,
                dot_2_input_axes=dot_2_input_axes,
                kernel_1_axes=kernel_1_axes,
                kernel_2_axes=kernel_2_axes,
                activation_type=activation_type,
                quantizer_sets=quantizer_sets,
            )
        )

    def _test_layernorm_mlp_grad(
        self, mesh_config, activation_type, use_bias, input_shape, dtype, fp8_recipe, use_shardy
    ):
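        """Compare layernorm_mlp value and gradients between single-device and sharded runs."""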
        jax.config.update("jax_use_shardy_partitioner", use_shardy)
        device_count, mesh_shape, mesh_axes, mesh_resource = mesh_config
        layernorm_type = "rmsnorm"

        inputs = [x, gamma, k1, k2, b1, b2] = self.generate_inputs(
            input_shape, activation_type, use_bias, dtype
        )
        static_inputs = [layernorm_type, activation_type]
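        # Non-tensor arguments; marked static via static_argnums in the jitted calls below.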
        value_and_grad_func = jax.value_and_grad(
            self.layernorm_fp8_mlp_prim_func, argnums=range(len(inputs))
        )

        # Single GPU
        with fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
            single_jitter = jax.jit(
                value_and_grad_func,
                static_argnums=range(len(inputs), len(static_inputs) + len(inputs)),
            )
            single_fwd, single_grads = single_jitter(*inputs, *static_inputs)

        # Multiple GPUs
        devices = np.asarray(jax.devices()[:device_count]).reshape(*mesh_shape)
        mesh = Mesh(devices, mesh_axes)
        with mesh, fp8_autocast(enabled=True, fp8_recipe=fp8_recipe, mesh_resource=mesh_resource):
            k1_sharding = NamedSharding(mesh, PartitionSpec("fsdp", None, "tp"))
            k2_sharding = NamedSharding(mesh, PartitionSpec("tp", "fsdp"))
            k1_ = jax.device_put(k1, k1_sharding)
            k2_ = jax.device_put(k2, k2_sharding)
            if use_bias:
                b1_sharding = NamedSharding(mesh, PartitionSpec(None, "tp"))
                b1_ = jax.device_put(b1, b1_sharding)
            else:
                b1_sharding = b1_ = None
            multi_inputs = [*inputs[:2], k1_, k2_, b1_, *inputs[5:]]

            # Argument order for the sharding specs below:
            #   x, gamma, k1, k2, b1, b2
            in_shardings = (
                None,
                None,
                k1_sharding,
                k2_sharding,
                b1_sharding,
                None,
            )
            out_shardings = (
                None,
                (None, None, k1_sharding, k2_sharding, b1_sharding, None),
            )

            multi_jitter = jax.jit(
                value_and_grad_func,
                in_shardings=in_shardings,
                out_shardings=out_shardings,
                static_argnums=range(len(multi_inputs), len(static_inputs) + len(multi_inputs) + 1),
            )  # +1 for multi_gpus

            multi_fwd, multi_grads = multi_jitter(*multi_inputs, *static_inputs, True)

        assert_allclose(multi_fwd, single_fwd, dtype=dtype)
        for i in range(len(inputs)):
            if multi_grads[i] is not None:
                if isinstance(multi_grads[i], list):
                    assert isinstance(single_grads[i], list)
                    for m_grad, s_grad in zip(multi_grads[i], single_grads[i]):
                        assert_allclose(
                            m_grad, s_grad, dtype=dtype, err_msg=f"multi_grads[{i}] is not close"
                        )
                else:
                    assert_allclose(
                        multi_grads[i],
                        single_grads[i],
                        dtype=dtype,
                        err_msg=f"multi_grads[{i}] is not close",
                    )

    @pytest.mark.skipif(not is_fp8_supported, reason=reason)
    @pytest_parametrize_wrapper("mesh_config", generate_fsdp_and_tp_configs())
    @pytest_parametrize_wrapper("input_shape", INPUT_SHAPE)
    @pytest_parametrize_wrapper("activation_type", [("gelu",), ("gelu", "linear")])
    @pytest_parametrize_wrapper("dtype", DTYPES)
    @pytest_parametrize_wrapper("use_bias", [True, False])
    @pytest_parametrize_wrapper("fp8_recipe", SUPPORTED_RECIPES)
    def test_layernorm_mlp_grad(
        self, mesh_config, activation_type, use_bias, input_shape, dtype, fp8_recipe
    ):
        self._test_layernorm_mlp_grad(
            mesh_config,
            activation_type,
            use_bias,
            input_shape,
            dtype,
            fp8_recipe,
            use_shardy=False,
        )

    @pytest.mark.skipif(not is_fp8_supported, reason=reason)
    @pytest_parametrize_wrapper("mesh_config", generate_fsdp_and_tp_configs())
    @pytest_parametrize_wrapper("input_shape", INPUT_SHAPE)
    @pytest_parametrize_wrapper("activation_type", [("gelu",), ("gelu", "linear")])
    @pytest_parametrize_wrapper("dtype", DTYPES)
    @pytest_parametrize_wrapper("use_bias", [True, False])
    def test_layernorm_mlp_grad_shardy(
        self, mesh_config, activation_type, use_bias, input_shape, dtype
    ):
        # We don't test block scaling with Shardy because at the time of writing,
        # it is not supported in JAX's scaled_matmul_stablehlo.
        self._test_layernorm_mlp_grad(
            mesh_config,
            activation_type,
            use_bias,
            input_shape,
            dtype,
            fp8_recipe=recipe.DelayedScaling(),
            use_shardy=True,
        )

    def _test_layernorm_mlp(
        self,
        mesh_config,
        activation_type,
        use_bias,
        input_shape,
        dtype,
        use_fp8,
        fp8_recipe,
        use_shardy,
    ):
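        """Compare LayerNormMLP module outputs between single-device and sharded runs."""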
        jax.config.update("jax_use_shardy_partitioner", use_shardy)
        batch, seqlen, hidden_in = input_shape
        layernorm_type = "rmsnorm"

        rng = jax.random.PRNGKey(0)
        subkeys = jax.random.split(rng, 2)

        x = jax.random.normal(subkeys[0], (batch, seqlen, hidden_in), dtype)
        init_rngs = {"params": subkeys[1]}

        # Single GPU
        with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
            ln_mlp_single = LayerNormMLP(
                layernorm_type=layernorm_type,
                transpose_batch_sequence=False,  # input: [batch, seqlen, hidden]
                intermediate_dim=INTERMEDIATE,
                activations=activation_type,
                use_bias=use_bias,
            )
            params_single = ln_mlp_single.init(init_rngs, x, deterministic=True)
            mlp_out_single, ln_out_single = ln_mlp_single.apply(
                params_single, x, deterministic=True
            )

        # Multiple GPUs
        device_count, mesh_shape, mesh_axes, mesh_resource = mesh_config
        devices = np.asarray(jax.devices()[:device_count]).reshape(*mesh_shape)
        mesh = Mesh(devices, mesh_axes)
        with mesh, fp8_autocast(
            enabled=use_fp8, fp8_recipe=fp8_recipe, mesh_resource=mesh_resource
        ):
            ln_mlp_sharded = LayerNormMLP(
                layernorm_type=layernorm_type,
                transpose_batch_sequence=False,
                intermediate_dim=INTERMEDIATE,
                activations=activation_type,
                scale_axes=LN_SCALE_AXES,
                ln_bias_axes=LN_BIAS_AXES,
                kernel_axes_1=KERNEL_1_AXES,
                kernel_axes_2=KERNEL_2_AXES,
                use_bias=use_bias,
                bias_axes_1=BIAS_1_AXES,
                bias_axes_2=BIAS_2_AXES,
                layernorm_input_axes=LAYERNORM_INPUT_AXES,
                dot_1_input_axes=DOT_1_INPUT_AXES,
                dot_2_input_axes=DOT_2_INPUT_AXES,
                name="mlp",
            )
            params_sharded = ln_mlp_sharded.init(init_rngs, x, deterministic=True)
            mlp_out_sharded, ln_out_sharded = ln_mlp_sharded.apply(
                params_sharded, x, deterministic=True
            )

        # Make sure the parameter values match between the two initializations
        assert_tree_like_allclose(params_sharded["params"], params_single["params"])
        assert_allclose(ln_out_sharded, ln_out_single, dtype=dtype)

        atol = None
        rtol = None
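        # sm89 devices (e.g. L40) need looser tolerances for FP16 GELU with FP8 delayed scaling.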
        l40_tolerance_update = (
            get_min_device_compute_capability() == 89
            and fp8_recipe == recipe.DelayedScaling()
            and use_fp8
            and dtype == jnp.float16
            and activation_type == ("gelu",)
        )
        if l40_tolerance_update:
            atol = 0.04
            rtol = 11

        assert_allclose(mlp_out_sharded, mlp_out_single, dtype=dtype, atol=atol, rtol=rtol)

    @pytest_parametrize_wrapper("input_shape", INPUT_SHAPE)
    @pytest_parametrize_wrapper("mesh_config", generate_fsdp_and_tp_configs())
    @pytest_parametrize_wrapper("activation_type", [("gelu",), ("silu", "linear")])
    @pytest_parametrize_wrapper("dtype", DTYPES)
    @pytest_parametrize_wrapper("use_bias", [True, False])
    @pytest_parametrize_wrapper("use_shardy", [False, True])
    def test_layernorm_mlp_layer(
        self, mesh_config, activation_type, use_bias, input_shape, dtype, use_shardy
    ):
        self._test_layernorm_mlp(
            mesh_config,
            activation_type,
            use_bias,
            input_shape,
            dtype,
            use_fp8=False,
            fp8_recipe=None,
            use_shardy=use_shardy,
        )

    @pytest.mark.skipif(not is_fp8_supported, reason=reason)
    @pytest_parametrize_wrapper("mesh_config", generate_fsdp_and_tp_configs())
    @pytest_parametrize_wrapper("activation_type", [("gelu",), ("gelu", "linear")])
    @pytest_parametrize_wrapper("use_bias", [True, False])
    @pytest_parametrize_wrapper("input_shape", INPUT_SHAPE)
    @pytest_parametrize_wrapper("dtype", DTYPES)
    @pytest_parametrize_wrapper("fp8_recipe", SUPPORTED_RECIPES)
    def test_layernorm_mlp_layer_fp8(
        self, mesh_config, activation_type, use_bias, input_shape, dtype, fp8_recipe
    ):
        self._test_layernorm_mlp(
            mesh_config,
            activation_type,
            use_bias,
            input_shape,
            dtype,
            use_fp8=True,
            fp8_recipe=fp8_recipe,
            use_shardy=False,
        )