# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from typing import Callable, Sequence, Union, Optional

import pytest
import jax
import jax.numpy as jnp
import numpy as np
from jax.sharding import Mesh, NamedSharding, PartitionSpec
from utils import (
    assert_allclose,
    assert_tree_like_allclose,
    is_devices_enough,
    pytest_parametrize_wrapper,
)

from transformer_engine.common import recipe
from transformer_engine.jax.quantize import is_fp8_available, ScalingMode
from transformer_engine.jax import fp8_autocast
from transformer_engine.jax.flax import LayerNormMLP
from transformer_engine.jax.layernorm_mlp import layernorm_mlp
from transformer_engine.jax.sharding import (
    HIDDEN_AXES,
    HIDDEN_TP_AXES,
    BATCH_AXES,
    SEQLEN_TP_AXES,
    SEQLEN_AXES,
    W_NO_SHARD_AXES,
    W_FSDP_AXES,
    W_TP_AXES,
    W_JOINED_AXES,
)
from transformer_engine.jax.sharding import MeshResource
from transformer_engine.jax.quantize import QuantizerFactory

is_fp8_supported, reason = is_fp8_available()
is_mxfp8_supported, reason = is_fp8_available(ScalingMode.MXFP8_1D_SCALING)

SUPPORTED_RECIPES = []
if is_fp8_supported:
    SUPPORTED_RECIPES.append(pytest.param(recipe.DelayedScaling(), id="DelayedScaling"))
    SUPPORTED_RECIPES.append(pytest.param(recipe.Float8CurrentScaling(), id="CurrentScaling"))
if is_mxfp8_supported:
    SUPPORTED_RECIPES.append(pytest.param(recipe.MXFP8BlockScaling(), id="MXFP8BlockScaling"))

DTYPES = [jnp.bfloat16, jnp.float16]
INPUT_SHAPE = [[4, 64, 128]]  # [batch, seqlen, hidden_in]

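# Logical sharding axes for the activations and weights used below. With the meshes built in
# generate_fsdp_and_tp_configs(), the *_TP_AXES dimensions end up sharded over the "tp" mesh
# axis, W_FSDP_AXES over the "fsdp" axis, and W_NO_SHARD_AXES / W_JOINED_AXES stay replicated
# (this mirrors the explicit PartitionSpecs used in the grad test further down).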
LAYERNORM_INPUT_AXES = (BATCH_AXES, SEQLEN_TP_AXES, HIDDEN_AXES)
DOT_1_INPUT_AXES = (BATCH_AXES, SEQLEN_AXES, HIDDEN_AXES)
DOT_2_INPUT_AXES = (BATCH_AXES, SEQLEN_AXES, HIDDEN_TP_AXES)
KERNEL_1_AXES = (W_FSDP_AXES, W_JOINED_AXES, W_TP_AXES)
KERNEL_2_AXES = (W_TP_AXES, W_FSDP_AXES)
LN_SCALE_AXES = (W_NO_SHARD_AXES,)
LN_BIAS_AXES = (W_NO_SHARD_AXES,)
BIAS_1_AXES = (W_JOINED_AXES, W_TP_AXES)
BIAS_2_AXES = (W_NO_SHARD_AXES,)
INTERMEDIATE = 64


# Only test with FSDP and TP as DP is not used
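# Each config is [device_count, mesh_shape, mesh_axis_names, MeshResource].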
def generate_fsdp_and_tp_configs():
    configs = []
    if is_devices_enough(2):
        configs.append(
            [2, (1, 2), ("fsdp", "tp"), MeshResource(fsdp_resource="fsdp", tp_resource="tp")]
        )
    if is_devices_enough(4):
        configs.append(
            [4, (2, 2), ("fsdp", "tp"), MeshResource(fsdp_resource="fsdp", tp_resource="tp")]
        )

    return configs


class TestDistributedLayernormMLP:

    def generate_inputs(self, input_shape, activation_type, use_bias, dtype):
        batch, seqlen, hidden_in = input_shape
        hidden_out = hidden_in

        key = jax.random.PRNGKey(0)
        subkeys = jax.random.split(key, 6)

        x = jax.random.normal(subkeys[0], (batch, seqlen, hidden_in), dtype)
        gamma = jax.random.normal(subkeys[5], (hidden_in,), dtype=dtype)
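        # kernel_1 stacks one [hidden_in, INTERMEDIATE] projection per activation, so gated
        # activation pairs such as ("gelu", "linear") get two fused sub-kernels.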
        k1 = jax.random.normal(
            subkeys[1], (hidden_in, len(activation_type), INTERMEDIATE), dtype
        ) / jnp.sqrt(hidden_in)
        k2 = jax.random.normal(subkeys[2], (INTERMEDIATE, hidden_out), dtype) / jnp.sqrt(
            INTERMEDIATE
        )
        if use_bias:
            b1 = jax.random.normal(subkeys[3], (len(activation_type), INTERMEDIATE), dtype)
            b2 = jax.random.normal(subkeys[4], (hidden_out,), dtype)
        else:
            b1 = None
            b2 = None

        return (x, gamma, k1, k2, b1, b2)

    def layernorm_fp8_mlp_prim_func(
        self,
        x: jnp.ndarray,
        ln_scale: jnp.ndarray,
        kernel_1: jnp.ndarray,
        kernel_2: jnp.ndarray,
        bias_1: Optional[jnp.ndarray],
        bias_2: Optional[jnp.ndarray],
        layernorm_type: str = "rmsnorm",
        activation_type: Sequence[Union[str, Callable]] = ("gelu",),
        multi_gpus: bool = False,
    ) -> jnp.ndarray:

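        # With multi_gpus=True the inputs and kernels are annotated with the logical sharding
        # axes defined at module scope so layernorm_mlp can shard both dots across fsdp/tp.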
        if multi_gpus:
            layernorm_input_axes = LAYERNORM_INPUT_AXES
            dot_1_input_axes = DOT_1_INPUT_AXES
            dot_2_input_axes = DOT_2_INPUT_AXES
            kernel_1_axes = KERNEL_1_AXES
            kernel_2_axes = KERNEL_2_AXES
        else:
            layernorm_input_axes = None
            dot_1_input_axes = dot_2_input_axes = None
            kernel_1_axes = kernel_2_axes = None

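        # Two quantizer sets, one per GEMM in the MLP (dot_1 and dot_2).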
        quantizer_sets = QuantizerFactory.create_set(n_quantizer_sets=2)

        # out = mean(act(norm(x) @ kernel_1 + bias_1) @ kernel_2 + bias_2)
        return jnp.mean(
            layernorm_mlp(
                x,
                ln_scale,
                None,  # ln bias; unused with rmsnorm
                [kernel_1, kernel_2],
                [bias_1, bias_2],
                layernorm_type,
                norm_input_axes=layernorm_input_axes,
                dot_1_input_axes=dot_1_input_axes,
                dot_2_input_axes=dot_2_input_axes,
                kernel_1_axes=kernel_1_axes,
                kernel_2_axes=kernel_2_axes,
                activation_type=activation_type,
                quantizer_sets=quantizer_sets,
            )
        )

    def _test_layernorm_mlp_grad(
        self, mesh_config, activation_type, use_bias, input_shape, dtype, fp8_recipe, use_shardy
    ):
        jax.config.update("jax_use_shardy_partitioner", use_shardy)
        device_count, mesh_shape, mesh_axes, mesh_resource = mesh_config
        layernorm_type = "rmsnorm"

        inputs = [x, gamma, k1, k2, b1, b2] = self.generate_inputs(
            input_shape, activation_type, use_bias, dtype
        )
        static_inputs = [layernorm_type, activation_type]
        value_and_grad_func = jax.value_and_grad(
            self.layernorm_fp8_mlp_prim_func, argnums=range(len(inputs))
        )

        # Single GPU
        with fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
            single_jitter = jax.jit(
                value_and_grad_func,
                static_argnums=range(len(inputs), len(static_inputs) + len(inputs)),
            )
            single_fwd, single_grads = single_jitter(*inputs, *static_inputs)

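        # Re-run the same function under an (fsdp, tp) device mesh with sharded weights and
        # compare against the single-device reference.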
        # Multi GPUs
        devices = np.asarray(jax.devices()[:device_count]).reshape(*mesh_shape)
        mesh = Mesh(devices, mesh_axes)
        with mesh, fp8_autocast(enabled=True, fp8_recipe=fp8_recipe, mesh_resource=mesh_resource):
            k1_sharding = NamedSharding(mesh, PartitionSpec("fsdp", None, "tp"))
            k2_sharding = NamedSharding(mesh, PartitionSpec("tp", "fsdp"))
            k1_ = jax.device_put(k1, k1_sharding)
            k2_ = jax.device_put(k2, k2_sharding)
            if use_bias:
                b1_sharding = NamedSharding(mesh, PartitionSpec(None, "tp"))
                b1_ = jax.device_put(b1, b1_sharding)
            else:
                b1_sharding = b1_ = None
            multi_inputs = [*inputs[:2], k1_, k2_, b1_, *inputs[5:]]

            # Position ref for sharding pspec lists:
            #   x, gamma, k1, k2, b1, b2
            in_shardings = (
                None,
                None,
                k1_sharding,
                k2_sharding,
                b1_sharding,
                None,
            )
            out_shardings = (
                None,
                (None, None, k1_sharding, k2_sharding, b1_sharding, None),
            )

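            # Static args are layernorm_type and activation_type plus the trailing multi_gpus
            # flag, hence the +1 in static_argnums.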
            multi_jitter = jax.jit(
                value_and_grad_func,
                in_shardings=in_shardings,
                out_shardings=out_shardings,
                static_argnums=range(len(multi_inputs), len(static_inputs) + len(multi_inputs) + 1),
            )  # +1 for multi_gpus

            multi_fwd, multi_grads = multi_jitter(*multi_inputs, *static_inputs, True)

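        # Forward output and every gradient (kernel grads may come back as lists) must match
        # the single-device reference within the dtype-dependent tolerance of assert_allclose.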
        assert_allclose(multi_fwd, single_fwd, dtype=dtype)
        for i in range(len(inputs)):
            if multi_grads[i] is not None:
                if isinstance(multi_grads[i], list):
                    assert isinstance(single_grads[i], list)
                    for m_grad, s_grad in zip(multi_grads[i], single_grads[i]):
                        assert_allclose(
                            m_grad, s_grad, dtype=dtype, err_msg=f"multi_grads[{i}] is not close"
                        )
                else:
                    assert_allclose(
                        multi_grads[i],
                        single_grads[i],
                        dtype=dtype,
                        err_msg=f"multi_grads[{i}] is not close",
                    )

    @pytest.mark.skipif(not is_fp8_supported, reason=reason)
    @pytest_parametrize_wrapper("mesh_config", generate_fsdp_and_tp_configs())
    @pytest_parametrize_wrapper("input_shape", INPUT_SHAPE)
    @pytest_parametrize_wrapper("activation_type", [("gelu",), ("gelu", "linear")])
    @pytest_parametrize_wrapper("dtype", DTYPES)
    @pytest_parametrize_wrapper("use_bias", [True, False])
    @pytest_parametrize_wrapper("fp8_recipe", SUPPORTED_RECIPES)
    def test_layernorm_mlp_grad(
        self, mesh_config, activation_type, use_bias, input_shape, dtype, fp8_recipe
    ):
        self._test_layernorm_mlp_grad(
            mesh_config,
            activation_type,
            use_bias,
            input_shape,
            dtype,
            fp8_recipe,
            use_shardy=False,
        )

    @pytest.mark.skipif(not is_fp8_supported, reason=reason)
    @pytest_parametrize_wrapper("mesh_config", generate_fsdp_and_tp_configs())
    @pytest_parametrize_wrapper("input_shape", INPUT_SHAPE)
    @pytest_parametrize_wrapper("activation_type", [("gelu",), ("gelu", "linear")])
    @pytest_parametrize_wrapper("dtype", DTYPES)
    @pytest_parametrize_wrapper("use_bias", [True, False])
    def test_layernorm_mlp_grad_shardy(
        self, mesh_config, activation_type, use_bias, input_shape, dtype
    ):
        # We don't test block scaling with Shardy because at the time of writing,
        # it is not supported in JAX's scaled_matmul_stablehlo.
        self._test_layernorm_mlp_grad(
            mesh_config,
            activation_type,
            use_bias,
            input_shape,
            dtype,
            fp8_recipe=recipe.DelayedScaling(),
            use_shardy=True,
        )

    def _test_layernorm_mlp(
        self,
        mesh_config,
        activation_type,
        use_bias,
        input_shape,
        dtype,
        use_fp8,
        fp8_recipe,
        use_shardy,
    ):
        jax.config.update("jax_use_shardy_partitioner", use_shardy)
        batch, seqlen, hidden_in = input_shape
        layernorm_type = "rmsnorm"

        rng = jax.random.PRNGKey(0)
        subkeys = jax.random.split(rng, 2)

        x = jax.random.normal(subkeys[0], (batch, seqlen, hidden_in), dtype)
        init_rngs = {"params": subkeys[1]}

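        # Both the single-device and the sharded module are initialized with the same RNGs,
        # so their parameters and outputs should match.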
        # Single GPU
        with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
            ln_mlp_single = LayerNormMLP(
                layernorm_type=layernorm_type,
                transpose_batch_sequence=False,  # input: [batch, seqlen, hidden]
                intermediate_dim=INTERMEDIATE,
                activations=activation_type,
                use_bias=use_bias,
            )
            params_single = ln_mlp_single.init(init_rngs, x, deterministic=True)
            mlp_out_single, ln_out_single = ln_mlp_single.apply(
                params_single, x, deterministic=True
            )

        # Multi GPUs
        device_count, mesh_shape, mesh_axes, mesh_resource = mesh_config
        devices = np.asarray(jax.devices()[:device_count]).reshape(*mesh_shape)
        mesh = Mesh(devices, mesh_axes)
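        # The sharded module carries the logical axis annotations defined at module scope;
        # fp8_autocast(..., mesh_resource=...) maps those logical axes onto the "fsdp"/"tp"
        # mesh axes.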
        with mesh, fp8_autocast(
            enabled=use_fp8, fp8_recipe=fp8_recipe, mesh_resource=mesh_resource
        ):
            ln_mlp_sharded = LayerNormMLP(
                layernorm_type=layernorm_type,
                transpose_batch_sequence=False,
                intermediate_dim=INTERMEDIATE,
                activations=activation_type,
                scale_axes=LN_SCALE_AXES,
                ln_bias_axes=LN_BIAS_AXES,
                kernel_axes_1=KERNEL_1_AXES,
                kernel_axes_2=KERNEL_2_AXES,
                use_bias=use_bias,
                bias_axes_1=BIAS_1_AXES,
                bias_axes_2=BIAS_2_AXES,
                layernorm_input_axes=LAYERNORM_INPUT_AXES,
                dot_1_input_axes=DOT_1_INPUT_AXES,
                dot_2_input_axes=DOT_2_INPUT_AXES,
                name="mlp",
            )
            params_sharded = ln_mlp_sharded.init(init_rngs, x, deterministic=True)
            mlp_out_sharded, ln_out_sharded = ln_mlp_sharded.apply(
                params_sharded, x, deterministic=True
            )

        # Make sure params values are the same
        assert_tree_like_allclose(params_sharded["params"], params_single["params"])
        assert_allclose(ln_out_sharded, ln_out_single, dtype=dtype)
        assert_allclose(mlp_out_sharded, mlp_out_single, dtype=dtype)

    @pytest_parametrize_wrapper("input_shape", INPUT_SHAPE)
    @pytest_parametrize_wrapper("mesh_config", generate_fsdp_and_tp_configs())
    @pytest_parametrize_wrapper("activation_type", [("gelu",), ("silu", "linear")])
    @pytest_parametrize_wrapper("dtype", DTYPES)
    @pytest_parametrize_wrapper("use_bias", [True, False])
    @pytest_parametrize_wrapper("use_shardy", [False, True])
    def test_layernorm_mlp_layer(
        self, mesh_config, activation_type, use_bias, input_shape, dtype, use_shardy
    ):
        self._test_layernorm_mlp(
            mesh_config,
            activation_type,
            use_bias,
            input_shape,
            dtype,
            use_fp8=False,
            fp8_recipe=None,
            use_shardy=use_shardy,
        )

    @pytest.mark.skipif(not is_fp8_supported, reason=reason)
    @pytest_parametrize_wrapper("mesh_config", generate_fsdp_and_tp_configs())
    @pytest_parametrize_wrapper("activation_type", [("gelu",), ("gelu", "linear")])
    @pytest_parametrize_wrapper("use_bias", [True, False])
    @pytest_parametrize_wrapper("input_shape", INPUT_SHAPE)
    @pytest_parametrize_wrapper("dtype", DTYPES)
    @pytest_parametrize_wrapper("fp8_recipe", SUPPORTED_RECIPES)
    def test_layernorm_mlp_layer_fp8(
        self, mesh_config, activation_type, use_bias, input_shape, dtype, fp8_recipe
    ):
        self._test_layernorm_mlp(
            mesh_config,
            activation_type,
            use_bias,
            input_shape,
            dtype,
            use_fp8=True,
            fp8_recipe=fp8_recipe,
            use_shardy=False,
        )