# Copyright (c) 2022-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

from typing import Optional, List

import torch
import pytest
import os

import transformer_engine
import transformer_engine.pytorch as te
from transformer_engine.pytorch.quantization import FP8GlobalStateManager
from transformer_engine.pytorch.utils import (
    init_method_normal,
    scaled_init_method_normal,
)
from transformer_engine.pytorch import (
    autocast,
    quantized_model_init,
    LayerNormLinear,
    Linear,
    GroupedLinear,
    LayerNormMLP,
    TransformerLayer,
    RMSNorm,
    LayerNorm,
    Float8CurrentScalingQuantizer,
    Float8Quantizer,
    Float8Tensor,
    MXFP8Tensor,
    checkpoint,
    QuantizedTensor,
    is_bf16_available,
)
from transformer_engine.common import recipe
import transformer_engine_torch as tex
from transformer_engine.pytorch.cpp_extensions import general_gemm
from transformer_engine.pytorch.tensor.utils import replace_raw_data
from utils import ModelConfig

# Only run FP8 tests on supported devices.
fp8_available, reason_for_no_fp8 = te.is_fp8_available(return_reason=True)
fp8_block_scaling_available, _ = te.is_fp8_block_scaling_available(return_reason=True)
mxfp8_available, reason_for_no_mxfp8 = te.is_mxfp8_available(return_reason=True)

# Set RNG seeds so test runs are deterministic.
seed = 1234
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)

NVTE_TEST_NVINSPECT_ENABLED = int(os.environ.get("NVTE_TEST_NVINSPECT_ENABLED", "0"))


if NVTE_TEST_NVINSPECT_ENABLED:
    # The sanity tests should behave the same when debug=True. A dummy
    # feature is supplied to keep debug mode from being switched off,
    # which can happen when no feature is active.
    import nvdlfw_inspect.api as debug_api

    debug_api.initialize(
        os.environ["NVTE_TEST_NVINSPECT_CONFIG_FILE"],
        feature_dirs=os.environ["NVTE_TEST_NVINSPECT_FEATURE_DIRS"],
    )


def is_fp8_supported(config: ModelConfig):
    """FP8 execution requires the GEMM dimensions to be divisible by 16."""
    if (
        config.max_seqlen_q * config.batch_size % 16
        or config.max_seqlen_kv * config.batch_size % 16
    ):
        return False
    if config.hidden_size % 16 or config.hidden_size_kv % 16:
        return False
    return True


model_configs = {
    "126m": ModelConfig(2, 2048, 12, 64, num_layers=12),
    "small": ModelConfig(2, 32, 2, 32, num_layers=2),
    "weird": ModelConfig(3, 37, 3, 23, num_layers=2),
    "large": ModelConfig(2, 128, 4, 128, num_layers=1),
}


def nvfp4_vanilla():
    """Return an NVFP4 block-scaling recipe with default quantization params."""
    nvfp4_recipe = recipe.NVFP4BlockScaling()
    nvfp4_recipe.fp4_quant_fwd_inp = recipe.QParams()
    nvfp4_recipe.fp4_quant_fwd_weight = recipe.QParams()
    nvfp4_recipe.fp4_quant_bwd_grad = recipe.QParams()
    return nvfp4_recipe


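# Quantization recipes to exercise; `None` runs the same tests through the
# unquantized high-precision path.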
fp8_recipes = []
if mxfp8_available:
    fp8_recipes.append(recipe.MXFP8BlockScaling())
    fp8_recipes.append(nvfp4_vanilla())  # TODO: fix check for this
if fp8_block_scaling_available:
    fp8_recipes.append(recipe.Float8BlockScaling())
if fp8_available:
    fp8_recipes.append(recipe.Float8CurrentScaling())
    fp8_recipes.append(recipe.DelayedScaling())
fp8_recipes.append(None)

param_types = [torch.float32, torch.float16]
if is_bf16_available():  # bf16 requires sm_80 or higher
    param_types.append(torch.bfloat16)

all_boolean = [True, False]
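# Include batch size 0 to exercise the zero-token edge case.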
batch_sizes_with_zero = [0, 1, 2]

all_activations = [
    "gelu",
    "geglu",
    "qgelu",
    "qgeglu",
    "relu",
    "reglu",
    "srelu",
    "sreglu",
    "silu",
    "swiglu",
    "clamped_swiglu",
]
all_normalizations = ["LayerNorm", "RMSNorm"]


def _disable_wgrads(block):
    for p in block.parameters():
        p.requires_grad = False


@pytest.fixture(autouse=True)
def reset_global_fp8_state():
    """Reset global FP8 state after each test so it does not leak across tests."""
    yield
    FP8GlobalStateManager.reset()


def check_grouped_tensor_pointers_helper(tensors, num_elems_in_byte=1, tensor_name="tensor"):
    """
    Verify that tensors are stored in contiguous memory.

    Args:
        tensors: List or iterable of tensors to check
        num_elems_in_byte: Number of elements packed per byte (1 for normal, 2 for NVFP4)
        tensor_name: Name to use in error messages
    """
    tensor_list = list(tensors)
    if len(tensor_list) < 2:
        return  # Nothing to check

    for i in range(1, len(tensor_list)):
        prev_tensor = tensor_list[i - 1]
        curr_tensor = tensor_list[i]

        # Calculate expected offset based on previous tensor size
        prev_numel = prev_tensor.numel()
        expected_offset = (prev_numel // num_elems_in_byte) * prev_tensor.element_size()

        # Verify current tensor's data pointer is correctly offset
        expected_ptr = prev_tensor.data_ptr() + expected_offset
        actual_ptr = curr_tensor.data_ptr()

        assert (
            actual_ptr == expected_ptr
        ), f"{tensor_name} {i} data pointer mismatch: expected {expected_ptr}, got {actual_ptr}"


def check_grouped_tensor_pointers(
    weights: List[torch.Tensor], fp8_recipe: Optional[recipe.Recipe] = None
):
    """
    Verify that the pointers of the weights are in contiguous memory for GroupedTensor.
    TODO(ksivaman): This check can be made way more efficient but for now leaving the brute force approach.
    """

    # NVFP4 packs two 4-bit elements per data byte; all other formats store
    # one element per byte.
    num_elems_in_a_data_byte = 2 if fp8_recipe is not None and fp8_recipe.nvfp4() else 1

    # Check data.
    if hasattr(weights[0], "_data") and weights[0]._data is not None:
        data_tensors = [w._data for w in weights]
        check_grouped_tensor_pointers_helper(data_tensors, num_elems_in_byte=1, tensor_name="data")

    # Check transpose.
    if hasattr(weights[0], "_transpose") and weights[0]._transpose is not None:
        transpose_tensors = [w._transpose for w in weights]
        check_grouped_tensor_pointers_helper(
            transpose_tensors, num_elems_in_byte=1, tensor_name="transpose"
        )

    # Check scale_inv.
    if hasattr(weights[0], "_scale_inv") and weights[0]._scale_inv is not None:
        scale_inv_tensors = [w._scale_inv for w in weights]
        check_grouped_tensor_pointers_helper(
            scale_inv_tensors, num_elems_in_byte=1, tensor_name="scale_inv"
        )

    # Check rowwise scale_inv.
    if hasattr(weights[0], "_rowwise_scale_inv") and weights[0]._rowwise_scale_inv is not None:
        scale_inv_tensors = [w._rowwise_scale_inv for w in weights]
        check_grouped_tensor_pointers_helper(
            scale_inv_tensors, num_elems_in_byte=1, tensor_name="rowwise_scale_inv"
        )

    # Check columnwise scale_inv.
    if (
        hasattr(weights[0], "_columnwise_scale_inv")
        and weights[0]._columnwise_scale_inv is not None
    ):
        columnwise_scale_inv_tensors = [w._columnwise_scale_inv for w in weights]
        check_grouped_tensor_pointers_helper(
            columnwise_scale_inv_tensors,
            num_elems_in_byte=1,
            tensor_name="columnwise scale_inv",
        )

    # Check rowwise amax.
    if hasattr(weights[0], "_rowwise_amax") and weights[0]._rowwise_amax is not None:
        rowwise_amax_tensors = [w._rowwise_amax for w in weights]
        check_grouped_tensor_pointers_helper(
            rowwise_amax_tensors, num_elems_in_byte=1, tensor_name="rowwise amax"
        )

    # Check columnwise amax.
    if hasattr(weights[0], "_columnwise_amax") and weights[0]._columnwise_amax is not None:
        columnwise_amax_tensors = [w._columnwise_amax for w in weights]
        check_grouped_tensor_pointers_helper(
            columnwise_amax_tensors, num_elems_in_byte=1, tensor_name="columnwise amax"
        )

    # Check rowwise data.
    if hasattr(weights[0], "_rowwise_data") and weights[0]._rowwise_data is not None:
        rowwise_data_tensors = [w._rowwise_data for w in weights]
        check_grouped_tensor_pointers_helper(
            rowwise_data_tensors,
            num_elems_in_byte=num_elems_in_a_data_byte,
            tensor_name="rowwise data",
        )

    # Check columnwise data.
    if hasattr(weights[0], "_columnwise_data") and weights[0]._columnwise_data is not None:
        columnwise_data_tensors = [w._columnwise_data for w in weights]
        check_grouped_tensor_pointers_helper(
            columnwise_data_tensors,
            num_elems_in_byte=num_elems_in_a_data_byte,
            tensor_name="columnwise data",
        )


def _test_sanity_e2e_amp(block, dtype, config, fp8_recipe, skip_wgrad):
    te_inp_hidden_states = torch.randn(
        (config.max_seqlen_q, config.batch_size, config.hidden_size),
        dtype=torch.float32,
        device="cuda",
        requires_grad=True,
    )
    te_inp_hidden_states.retain_grad()
    te_inp_attn_mask = torch.randint(
        2,
        (1, 1, config.max_seqlen_q, config.max_seqlen_kv),
        dtype=torch.bool,
        device="cuda",
    )

    if skip_wgrad:
        _disable_wgrads(block)

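    # Nest TE's quantization autocast inside torch.autocast so quantized
    # GEMMs run under AMP; the output should come back in the AMP dtype.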
    use_fp8 = fp8_recipe is not None
    with torch.autocast(device_type="cuda", enabled=True, dtype=dtype):
        with autocast(enabled=use_fp8, recipe=fp8_recipe):
            te_out = block(te_inp_hidden_states, attention_mask=te_inp_attn_mask)
        loss = te_out.sum()

    loss.backward()
    torch.cuda.synchronize()

    assert te_out.dtype == dtype, "AMP wrong output type."
    assert te_inp_hidden_states.grad is not None, "Gradient should not be empty"
    assert te_inp_hidden_states.grad.dtype == torch.float32, "AMP wrong dgrad type."
    for name, p in block.named_parameters():
        if p.requires_grad:
            assert p.grad.dtype == torch.float32, f"AMP wrong wgrad type for {name}."


def _test_sanity_e2e_gradient_accumulation_fusion(block, dtype, config, fp8_recipe, skip_wgrad):
    te_inp_hidden_states = torch.randn(
        (config.max_seqlen_q, config.batch_size, config.hidden_size),
        dtype=dtype,
        device="cuda",
        requires_grad=True,
    )
    te_inp_attn_mask = torch.randint(
        2,
        (1, 1, config.max_seqlen_q, config.max_seqlen_kv),
        dtype=torch.bool,
        device="cuda",
    )

    if skip_wgrad:
        _disable_wgrads(block)

    # Fused wgrad accumulation writes into `.main_grad`, so pre-allocate it
    # for every weight that will receive a gradient.
    for name, p in block.named_parameters():
        if "layer_norm_weight" in name:
            continue
        elif "weight" in name and p.requires_grad:
            p.main_grad = torch.zeros_like(p)

    use_fp8 = fp8_recipe is not None
    with autocast(enabled=use_fp8, recipe=fp8_recipe):
        te_out = block(te_inp_hidden_states, attention_mask=te_inp_attn_mask)
    loss = te_out.sum()
    loss.backward()
    torch.cuda.synchronize()

    failed_grads = []
    for name, p in block.named_parameters():
        if "layer_norm_weight" in name:
            continue
        elif "weight" in name and p.requires_grad:
            if not torch.count_nonzero(p.main_grad) > 0:
                failed_grads.append(name)
    assert len(failed_grads) == 0, f"Gradient not accumulated for {failed_grads}."


def _test_sanity_e2e(block, dtype, config, fp8_recipe, skip_wgrad):
    te_inp_hidden_states = torch.randn(
        (config.max_seqlen_q, config.batch_size, config.hidden_size),
        dtype=dtype,
        device="cuda",
        requires_grad=True,
    )

    if skip_wgrad:
        _disable_wgrads(block)

    use_fp8 = fp8_recipe is not None
    with autocast(enabled=use_fp8, recipe=fp8_recipe):
        te_out = block(te_inp_hidden_states)
    loss = te_out.sum()
    loss.backward()
    torch.cuda.synchronize()


def _test_sanity_e2e_bert(block, dtype, config, fp8_recipe, skip_wgrad):
    te_inp_hidden_states = torch.randn(
        (config.max_seqlen_q, config.batch_size, config.hidden_size),
        dtype=dtype,
        device="cuda",
        requires_grad=True,
    )

    te_inp_attn_mask = torch.randint(
        2,
        (config.batch_size, 1, 1, config.max_seqlen_q),
        dtype=torch.bool,
        device="cuda",
    )

    if skip_wgrad:
        _disable_wgrads(block)

    use_fp8 = fp8_recipe is not None
    with autocast(enabled=use_fp8, recipe=fp8_recipe):
        te_out = block(te_inp_hidden_states, attention_mask=te_inp_attn_mask)
    loss = te_out.sum()
    loss.backward()
    torch.cuda.synchronize()


def _test_sanity_e2e_T5(block, dtype, config, fp8_recipe, skip_wgrad):
    te_inp_hidden_states = torch.randn(
        (config.max_seqlen_q, config.batch_size, config.hidden_size),
        dtype=dtype,
        device="cuda",
        requires_grad=True,
    )
    te_inp_attn_mask = torch.randint(
        2,
        (1, 1, config.max_seqlen_q, config.max_seqlen_kv),
        dtype=torch.bool,
        device="cuda",
    )

    enc_dec_attn_mask = torch.randint(
        2,
        (config.batch_size, 1, 1, config.max_seqlen_kv),
        dtype=torch.bool,
        device="cuda",
    )

    if skip_wgrad:
        _disable_wgrads(block)

    use_fp8 = fp8_recipe is not None
    with autocast(enabled=use_fp8, recipe=fp8_recipe):
        te_out = block(
            te_inp_hidden_states,
            attention_mask=te_inp_attn_mask,
            encoder_output=te_inp_hidden_states,
            enc_dec_attn_mask=enc_dec_attn_mask,
        )
    loss = te_out.sum()
    loss.backward()
    torch.cuda.synchronize()


def _test_sanity_common(
    block, dtype, config, fp8_recipe, skip_wgrad, skip_dgrad, microbatching=True
):
    if skip_dgrad and skip_wgrad:
        pytest.skip("No gradient computation; Skipping to avoid PyTorch RuntimeError.")

    te_inp = torch.randn(
        (config.max_seqlen_q, config.batch_size, config.hidden_size),
        dtype=dtype,
        device="cuda",
        requires_grad=not skip_dgrad,
    )

    if skip_wgrad:
        _disable_wgrads(block)

    use_fp8 = fp8_recipe is not None
    with autocast(enabled=use_fp8, recipe=fp8_recipe):
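        # `is_first_microbatch` lets the module cache quantized weights across
        # microbatches; call twice to exercise both code paths.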
        if not microbatching:
            te_out = block(te_inp)
        else:
            _ = block(te_inp, is_first_microbatch=True)
            te_out = block(te_inp, is_first_microbatch=False)
    if isinstance(te_out, tuple):
        te_out = te_out[0]
    loss = te_out.sum()
    loss.backward()
    torch.cuda.synchronize()


def _test_sanity_normalization_amp(block, dtype, config, skip_wgrad, skip_dgrad):
    if skip_dgrad and skip_wgrad:
        pytest.skip("No gradient computation; Skipping to avoid PyTorch RuntimeError.")

    te_inp = torch.randn(
        (config.max_seqlen_q, config.batch_size, config.hidden_size),
        device="cuda",
        requires_grad=True,
    )
    te_inp.retain_grad()

    with torch.autocast(device_type="cuda", enabled=True, dtype=dtype):
        te_out = block(te_inp)
        loss = te_out.sum()
    loss.backward()

    torch.cuda.synchronize()

    assert te_out.dtype == dtype, "AMP wrong output type."
    assert te_inp.grad is not None, "Gradient should not be empty"
    assert te_inp.grad.dtype == torch.float32, "AMP wrong dgrad type."
    for name, p in block.named_parameters():
        if p.requires_grad:
            assert p.grad.dtype == torch.float32, f"AMP wrong wgrad type for {name}."


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("model", ["small", "weird"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("skip_dgrad", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_normalization_amp(dtype, model, skip_wgrad, skip_dgrad, normalization):
    config = model_configs[model]
    module = RMSNorm if normalization == "RMSNorm" else LayerNorm

    block = module(config.hidden_size).to(dtype=torch.float32).cuda()
    _test_sanity_normalization_amp(block, dtype, config, skip_wgrad, skip_dgrad)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small", "weird"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
@pytest.mark.parametrize("skip_dgrad", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
@pytest.mark.parametrize("microbatching", all_boolean)
def test_sanity_layernorm_linear(
    dtype,
    fp8_recipe,
    model,
    skip_wgrad,
    zero_centered_gamma,
    skip_dgrad,
    normalization,
    microbatching,
):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)

    block = LayerNormLinear(
        config.hidden_size,
        config.hidden_size * 3,
        init_method=init_method,
        zero_centered_gamma=zero_centered_gamma,
        normalization=normalization,
        params_dtype=dtype,
        device="cuda",
    )
    _test_sanity_common(block, dtype, config, fp8_recipe, skip_wgrad, skip_dgrad, microbatching)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small", "weird"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("skip_dgrad", all_boolean)
@pytest.mark.parametrize("microbatching", all_boolean)
def test_sanity_linear(dtype, fp8_recipe, model, skip_wgrad, skip_dgrad, microbatching):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)

    block = Linear(
        config.hidden_size,
        config.hidden_size,
        init_method=output_layer_init_method,
        params_dtype=dtype,
        device="cuda",
    )
    _test_sanity_common(block, dtype, config, fp8_recipe, skip_wgrad, skip_dgrad, microbatching)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes_with_zero)
@pytest.mark.parametrize("model", ["small", "weird"])
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("fp8_model_params", all_boolean)
@pytest.mark.parametrize("use_bias", all_boolean)
def test_sanity_linear_with_zero_tokens(dtype, bs, model, fp8_recipe, fp8_model_params, use_bias):
    if NVTE_TEST_NVINSPECT_ENABLED and fp8_model_params:
        pytest.skip("Quantized model parameters are not supported in debug mode.")
    config = model_configs[model]
    ffn_hidden_size = 4 * config.hidden_size
    num_tokens = bs * config.max_seqlen_q

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    use_fp8 = fp8_recipe is not None
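    # Optionally keep the parameters in quantized form from initialization.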
    with quantized_model_init(enabled=use_fp8 and fp8_model_params, recipe=fp8_recipe):
        te_linear = Linear(
            config.hidden_size, ffn_hidden_size, bias=use_bias, params_dtype=dtype
        ).cuda()

    inp_hidden_states = torch.randn(
        num_tokens, config.hidden_size, dtype=dtype, requires_grad=True
    ).cuda()
    with autocast(enabled=use_fp8, recipe=fp8_recipe):
        out = te_linear(inp_hidden_states)
    loss = out.sum()
    loss.backward()
    assert out.shape == (num_tokens, ffn_hidden_size)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes_with_zero)
@pytest.mark.parametrize("model", ["small", "weird"])
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("fp8_model_params", all_boolean)
@pytest.mark.parametrize("use_bias", all_boolean)
@pytest.mark.parametrize("empty_split", ["first", "last", "middle"])
@pytest.mark.parametrize("num_gemms", [4])
def test_sanity_grouped_linear(
    dtype, bs, model, fp8_recipe, fp8_model_params, use_bias, num_gemms, empty_split
):
    if NVTE_TEST_NVINSPECT_ENABLED and fp8_model_params:
        pytest.skip("Quantized model parameters are not supported in debug mode.")
    config = model_configs[model]
    ffn_hidden_size = 4 * config.hidden_size
    # Scale the batch size; this configuration catches the bug from
    # https://github.com/NVIDIA/TransformerEngine/pull/1527.
    bs = bs * 16
    num_tokens = bs * config.max_seqlen_q * (num_gemms - 1)

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4():
            pytest.skip("NVFP4 not supported for grouped linear")

    use_fp8 = fp8_recipe is not None
    with quantized_model_init(enabled=use_fp8 and fp8_model_params, recipe=fp8_recipe):
        te_grouped_linear = GroupedLinear(
            num_gemms,
            config.hidden_size,
            ffn_hidden_size,
            bias=use_bias,
            params_dtype=dtype,
        ).cuda()

    # Verify that weights are stored in contiguous GroupedTensor storage.
    weights = [getattr(te_grouped_linear, f"weight{i}") for i in range(num_gemms)]
    if fp8_recipe is None or not (fp8_recipe.delayed() or fp8_recipe.float8_current_scaling()):
        check_grouped_tensor_pointers(weights, fp8_recipe)

    inp_hidden_states = torch.randn(
        num_tokens, config.hidden_size, dtype=dtype, requires_grad=True
    ).cuda()
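    # Split tokens evenly across GEMMs, then empty one split to test
    # handling of zero-token groups.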
    m_splits = [bs * config.max_seqlen_q] * num_gemms
    if empty_split == "first":
        m_splits[0] = 0
    elif empty_split == "last":
        m_splits[-1] = 0
    elif empty_split == "middle":
        m_splits[num_gemms // 2] = 0

    with autocast(enabled=use_fp8, recipe=fp8_recipe):
        out = te_grouped_linear(inp_hidden_states, m_splits)
    loss = out.sum()
    loss.backward()
    assert out.shape == (num_tokens, ffn_hidden_size)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small", "weird"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
@pytest.mark.parametrize("skip_dgrad", all_boolean)
@pytest.mark.parametrize("activation", all_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
@pytest.mark.parametrize("microbatching", all_boolean)
@pytest.mark.parametrize("checkpoint", all_boolean)
def test_sanity_layernorm_mlp(
    dtype,
    fp8_recipe,
    model,
    skip_wgrad,
    zero_centered_gamma,
    skip_dgrad,
    activation,
    normalization,
    microbatching,
    checkpoint,
):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
    activation_params = None if activation != "clamped_swiglu" else {"limit": 7.0, "alpha": 1.702}
    block = LayerNormMLP(
        config.hidden_size,
        4 * config.hidden_size,
        init_method=init_method,
        output_layer_init_method=output_layer_init_method,
        zero_centered_gamma=zero_centered_gamma,
        activation=activation,
        activation_params=activation_params,
        normalization=normalization,
        params_dtype=dtype,
        device="cuda",
        checkpoint=checkpoint,
    )
    _test_sanity_common(block, dtype, config, fp8_recipe, skip_wgrad, skip_dgrad, microbatching)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("bias", all_boolean)
@pytest.mark.parametrize("activation", ["gelu", "swiglu"])
@pytest.mark.parametrize("normalization", all_normalizations)
@pytest.mark.parametrize("parallel_attention_mlp", all_boolean)
def test_sanity_gpt(
    dtype,
    fp8_recipe,
    model,
    skip_wgrad,
    bias,
    activation,
    normalization,
    parallel_attention_mlp,
):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)

    block = TransformerLayer(
        config.hidden_size,
        4 * config.hidden_size,
        config.num_heads,
        init_method=init_method,
        output_layer_init_method=output_layer_init_method,
        hidden_dropout=0.1,
        attention_dropout=0.1,
        kv_channels=config.kv_channels,
        params_dtype=dtype,
        apply_residual_connection_post_layernorm=False,
        output_layernorm=False,
        bias=bias,
        activation=activation,
        normalization=normalization,
        device="cuda",
        parallel_attention_mlp=parallel_attention_mlp,
    )

    _test_sanity_e2e(block, dtype, config, fp8_recipe, skip_wgrad)


def test_sanity_gpt_126m():
    fp8_recipe = None
    if fp8_available:
        fp8_recipe = recipe.DelayedScaling(
            margin=0,
            fp8_format=recipe.Format.E4M3,
            amax_history_len=16,
            amax_compute_algo="most_recent",
        )
    test_sanity_gpt(
        dtype=param_types[-1],
        fp8_recipe=fp8_recipe,
        model="126m",
        skip_wgrad=False,
        bias=True,
        activation="gelu",
        normalization="LayerNorm",
        parallel_attention_mlp=False,
    )


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_bert(dtype, fp8_recipe, model, skip_wgrad, normalization):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not fp8_available:
            pytest.skip(reason_for_no_fp8)
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)

    block = TransformerLayer(
        config.hidden_size,
        4 * config.hidden_size,
        config.num_heads,
        init_method=init_method,
        output_layer_init_method=output_layer_init_method,
        hidden_dropout=0.1,
        attention_dropout=0.1,
        kv_channels=config.kv_channels,
        params_dtype=dtype,
        apply_residual_connection_post_layernorm=True,
        output_layernorm=True,
        self_attn_mask_type="causal",
        normalization=normalization,
        device="cuda",
    )

    _test_sanity_e2e_bert(block, dtype, config, fp8_recipe, skip_wgrad)


def test_sanity_bert_126m():
    fp8_recipe = recipe.DelayedScaling(
        margin=0,
        fp8_format=recipe.Format.E4M3,
        amax_history_len=1,
        amax_compute_algo="most_recent",
    )
    test_sanity_bert(
        dtype=param_types[-1],
        fp8_recipe=fp8_recipe,
        model="126m",
        skip_wgrad=False,
        normalization="LayerNorm",
    )


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_T5(dtype, fp8_recipe, model, skip_wgrad, normalization):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not fp8_available:
            pytest.skip(reason_for_no_fp8)
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)

    block = TransformerLayer(
        config.hidden_size,
        4 * config.hidden_size,
        config.num_heads,
        init_method=init_method,
        output_layer_init_method=output_layer_init_method,
        hidden_dropout=0.1,
        attention_dropout=0.1,
        kv_channels=config.kv_channels,
        params_dtype=dtype,
        apply_residual_connection_post_layernorm=False,
        output_layernorm=False,
        layer_type="decoder",
        normalization=normalization,
        device="cuda",
    )

    _test_sanity_e2e_T5(block, dtype, config, fp8_recipe, skip_wgrad)


def test_sanity_T5_126m():
    fp8_recipe = recipe.DelayedScaling(
        margin=0,
        fp8_format=recipe.Format.E4M3,
        amax_history_len=1,
        amax_compute_algo="most_recent",
    )
    test_sanity_T5(
        dtype=param_types[-1],
        fp8_recipe=fp8_recipe,
        model="126m",
        skip_wgrad=False,
        normalization="LayerNorm",
    )


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
def test_sanity_amp_and_nvfuser(dtype, fp8_recipe, model, skip_wgrad):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)

    block = TransformerLayer(
        config.hidden_size,
        4 * config.hidden_size,
        config.num_heads,
        init_method=init_method,
        output_layer_init_method=output_layer_init_method,
        hidden_dropout=0.1,
        attention_dropout=0.1,
        kv_channels=config.kv_channels,
        params_dtype=torch.float32,
        device="cuda",
    )

    _test_sanity_e2e_amp(block, dtype, config, fp8_recipe, skip_wgrad)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small"])
def test_sanity_drop_path(dtype, fp8_recipe, model):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)

    block = TransformerLayer(
        config.hidden_size,
        4 * config.hidden_size,
        config.num_heads,
        init_method=init_method,
        output_layer_init_method=output_layer_init_method,
        hidden_dropout=0.1,
        attention_dropout=0.1,
        kv_channels=config.kv_channels,
        params_dtype=dtype,
        apply_residual_connection_post_layernorm=False,
        output_layernorm=False,
        drop_path_rate=1.0,
        device="cuda",
    )

    _test_sanity_e2e(block, dtype, config, fp8_recipe, False)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
def test_sanity_fused_qkv_params(dtype, fp8_recipe, model, skip_wgrad):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)

    block = TransformerLayer(
        config.hidden_size,
        4 * config.hidden_size,
        config.num_heads,
        init_method=init_method,
        output_layer_init_method=output_layer_init_method,
        hidden_dropout=0.1,
        attention_dropout=0.1,
        kv_channels=config.kv_channels,
        params_dtype=dtype,
        apply_residual_connection_post_layernorm=False,
        output_layernorm=False,
        fuse_qkv_params=True,
        device="cuda",
    )

    _test_sanity_e2e(block, dtype, config, fp8_recipe, skip_wgrad)


@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", ["small"])
@pytest.mark.parametrize("skip_wgrad", all_boolean)
def test_sanity_gradient_accumulation_fusion(dtype, fp8_recipe, model, skip_wgrad):
    config = model_configs[model]

    if fp8_recipe is not None:
        if not is_fp8_supported(config):
            pytest.skip("Model config does not support FP8")
        if fp8_recipe.nvfp4() and dtype == torch.float16:
            pytest.skip("FP16 output for NVFP4 not supported")

    sigma = 0.023
    init_method = init_method_normal(sigma)
    output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)

    block = TransformerLayer(
        config.hidden_size,
        4 * config.hidden_size,
        config.num_heads,
        init_method=init_method,
        output_layer_init_method=output_layer_init_method,
        hidden_dropout=0.1,
        attention_dropout=0.1,
        kv_channels=config.kv_channels,
        params_dtype=dtype,
        apply_residual_connection_post_layernorm=False,
        output_layernorm=False,
        fuse_qkv_params=True,
        fuse_wgrad_accumulation=True,
        device="cuda",
    )

    _test_sanity_e2e_gradient_accumulation_fusion(block, dtype, config, fp8_recipe, skip_wgrad)


def test_model_multiple_cast():
    """Module output dtype should follow the parameter dtype after casting."""
    a = torch.zeros((16, 16), device="cuda")
    m = Linear(16, 32)

    y = m(a)
    assert y.dtype == torch.float32

    m.half()
    a = a.half()

    y2 = m(a)
    assert y2.dtype == torch.float16


@pytest.mark.parametrize("N", [32])
@pytest.mark.parametrize("offset", [1, 3, 5])
@pytest.mark.parametrize("datatype", param_types)
def test_sanity_gemm_with_unalignment(N, offset, datatype):
    """Smoke test for GEMM with deliberately misaligned input pointers."""
    scratchpad = torch.randn(N * N + 2 * offset, device="cuda", dtype=datatype)
    inp = torch.reshape(scratchpad[offset:-offset], (N, N))
    weight = torch.reshape(scratchpad[offset * 2 :], (N, N))

    _ = general_gemm(A=weight, B=inp)
    torch.cuda.synchronize()


@pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)
@pytest.mark.parametrize("N", [32])
@pytest.mark.parametrize("datatype", [torch.float16, torch.bfloat16])
def test_sanity_fp8_gemm_with_unalignment(N, datatype):
    """Smoke test for FP8 GEMM with deliberately misaligned input pointers."""
    offset = 16
    scratchpad = torch.randn(N, N * N + offset, device="cuda", dtype=datatype)

    scales = torch.ones(1).cuda().squeeze()
    amaxes = torch.ones(1).cuda().squeeze()
    dtype = tex.DType.kFloat8E4M3
    fp8_quantizer = Float8Quantizer(scales, amaxes, dtype)

    outp_type = datatype

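    # Quantize the scratchpad, then carve out deliberately offset views so
    # the GEMM inputs are misaligned.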
    scratchpad_fp8 = fp8_quantizer(scratchpad)
    inp_fp8 = torch.reshape(scratchpad_fp8[0][:-offset], (N, N))
    weight_fp8 = torch.reshape(scratchpad_fp8[0][offset:], (N, N))
    general_gemm(
        weight_fp8,
        inp_fp8,
        outp_type,
        bias=None,
        use_split_accumulator=False,
    )
    torch.cuda.synchronize()


@pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)
def test_replace_raw_data_for_float8tensor():
    """Test the functionality of replace_raw_data"""
    torch.manual_seed(12345)
    torch.cuda.manual_seed(12345)

    fp8_quantizer = Float8CurrentScalingQuantizer(fp8_dtype=tex.DType.kFloat8E4M3, device="cuda")
    fp8_tensor = fp8_quantizer.make_empty([128, 128], dtype=torch.bfloat16, device="cuda")
    random_bf16_data = torch.randn(fp8_tensor.shape, dtype=torch.bfloat16, device="cuda")
    fp8_quantizer.update_quantized(random_bf16_data, fp8_tensor)

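    # Snapshot metadata attributes; replace_raw_data must leave them untouched.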
    attrs_to_check = [
        "_quantizer",
        "_fp8_dtype",
        "_scale_inv",
        "_transpose",
        "_transpose_invalid",
    ]
    attrs = {}
    for attr in attrs_to_check:
        attrs[attr] = getattr(fp8_tensor, attr)

    old_data = fp8_tensor._data
    new_data = torch.empty_like(old_data)
    replace_raw_data(fp8_tensor, new_data)

    # Make sure the new_data is properly assigned.
    assert fp8_tensor._data.data_ptr() != old_data.data_ptr()
    assert fp8_tensor._data.data_ptr() == new_data.data_ptr()
    # Make sure the values are not changed.
    torch.testing.assert_close(old_data, fp8_tensor._data, atol=0, rtol=0)
    # Make sure other attributes are not changed (totally identical)
    for attr in attrs_to_check:
        assert id(getattr(fp8_tensor, attr)) == id(attrs[attr])


@pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)
def test_quantized_model_init_high_precision_init_val():
    """Test quantized_model_init with preserve_high_precision_init_val=True"""
    with quantized_model_init(preserve_high_precision_init_val=True):
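        # With preserve_high_precision_init_val=True, the module is expected to
        # keep a CPU copy of the original high-precision weight values.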
        model = Linear(768, 768)

    weight = model.weight

    assert isinstance(weight, QuantizedTensor), "Weight should be QuantizedTensor"
    assert hasattr(weight, "_high_precision_init_val"), "_high_precision_init_val not found"
    assert hasattr(weight, "get_high_precision_init_val"), "get_high_precision_init_val() not found"
    assert hasattr(
        weight, "clear_high_precision_init_val"
    ), "clear_high_precision_init_val() not found"

    high_precision = weight.get_high_precision_init_val()
    assert high_precision.device.type == "cpu", "high_precision_init_val is not on the CPU"

    new_weight = weight._get_quantizer().make_empty(
        shape=weight.shape, dtype=weight.dtype, device=weight.device
    )
    weight._get_quantizer().update_quantized(high_precision.to(weight.device), new_weight)

    torch.testing.assert_close(
        new_weight.dequantize(dtype=weight.dtype),
        weight.dequantize(dtype=weight.dtype),
        rtol=0,
        atol=0,
    )

    weight.clear_high_precision_init_val()
    assert weight.get_high_precision_init_val() is None, "clear_high_precision_init_val() did not work"
    assert not hasattr(
        weight, "_high_precision_init_val"
    ), "clear_high_precision_init_val() did not work"


def test_sanity_checkpointing_on_callables():
    """Test that TE checkpointing works correctly on callable modules."""

    # torch.autograd.Function applied as a plain callable.
    class MyFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            return inp

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output

    module = MyFunction.apply
    inp = torch.randn(10, 10, device="cuda", requires_grad=True)

    out_checkpoint = checkpoint(module, inp)
    out_checkpoint.sum().backward()
    grad_checkpoint = inp.grad.clone()

    # Reset the gradient so the second backward pass does not accumulate
    # into the checkpointed result.
    inp.grad = None
    out_standard = module(inp)
    out_standard.sum().backward()
    grad_standard = inp.grad

    # Assert that gradients are the same
    torch.testing.assert_close(grad_checkpoint, grad_standard)


@pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)
def test_linear_frozen_weights_memory_default_recipe():
    """Test that memory usage is optimized when weights are frozen for MXFP8."""
    dim = 1024
    linear = Linear(dim, dim, bias=False)
    x = torch.randn(dim, dim, requires_grad=True, device="cuda")

    # Freeze weights
    linear.weight.requires_grad = False

    # Forward and backward pass with FP8
    with autocast():
        o = linear(x)
        g_o = torch.randn_like(o)

    # Backward with frozen weights should skip the wgrad GEMM; only the
    # columnwise-quantized grad_output should add to peak memory.
    max_memory_before_backward = torch.cuda.max_memory_allocated()
    o.backward(g_o)
    max_memory_after_backward = torch.cuda.max_memory_allocated()

    memory_diff = (max_memory_after_backward - max_memory_before_backward) / 1e6
    assert memory_diff < 5.5, (
        f"Memory usage with frozen weights ({memory_diff}MB) should be less than 5.5MB as the"
        " grad_output should be quantized only columnwise."
    )


@pytest.mark.parametrize(
    "module_name",
    ("Linear", "LayerNormLinear", "LayerNormMLP", "GroupedLinear", "ops.Linear"),
)
@pytest.mark.parametrize(
    "quantization",
    (None, "fp8_delayed_scaling", "fp8_current_scaling", "mxfp8"),
)
def test_inference_mode(
    module_name: str,
    quantization: Optional[str],
) -> None:
    """Test heuristics for initializing quantized weights"""
    if NVTE_TEST_NVINSPECT_ENABLED and quantization is not None:
        pytest.skip("Quantized model parameters are not supported in debug mode.")

    # Tensor dimensions
    sequence_length = 32
    hidden_size = 32

    # Skip invalid configurations
    if quantization in ("fp8_delayed_scaling", "fp8_current_scaling") and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    if quantization == "mxfp8" and not mxfp8_available:
        pytest.skip(reason_for_no_mxfp8)

    # Construct quantization recipe
    with_quantization = quantization not in (None, "None")
    quantization_recipe = None
    if quantization == "fp8_delayed_scaling":
        quantization_recipe = recipe.DelayedScaling()
    elif quantization == "fp8_current_scaling":
        quantization_recipe = recipe.Float8CurrentScaling()
    elif quantization == "mxfp8":
        quantization_recipe = recipe.MXFP8BlockScaling()

    # Construct module
    module = None
    with torch.no_grad():
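        # Initializing under no_grad is treated as an inference-only setup, so
        # backward-only quantized data (transposes, columnwise data) should be
        # skipped.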
        with quantized_model_init(enabled=with_quantization, recipe=quantization_recipe):
            if module_name == "Linear":
                module = Linear(hidden_size, hidden_size)
            elif module_name == "LayerNormLinear":
                module = LayerNormLinear(hidden_size, hidden_size)
            elif module_name == "LayerNormMLP":
                module = LayerNormMLP(hidden_size, hidden_size)
            elif module_name == "GroupedLinear":
                module = GroupedLinear(1, hidden_size, hidden_size)
            elif module_name == "ops.Linear":
                module = transformer_engine.pytorch.ops.Linear(hidden_size, hidden_size)

    def check_weights():
        """Helper function to check that weight parameters have expected data"""
        for param in module.parameters():
            if isinstance(param, Float8Tensor):
                assert param._data is not None, "Missing FP8 data"
                assert (
                    param._transpose is None and param._transpose_invalid
                ), "FP8 transpose is not expected for inference"
            if isinstance(param, MXFP8Tensor):
                assert param._rowwise_data is not None, "Missing row-wise MXFP8 data"
                assert (
                    param._columnwise_data is None
                ), "Column-wise MXFP8 data is not expected for inference"

    # Check that modules have expected weights after initialization
    check_weights()

    # Check that modules have expected weights after forward pass
    with torch.inference_mode():
        x = torch.zeros(sequence_length, hidden_size, device="cuda")
        kwargs = {}
        if module_name == "GroupedLinear":
            kwargs["m_splits"] = [sequence_length]
        with autocast(enabled=with_quantization, recipe=quantization_recipe):
            y = module(x, **kwargs)
    check_weights()