# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""
This file contains tests for exporting TransformerEngine models to ONNX.

The purpose of these tests is validation that TE models are converted to their correct ONNX
representation. Toward this end, each test captures the output of a TE module forward pass,
converts the TE module to ONNX, and uses ONNX Runtime (ORT) to execute the ONNX graph and
validate the output against TE's output.

Until FP8 is introduced to the ONNX standard, FP8 QuantizeLinear/DequantizeLinear is implemented
using custom ORT operations.

To run many repetitive tests use pytest-loop:
    $ python3 -m pip install pytest-loop
    $ pytest --loop 1000 tests/pytorch/test_onnx_export.py::test_export_layernorm

For reproducibility use: torch.manual_seed(0)
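
A typical test follows this rough pattern (a sketch of the helpers defined below):
    do_export(model, inp, fname, use_fp8)                       # convert the TE module to ONNX
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)           # reference TE forward pass
    serialize_inputs_outputs(fname, inp, te_outputs)            # optional Polygraphy I/O capture
    validate_result(fname, inp, model, te_outputs=te_outputs)   # execute with ORT and compare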
"""

import os
import tempfile
import pytest
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn as nn
from typing import Optional, Union, Tuple, List, Dict
import transformer_engine.pytorch as te
from transformer_engine.common import recipe
import transformer_engine_extensions as tex
from transformer_engine.pytorch.cpp_extensions import gemm, fp8_gemm, gelu, cast_to_fp8, cast_from_fp8
from transformer_engine.pytorch.module.base import get_workspace
import transformer_engine.pytorch.cpp_extensions as texcpp
import transformer_engine.pytorch.softmax as softmax_defs
from transformer_engine.pytorch.utils import get_default_init_method
from transformer_engine.pytorch.export import is_in_onnx_export_mode
from transformer_engine.pytorch.fp8 import FP8GlobalStateManager

# Global test configuration knobs.

# Enable this to serialize test inputs and outputs to file (as a Polygraphy RunResults instance).
SAVE_TEST_IO = bool(int(os.getenv("NVTE_ONNX_EXPORT_SAVE_TEST_IO", "0")))

if SAVE_TEST_IO:
    from polygraphy.json import save_json
    from polygraphy.comparator import RunResults

# The directory where generated ONNX test models are stored.
NVTE_TEST_ARTIFACTS_DIR = os.environ.get('NVTE_TEST_ARTIFACTS_DIR')
NVTE_TEST_ARTIFACTS_DIR = NVTE_TEST_ARTIFACTS_DIR or os.path.join(tempfile.gettempdir(), "./gen_onnx_models")


# The directory where this file is stored.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))

# ScaledUpperTriangMaskedSoftmax is exported via ONNX::Trilu which was introduced in opset 14.
TRILU_OPSET = 14
# Opset used in the ONNX files generated by the tests.
OPSET = 17
assert OPSET >= TRILU_OPSET

# Shared library implementing custom FP8 Q/DQ operators for ONNX Runtime (ORT).
ORT_CUSTOM_OPS_LIB = os.path.join(TESTS_DIR, "./libcustom_ort_fp8_qdq_ops.so")

fp8_available, reason_for_no_fp8 = FP8GlobalStateManager.is_fp8_available()
skip_FP8 = pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)

supported_activations = ["gelu", "relu", "reglu", "geglu", "swiglu"]

all_normalizations = ["LayerNorm", "RMSNorm"]


@pytest.fixture()
def seed_default_rng():
    """Reseed the PRNG for test reproducibility"""
    torch.random.seed()


@pytest.fixture()
def set_max_seq_len(max_seq_len=128):
    """Set the maximum sequence length that can be used for attention masking"""
    os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = f"{max_seq_len}"


def create_fp8_recipe():
    return recipe.DelayedScaling(margin=0, interval=1, fp8_format=recipe.Format.E4M3)


def do_export(
    model: torch.nn.Module,
    inp: torch.Tensor,
    fname: str,
    use_fp8: bool=True,
    opset: int=OPSET,
    input_names: Optional[List[str]] = None,
    output_names: Optional[List[str]] = None,
    dynamic_axes: Optional[Dict[str, Dict[int, str]]] = None
):
    """Export to ONNX"""
    fp8_recipe = create_fp8_recipe()
    input_names = input_names or ["input"]
    output_names = output_names or ["output"]

    with torch.inference_mode(), te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
        warnings.filterwarnings(
            action='ignore',
            category=torch.jit.TracerWarning,
            module=r'.*'
        )

        model.cuda().eval()
        os.makedirs(NVTE_TEST_ARTIFACTS_DIR, exist_ok=True)
        fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)

        inps = inp if isinstance(inp, list) or isinstance(inp, tuple) else (inp,)
        assert len(inps) == len(input_names)
        inds_to_del = [i for i in range(len(inps)) if inps[i] is None]
        input_names = [input_names[i] for i in range(len(inps)) if i not in inds_to_del]

        with te.onnx_export(True):
            torch.onnx.export(
                model,
                inps,
                fname,
                verbose=True,
                dynamic_axes=dynamic_axes,
                opset_version=opset,
                input_names=input_names,
                output_names=output_names,
                do_constant_folding=True,
                operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH)


def to_numpy(tensor):
    if isinstance(tensor, torch.Tensor):
        if tensor.dtype == torch.bfloat16:
            tensor = tensor.type(torch.float32)
        tensor = tensor.detach().cpu().numpy()
    return tensor


def set_layer_scale(module: torch.nn.Module, scale: float, num_gemms: int):
    """Initialize the FP8 quantization scales in module"""
    NB_SCALES_PER_GEMM = 3  # One scale per: input, weights, and output GEMM tensors.
    nb_total_scales = num_gemms * NB_SCALES_PER_GEMM
    module.fp8_init(num_gemms)
    module.fp8_meta["scaling_fwd"].scale = torch.ones(
        nb_total_scales, dtype=torch.float32, device="cuda") / scale
    module.fp8_meta["scaling_fwd"].scale_inv = torch.ones(
        nb_total_scales, dtype=torch.float32, device="cuda") * scale
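
# For example (a sketch, not asserted by any test): set_layer_scale(module, scale=224, num_gemms=1)
# leaves module.fp8_meta["scaling_fwd"] with NB_SCALES_PER_GEMM = 3 entries (input, weight, output):
#   scale     == [1/224, 1/224, 1/224]
#   scale_inv == [224.0, 224.0, 224.0]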


def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.Tensor], torch.Tensor], is_fp8: bool):
    """Transformer Engine forward propagation."""
    fp8_recipe = create_fp8_recipe()
    with torch.inference_mode(), te.fp8_autocast(enabled=is_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
        te_outputs = model(*inps if isinstance(inps, tuple) else (inps,))
        if not isinstance(te_outputs, tuple):
            te_outputs = (te_outputs,)
        return te_outputs


def compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname):
    """ Compare ORT and TE outputs."""
    assert len(onnx_outputs) == len(te_outputs)
    # Compare ORT and PyTorch outputs.
    for onnx_output, te_output in zip(onnx_outputs, te_outputs):
        # np.isclose: abs(a - b) <= (atol + rtol * abs(b))
        te_output = to_numpy(te_output)
        onnx_output = to_numpy(onnx_output)
        ac = ~np.isclose(onnx_output, te_output, atol=atol, rtol=rtol)
        mismatches = ac.nonzero()
        mismatched_ids = [loc for loc in zip(*mismatches)]
        if mismatched_ids:
            # Log some information in case of error.
            print("*" * 100)
            nb_errors = len(mismatched_ids)
            nb_vals = min(nb_errors, max_errors_printed)
            print(f"Detected {nb_errors} diverging values (output shape={onnx_output.shape})")
            print(f"Showing first {nb_vals} errors (ONNX -- TE):")
            abs_err = np.abs(onnx_output - te_output)
            errors = abs_err[mismatches]
            for loc in mismatched_ids[:nb_vals]:
                ref = te_output[loc]
                print(f"{onnx_output[loc]} -- {te_output[loc]} err={abs_err[loc]} > {atol + rtol * abs(ref)}")
            print(f"Max error: {np.max(errors)}")
            if nb_errors > allow_cnt_errors:
                raise ValueError(f"Output validation of {fname} failed with {nb_errors} errors")

def serialize_inputs_outputs(
    fname: str,
    inputs: Union[Tuple[torch.Tensor], torch.Tensor],
    te_outputs: List[torch.Tensor],
    input_names: Optional[List[str]] = None,
    output_names: Optional[List[str]] = None,
):
    if not SAVE_TEST_IO:
        return

    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)

    input_names = input_names or ["input"]
    output_names = output_names or ["output"]
    inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
    named_inputs = zip(input_names, inputs)
    input_data = [{k: v.cpu() for k, v in named_inputs if v is not None}]
    json_fname = fname[:-len(".onnx")] + "_inputs.json"
    save_json(input_data, json_fname, description="custom input data")

    json_fname = fname[:-len(".onnx")] + "_output.json"
    named_outputs = zip(output_names, te_outputs)
    output_data = {k: v.detach().cpu() for k, v in named_outputs if v is not None}
    custom_outputs = RunResults()
    custom_outputs.add([output_data], runner_name="custom_runner")
    custom_outputs.save(json_fname)


def validate_result(
    fname: str,
    inps: Union[Tuple[torch.Tensor], torch.Tensor],
    model: torch.nn.Module,
    atol: float=1.e-8, # np.isclose default atol
    rtol: float=1.e-5, # np.isclose default rtol
    max_errors_printed: int=10,
    is_fp8: bool=False,
    allow_cnt_errors: int=0,
    input_names: List[str]=None,
    output_names: List[str]=None,
    te_outputs: List[torch.Tensor]=None,
):
    """Compare the outputs of a Transformer Engine (TE) module vs the outputs of its ONNX
    representation using ONNX Runtime (ORT) and ensure they are close.

    The purpose of the output comparison is to validate that TE models are converted to
    their correct ONNX representation by testing that TE and ORT outputs match within some
    small threshold (allowing for finite precision errors).

    Argument `allow_cnt_errors` reduces test failure noise due to spurious errors by ignoring a
    very small number (0-3) of outliers. This is fine to do because these outliers are due to
    small kernel implementation differences between TE and ORT and do not imply an incorrect ONNX
    representation (the tests assume that both the ORT and TE kernels are correct).

    Argument `te_outputs` can be used to provide pre-computed TE outputs.
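
    Typical usage (a sketch mirroring the tests below) reuses the TE outputs so the model only
    runs once:
        te_outputs = te_infer(model, inps, is_fp8)
        serialize_inputs_outputs(fname, inps, te_outputs)
        validate_result(fname, inps, model, is_fp8=is_fp8, te_outputs=te_outputs)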
    """

    def create_ort_session(fname: str, is_fp8: bool):
        """Create an ONNX Runtime session for validation."""
        def load_custom_ops(session_opts: ort.SessionOptions):
            """For FP8 validation with ORT we need to load our custom FP8 Q/DQ extension."""
            if not os.path.exists(ORT_CUSTOM_OPS_LIB):
                raise FileNotFoundError(f"Unable to find {ORT_CUSTOM_OPS_LIB}")
            session_opts.register_custom_ops_library(ORT_CUSTOM_OPS_LIB)
            print("registered custom FP8 Q/DQ ops!")

        """Create an ONNX Runtime session for validation."""
        kwargs = {"providers": ['CUDAExecutionProvider', 'CPUExecutionProvider']}
        if is_fp8:
            sess_options = ort.SessionOptions()
            load_custom_ops(sess_options)
            kwargs["sess_options"] = sess_options

        s = ort.InferenceSession(fname, **kwargs)
        return s

    def create_ort_input_dict(session, inputs):
        inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
        input_names = [x.name for x in session.get_inputs()]
        inps = [to_numpy(x) for x in inputs if x is not None]
        inp_dict = dict(zip(input_names, inps))
        return inp_dict

    input_names = input_names or ["input"]
    output_names = output_names or ["output"]

    # Run ORT session and TE model.
    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
    if not te_outputs:
        te_outputs = te_infer(model, inps, is_fp8)
    ort_s = create_ort_session(fname, is_fp8)
    input_feed = create_ort_input_dict(ort_s, inps)
    onnx_outputs = ort_s.run(None, input_feed=input_feed)
    compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname)


def create_meta(scale_factor: float, size: int=1):
    meta = tex.FP8TensorMeta()
    meta.amax_history = torch.zeros(1, size, dtype=torch.float32, device="cuda")
    meta.scale_inv = torch.ones(size, dtype=torch.float32, device="cuda") / scale_factor
    meta.scale = torch.ones(size, dtype=torch.float32, device="cuda") * scale_factor
    return meta
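
# A rough sketch of how the tests use create_meta(): cast_to_fp8() scales by meta.scale before
# rounding to FP8, and cast_from_fp8() multiplies by meta.scale_inv on the way back, so with
# scale_factor=224 a round trip is approximately
#   q     = fp8_e4m3(x * 224)
#   x_hat = q * (1 / 224)  ~= x    (up to FP8 rounding/clamping error)
# (illustrative only; the exact behavior lives in Transformer Engine's C++ extensions).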


def dtype2str(dtype: torch.dtype, fake_bf16_io=False):
    if fake_bf16_io:
        assert dtype == torch.bfloat16
        return "_fake_bf16"
    return {
        torch.float32: "_fp32",
        torch.float16: "_fp16",
        torch.bfloat16: "_bf16",
    }[dtype]


def as_te_type(dtype: torch.dtype):
    return {
        torch.float32: tex.DType.kFloat32,
        torch.float16: tex.DType.kFloat16,
        torch.bfloat16: tex.DType.kBFloat16,
    }[dtype]


def get_attn_mask_str(use_mask, attn_mask_type):
    # See FusedScaleMaskSoftmax::forward_fused_softmax for logic behind names.
    if attn_mask_type is None:
        return "_mask" if use_mask else "_no-mask"
    attn_mask_str = "_padding-no-mask"
    attn_mask_str = "_causal-mask" if attn_mask_type == "causal" else attn_mask_str
    attn_mask_str = "_padding-mask" if use_mask and attn_mask_type == "padding" else attn_mask_str
    return attn_mask_str


"""
Test cases begin here.
"""


@skip_FP8
@pytest.mark.parametrize("scale_factor", [1, 224])
@pytest.mark.parametrize(
    "precision,             atol", [
    [torch.float32,         1e-7],
    [torch.float16,         1e-7],
    [torch.bfloat16,        5e-3],
    ["fake-torch.bfloat16", 5e-3],
])
def test_export_cast_ops(seed_default_rng, scale_factor: float, atol: float, precision: torch.dtype):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    class TestFP8_QDQ(nn.Module):
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io

        def forward(self, inp):
            ret = cast_to_fp8(
                inp,
                self.meta,
                self.fp8_tensor,
                self.fp8_type)

            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    inp = torch.randn(hidden_size, in_features, device="cuda",
        dtype=torch.float if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"te.cast_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_QDQ(fake_bf16_io)

    do_export(model, inp, fname)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs)
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(fname, inp, model, atol=atol, is_fp8=True, te_outputs=te_outputs)

@skip_FP8
@pytest.mark.parametrize("scale_factor", [448])
@pytest.mark.parametrize(
    "precision,             atol", [
    [torch.float32,         1e-5],
    [torch.float16,         1e-5],
    [torch.bfloat16,        5e-3],
    ["fake-torch.bfloat16", 5e-3]
])
def test_export_gelu_fp8(scale_factor: float, precision: torch.dtype, atol: float):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    class TestFP8_Gelu(nn.Module):
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io

        def forward(self, inp):
            ret = gelu(
                inp,
                self.meta,
                self.fp8_tensor,
                self.fp8_type)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    inp = torch.randn(hidden_size, in_features, device="cuda",
        dtype=torch.float if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"te.gelu_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_Gelu(fake_bf16_io)
    do_export(model, inp, fname)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs)
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(fname, inp, model, rtol=0, atol=atol, is_fp8=True, allow_cnt_errors=2, te_outputs=te_outputs)


@pytest.mark.parametrize("scale_factors",
    [(224, 224,),
])
@pytest.mark.parametrize(
    "precision,             use_fp8, use_bias, use_gelu", [
    (torch.float32,         False,   False,    False),
    (torch.float16,         False,   False,    False),
    (torch.bfloat16,        False,   False,    False),
    (torch.float32,         False,   True,     False),
    (torch.float16,         False,   True,     False),
    (torch.bfloat16,        False,   True,     False),
    (torch.float32,         False,   True,     True),
    (torch.float16,         False,   True,     True),
    (torch.bfloat16,        False,   True,     True),

    # For FP8 GEMM GeLU is not used.
    (torch.float32,         True,    False,    False),
    (torch.float16,         True,    False,    False),
    (torch.bfloat16,        True,    False,    False),
    # When enabling bias we must use float16 or bfloat16 (because of kernel limitations)
    (torch.float16,         True,    True,     False),
    (torch.bfloat16,        True,    True,     False),
])
def test_export_gemm(
    seed_default_rng,
    precision, # Precision of inputs, weights, output and bias
    use_fp8,
    use_bias,
    use_gelu,
    scale_factors
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    class TestFP8_GEMM(nn.Module):
        def __init__(self, precision, use_bias, gelu, scale_factors):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision

            self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
            self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
            nb_inp_scales, nb_weight_scales = 1, out_features
            act_scale_factor, weight_scale_factor = scale_factors
            self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
            self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)

            bias_size = nb_weight_scales
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")

            self.inp_type = tex.DType.kFloat8E4M3
            self.weights_type = tex.DType.kFloat8E4M3
            self.outp_type = precision

        def forward(self, inp, weight):
            inp_fp8 = cast_to_fp8(
                inp,
                self.meta_inp,
                self.fp8_tensor_inp,
                self.inp_type)

            weight_fp8 = cast_to_fp8(
                weight,
                self.meta_weight,
                self.fp8_tensor_weight,
                self.weights_type)

            ret = fp8_gemm(
                weight_fp8,
                self.meta_weight.scale_inv,
                self.fp8_tensor_weight,
                self.inp_type,
                inp_fp8,
                self.meta_inp.scale_inv,
                self.fp8_tensor_inp,
                self.weights_type,
                self.outp_type,
                get_workspace(),
                bias=self.bias,
                use_bias=self.use_bias,
                use_split_accumulator=False)
            return ret

    class Test_GEMM(nn.Module):
        def __init__(self, precision, use_bias=False, gelu=False):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            bias_size = out_features
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")

        def forward(self, inp, weight):
            outp_type = self.precision

            # note: due to logic in lines 104:116 and L129 in cpp_extensions.py
            # it appears either bias OR gelu can be activated, not both
            ret, _, _ = gemm(
                weight,
                inp,
                outp_type,
                get_workspace(),

                # test bias
                bias=self.bias,
                use_bias=self.use_bias,

                # test gelu
                gelu=self.gelu,
                gelu_input=self.gelu_input,
                grad=False, # only True for backward pass
                accumulate=False,
            )
            return ret

    # If gelu is applied then bias must be added, as defined by TE kernel.
    if use_gelu: assert use_bias
    # Set dimensions (these are arbitrary).
    out_features = 128
    hidden_size = 256
    in_features = 64
    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    weight = torch.randn(out_features, in_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    gelu_str = "_gelu" if use_gelu else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.gemm{fp8_str}{bias_str}{gelu_str}{high_prec_str}.onnx"
    input_names = ['input', 'weight']
    if use_fp8:
        model = TestFP8_GEMM(precision, use_bias, use_gelu, scale_factors)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
        serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
        if precision != torch.bfloat16:
            validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2,
                is_fp8=True, input_names=input_names, te_outputs=te_outputs)
    else:
        model = Test_GEMM(precision, use_bias, use_gelu)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
        serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
        if precision != torch.bfloat16:
            validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2,
                input_names=input_names, te_outputs=te_outputs)


@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize(
    "use_fp8, precision,             atol", [
    [False,   torch.float32,         1e-7],
    [False,   torch.float16,         1e-7],
    [False,   torch.bfloat16,        1e-7],
    [False,   "fake-torch.bfloat16", 1e-7],
    [True,    torch.float32,         1e-7],
    [True,    torch.float16,         1e-7],
    [True,    torch.bfloat16,        1e-2],
    [True,    "fake-torch.bfloat16", 1e-2]
])
def test_export_layernorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    atol: float
):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]

    class Test_Layernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            eps = 1e-6 # An arbitrary small value
            dtype = torch.float if fake_bf16_io else precision
            self.ln = te.LayerNorm(inp_shape[1], eps, params_dtype=dtype,
                zero_centered_gamma=zero_centered_gamma).eval().cuda()

        def forward(self, inp):
            ret = self.ln(inp)
            return ret

    class TestFP8_Layernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.bias = torch.zeros(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.eps = 1e-6 # An arbitrary small value

            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3

        def forward(self, inp):
            ret = texcpp.layernorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                zero_centered_gamma)

            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                as_te_type(precision))
            if fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    model = TestFP8_Layernorm() if use_fp8 else Test_Layernorm()
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs)

@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize(
    "use_fp8, precision,             atol", [
    [False,   torch.float32,         1e-7],
    [False,   torch.float16,         1e-7],
    [False,   torch.bfloat16,        1e-7],
    [False,   "fake-torch.bfloat16", 1e-7],
    [True,    torch.float32,         1e-7],
    [True,    torch.float16,         1e-7],
    [True,    torch.bfloat16,        1e-2],
    [True,    "fake-torch.bfloat16", 1e-2]
])
def test_export_rmsnorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    atol: float
):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]

    class Test_RMSnorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            eps = 1e-6 # An arbitrary small value
            dtype = torch.float if fake_bf16_io else precision
            self.ln = te.RMSNorm(inp_shape[1], eps, params_dtype=dtype).eval().cuda()

        def forward(self, inp):
            ret = self.ln(inp)
            return ret

    class TestFP8_RMSnorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.eps = 1e-6 # An arbitrary small value

            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3

        def forward(self, inp):
            ret = texcpp.rmsnorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                False)

            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                as_te_type(precision))
            if fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    model = TestFP8_RMSnorm() if use_fp8 else Test_RMSnorm()
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs)

@skip_FP8
@pytest.mark.parametrize("softmax_fn", [
    softmax_defs.ScaledUpperTriangMaskedSoftmax,
    softmax_defs.ScaledMaskedSoftmax,
    softmax_defs.ScaledSoftmax,
    te.softmax.FusedScaleMaskSoftmax,
])
# Softmax kernel only supports FP16 or BF16!
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16, "fake-torch.bfloat16"])
def test_export_softmax(seed_default_rng, set_max_seq_len, softmax_fn, precision):
    class Test_Softmax(nn.Module):
        def __init__(self, softmax_fn, fake_bf16_io, mask_inp=False):
            super().__init__()
            self.softmax_fn = softmax_fn
            self.scale = 8 # arbitrary value
            self.mask_inp = mask_inp
            self.fused_scaled_softmax = None
            self.fake_bf16_io = fake_bf16_io
            if self.softmax_fn == te.softmax.FusedScaleMaskSoftmax:
                self.fused_scaled_softmax = te.softmax.FusedScaleMaskSoftmax(
                    attn_mask_type="causal",
                    mask_func=te.utils.attention_mask_func,
                    softmax_in_fp32=True,
                )

        def forward(self, inp, mask):
            if self.fake_bf16_io:
                inp = inp.type(torch.bfloat16)

            if self.fused_scaled_softmax:
                ret = self.fused_scaled_softmax(inp, mask, self.scale)
            else:
                if self.mask_inp:
                    ret = self.softmax_fn.apply(inp, mask, self.scale)
                else:
                    ret = self.softmax_fn.apply(inp, self.scale)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    fake_bf16_io = precision == "fake-torch.bfloat16"
    precision = torch.bfloat16 if fake_bf16_io else precision

    # Set dimensions (these are arbitrary).
    batch_size, n_heads, seq_len_q, seq_len_k = 64, 96, 32, 32
    mask = None
    input_names = ["input", "mask"]
    inp_shape = [batch_size, n_heads, seq_len_q, seq_len_k]
    if softmax_fn == softmax_defs.ScaledUpperTriangMaskedSoftmax:
        inp_shape = [batch_size, seq_len_q, seq_len_k]
        kernel_str = "ScaledUpperTriangMaskedSoftmax"
        model = Test_Softmax(softmax_fn, fake_bf16_io)
    elif softmax_fn == softmax_defs.ScaledMaskedSoftmax:
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(1, 1, seq_len_q, seq_len_k, device="cuda", dtype=precision)
        mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
        kernel_str = "ScaledMaskedSoftmax"
        model = Test_Softmax(softmax_fn, fake_bf16_io, mask_inp=True)
    elif softmax_fn == softmax_defs.ScaledSoftmax:
        kernel_str = "ScaledSoftmax"
        model = Test_Softmax(softmax_fn, fake_bf16_io)
    elif softmax_fn == te.softmax.FusedScaleMaskSoftmax:
        kernel_str = "TorchSoftmax"
        model = Test_Softmax(softmax_fn, fake_bf16_io)

    input_tensor = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"{kernel_str}{high_prec_str}.onnx"
    inp = (input_tensor, mask)
    dynamic_axes = {}
    if mask is not None:
        dynamic_axes = {"mask": {2:"seq_len_q", 3:"seq_len_k"}}
    do_export(model, inp, fname, input_names=input_names, dynamic_axes=dynamic_axes)
    te_outputs = te_infer(model, inp, is_fp8=False)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if fake_bf16_io or precision != torch.bfloat16:
        atol = 5e-2 if fake_bf16_io else 1e-3
        validate_result(fname, inp, model, atol=atol, input_names=input_names, te_outputs=te_outputs)


# Test dynamically generated softmax mask.
# Softmax kernel only supports FP16 or BF16!
@skip_FP8
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16, "fake-torch.bfloat16"])
def test_softmax_mask_fn(seed_default_rng, precision):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if fake_bf16_io else precision

    class Test_Softmax(nn.Module):
        def __init__(self, use_default_te_mask_fn: bool, fake_bf16_io: bool):
            super().__init__()
            self.scale = 1 # arbitrary value
            self.fake_bf16_io = fake_bf16_io

            if use_default_te_mask_fn:
                os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = "0"
            else:
                os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = f"{seq_len_q}"

            # Use NVTE_MASKED_SOFTMAX_FUSION to force TE to use forward_torch_softmax
            # even when is_in_onnx_export_mode()==False.
            os.environ["NVTE_MASKED_SOFTMAX_FUSION"] = "0"
            self.fused_scaled_softmax = te.softmax.FusedScaleMaskSoftmax(
                attn_mask_type="causal",
                mask_func=te.utils.attention_mask_func,
                softmax_in_fp32=True,
            )

        def forward(self, inp, mask):
            if self.fake_bf16_io:
                inp = inp.type(torch.bfloat16)
            ret = self.fused_scaled_softmax(inp, mask, self.scale)
            if self.fake_bf16_io:
                ret = ret.type(torch.float)
            return ret

    # Set dimensions (these are arbitrary).
    mask = None
    batch_size, n_heads, seq_len_q, seq_len_k = 64, 96, 32, 32
    assert seq_len_q == seq_len_k # This is a causal (TRILU) mask
    inp_shape = [batch_size, n_heads, seq_len_q, seq_len_k]
    input_tensor = torch.randn(
            *inp_shape, device="cuda", dtype=torch.float if fake_bf16_io else precision)
    inp = (input_tensor, mask)
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)

    # Compare the outputs of TE when using the default softmax mask
    # to the TE outputs produced when using the ONNX-compatible causal mask.
    # This verifies that _get_onnx_export_causal_mask generates a correct mask.
    model = Test_Softmax(use_default_te_mask_fn=True, fake_bf16_io=fake_bf16_io)
    te_outputs_default_mask = te_infer(model, inp, is_fp8=True)
    with te.onnx_export(True):
        # ONNX export mode forces use of the ONNX-compatible causal mask.
        model_onnx_mask = Test_Softmax(use_default_te_mask_fn=False, fake_bf16_io=fake_bf16_io)
        te_outputs_onnx_mask = te_infer(model_onnx_mask, inp, is_fp8=True)
    compare_outputs(te_outputs_default_mask, te_outputs_onnx_mask,
        atol=0, rtol=0, max_errors_printed=10, allow_cnt_errors=0, fname="softmax masking")

    # Compare the outputs of TE when using the default softmax mask
    # to the ORT ONNX outputs produced when using the ONNX-compatible causal mask.
    input_names = ["input", "mask"]
    kernel_str = "FusedScaleMaskSoftmax"
    fname = f"{kernel_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, input_names=input_names)
    serialize_inputs_outputs(fname, inp, te_outputs=te_outputs_default_mask, input_names=input_names)
    if fake_bf16_io or precision != torch.bfloat16:
        atol = 1e-2 if fake_bf16_io else 1e-3
        validate_result(
                fname, inp, model_onnx_mask, atol=atol,
                input_names=input_names, te_outputs=te_outputs_default_mask)


@pytest.mark.parametrize("scale_factor", [1])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize(
    "precision,      use_bias",[
    (torch.float32,  False),
    (torch.float32,  True),
    (torch.float16,  False),
    (torch.float16,  True),
    # Todo: cannot configure BF16 when bias is disabled (ORT issue?)
    (torch.bfloat16, False),
    # Todo: cannot configure BF16 when bias is enabled (ORT issue?)
    (torch.bfloat16, True),
])
def test_export_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    precision: torch.dtype
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256

    class Test_Linear(nn.Module):
        def __init__(self,
                in_features,
                out_features,
                use_bias,
                return_bias,
                precision
            ):
            super().__init__()
            self.linear = te.Linear(
                in_features,
                out_features,
                bias=use_bias,
                return_bias=return_bias,
                params_dtype=precision
            )

        def forward(self, inp):
            ret = self.linear(inp)
            return ret

    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.linear{fp8_str}{bias_str}{high_prec_str}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = Test_Linear(
            in_features,
            out_features,
            use_bias,
            return_bias,
            precision
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model.linear, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)

        if precision in (torch.bfloat16, ):
            return
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
        else:
            validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8, te_outputs=te_outputs)


@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision,      use_bias",[
    (torch.float32,  False),
    (torch.float32,  True),
    (torch.float16,  True),
    (torch.float16,  False),
    (torch.bfloat16, True),
    (torch.bfloat16, False),
])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    normalization: str,
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    if normalization == "RMSNorm" and zero_centered_gamma:
        pytest.skip("RMSNorm does not support zero_centered_gamma yet!")

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256

    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_linear{fp8_str}{bias_str}{high_prec_str}.onnx"

    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormLinear(
            hidden_size,
            3 * hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
            normalization=normalization,
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        if precision in (torch.bfloat16, ):
            return
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
        elif precision != torch.bfloat16:
            validate_result(fname, inp, model, atol=1e-6, is_fp8=use_fp8, te_outputs=te_outputs)


@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision,      use_bias",[
    (torch.float32,  False),
    (torch.float32,  True),
    (torch.float16,  True),
    (torch.float16,  False),
    (torch.bfloat16, True),
    (torch.bfloat16, False),
])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_mlp(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    activation: str,
    normalization: str,
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    if normalization == "RMSNorm" and zero_centered_gamma:
        pytest.skip("RMSNorm does not support zero_centered_gamma yet!")

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256
    ffn_hidden_size = 256

    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_mlp{fp8_str}{bias_str}{high_prec_str}_{activation}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormMLP(
            hidden_size,
            ffn_hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
            activation=activation,
            normalization=normalization,
        ).to(device='cuda')
        if use_fp8:
            set_layer_scale(model, scale_factor, num_gemms=2)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        if precision in (torch.bfloat16, ):
            return
        atol = 1e-6 if use_fp8 else (5e-1 if activation=="swiglu" else 1e-3)
        validate_result(fname, inp, model, atol=atol, is_fp8=use_fp8, te_outputs=te_outputs)


@skip_FP8
@pytest.mark.parametrize(
    "precision,      use_mask, attn_mask_type", [
    (torch.float32,  True,     "padding"), # calls forward_torch_softmax (apply user mask)
    (torch.float32,  False,    "no_mask"), # calls forward_torch_softmax (apply no mask)
    (torch.float16,  False,    "causal"),  # calls forward_torch_softmax (apply dynamic onnx mask)
    (torch.float16,  True,     "padding"), # calls forward_torch_softmax (apply user mask)
    (torch.float16,  False,    "no_mask"), # calls forward_torch_softmax (apply no mask)
    (torch.bfloat16, False,    "causal"),  # calls forward_torch_softmax (apply dynamic onnx mask)
    (torch.bfloat16, True,     "padding"), # calls forward_torch_softmax (apply user mask)
    (torch.bfloat16, False,    "no_mask"), # calls forward_torch_softmax (apply no mask)
])
def test_export_core_attention(
    seed_default_rng,
    set_max_seq_len,
    precision: torch.dtype,
    use_mask: bool,
    attn_mask_type: str,
):
    # Set dimensions (these are arbitrary).
    seq_len, batch_size, num_attention_heads, kv_channels = (64, 4, 1, 64)
    qkv_size = (seq_len, batch_size, num_attention_heads, kv_channels)

    query_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    key_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    value_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    input_names = ["query", "key", "value", "attention_mask"]
    attention_mask = None
    if use_mask:
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(qkv_size[1], qkv_size[2], qkv_size[0], qkv_size[0], device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    inp = (query_layer, key_layer, value_layer, attention_mask)

    mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    high_prec_str = dtype2str(precision)
    fname = f"te.core_attention{mask_str}{high_prec_str}.onnx"

    model = te.attention.DotProductAttention(
        num_attention_heads=num_attention_heads,
        kv_channels=kv_channels,
        attention_dropout=0.5,
        attn_mask_type=attn_mask_type,
    ).to(device='cuda')
    do_export(model,
            inp,
            fname,
            input_names=input_names,
            use_fp8=True)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision in (torch.bfloat16, ):
        return
    validate_result(fname, inp, model, is_fp8=True, atol=1e-2, input_names=input_names, te_outputs=te_outputs)
1205


test_configs_multihead_attention = [
    #"use_mask, attn_mask_type"
    (False,    "causal"),  # calls ScaledUpperTriangMaskedSoftmax
    (True,     "padding"), # calls ScaledMaskedSoftmax
    (False,    "padding"), # calls ScaledSoftmax
]
test_configs_attention_type = [
    #"input_layernorm, attention_type, fuse_qkv_params"
    (True,             "self",         True),
    (False,            "self",         True),
    (True,             "self",         False),
    (False,            "self",         False),
    (True,             "cross",        True),
    (False,            "cross",        True),
    (True,             "cross",        False),
    (False,            "cross",        False),
]
@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize("input_layernorm, attention_type, fuse_qkv_params", test_configs_attention_type)
def test_export_multihead_attention(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    precision: torch.dtype,
    return_layernorm_output: bool,
    input_layernorm: bool,
    attention_type: str,
    fuse_qkv_params: bool
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    hidden_size = 256
    sequence_length = 128
    batch_size = 4
    num_attention_heads = 32
    kv_channels = 8
    attention_dropout = 0.1
    layernorm_epsilon = 1e-5
    init_method = output_layer_init_method = get_default_init_method()
    attention_args = (
        hidden_size,
        num_attention_heads,
        kv_channels,
        attention_dropout,
        layernorm_epsilon,
        init_method,
        output_layer_init_method,
    )

    hidden_states_context = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)

    encoder_output = None

    if attention_type == "cross":
        encoder_output = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")

    fp8_str = "_fp8" if use_fp8 else ""
    dtype_str = dtype2str(precision)
    attn_type_str = "_self-attention" if attention_type == "self" else "_cross-attention"
    fuse_qkv_str = "_fused-qkv" if fuse_qkv_params else ""
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    input_ln_str = "_input-ln" if input_layernorm else ""
    fname = f"te.multihead_attention{fp8_str}{attn_mask_str}{attn_type_str}{input_ln_str}{fuse_qkv_str}{dtype_str}.onnx"

    model = te.MultiheadAttention(
        *attention_args,
        attn_mask_type=attn_mask_type,
        params_dtype=precision,
        return_layernorm_output=return_layernorm_output,
        input_layernorm=input_layernorm,
        attention_type=attention_type,
        fuse_qkv_params=fuse_qkv_params,
        return_bias=True,
    ).to(device='cuda')

    inp_context = (hidden_states_context, attention_mask, encoder_output)
    input_names = ["hidden_states", "attention_mask", "encoder_output"]
    output_names = ["attention_output", "attention_bias"]
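    # Mark the sequence and batch dimensions as dynamic so the exported graph accepts
    # input shapes that differ from the export-time example (exercised by the
    # generative-phase check further below).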
    do_export(model, inp_context, fname, use_fp8, input_names=input_names, output_names=output_names,
        dynamic_axes={"hidden_states": {0: "seq", 1:"bs"},
                      "attention_output": {0: "seq", 1:"bs"}})
    te_outputs = te_infer(model, inp_context, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp_context, te_outputs, input_names=input_names, output_names=output_names)
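    # ORT-based validation below is only run for float32/float16; bfloat16 outputs are not
    # compared against the ONNX graph.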
    if precision in (torch.bfloat16, ):
        return

    if not use_fp8:
        validate_result(fname, inp_context, model, atol=1e-3, input_names=input_names,
            output_names=output_names, te_outputs=te_outputs)
    else:
        validate_result(fname, inp_context, model, atol=1e-2, is_fp8=use_fp8,
            input_names=input_names, output_names=output_names, allow_cnt_errors=3,
            te_outputs=te_outputs)

    # In the GPT generative (inference) phase the input sequence is shorter than the maximum
    # allowed sequence length, and we want to exercise that condition.
    # Pretend we're in the generative phase when it makes sense (causal mask and self-attention).
    is_generative_phase = (attn_mask_type == "causal" and attention_type == "self")
    if is_generative_phase:
        seq_len_offset = 8
        hidden_states_generative = torch.randn(sequence_length-seq_len_offset, batch_size, hidden_size, dtype=precision, device="cuda")
        inp_generative = (hidden_states_generative, attention_mask, encoder_output)
        if not use_fp8:
            validate_result(fname, inp_generative, model, atol=1e-3, input_names=input_names, output_names=output_names)
        else:
            validate_result(fname, inp_generative, model, atol=1e-2, is_fp8=use_fp8,
                input_names=input_names, output_names=output_names, allow_cnt_errors=3)



@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("output_layernorm", [
    #True, # TO DO: handle this
    False
])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("fuse_qkv_params", [False, True])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
def test_export_transformer_layer(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    output_layernorm: bool,
    precision: torch.dtype,
    fuse_qkv_params: bool,
    zero_centered_gamma: bool,
    activation: str,
):
    # Skip FP8 tests on devices that do not support FP8.
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4

    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    input_names = ["input", "attention_mask"]
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random boolean mask in which each element is set with probability 0.5.
        probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    inp = (input_tensor, attention_mask)

    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}_{activation}.onnx"

    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma,
        activation=activation).to(device='cuda')
    do_export(model, inp, fname, use_fp8, input_names=input_names)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision in (torch.bfloat16, ):
        return
    atol = 5e-1 if (use_fp8 or activation == "swiglu") else 1e-3
    validate_result(fname, inp, model, atol=atol, is_fp8=use_fp8, input_names=input_names, te_outputs=te_outputs)


@pytest.mark.parametrize("use_fp8", [True])
@pytest.mark.parametrize("ln_scale_factor", [448*2])
@pytest.mark.parametrize("gemm_scale_factors", [(224, 224,),])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_gemm_layernorm(
    seed_default_rng,
    use_fp8: bool,
    ln_scale_factor: float,
    gemm_scale_factors: Tuple[float, float],
    precision: torch.dtype,
    zero_centered_gamma: bool
):
    """This is a regression test for testing that all LN inputs have the same type.

    The test sets up GEMM with FP32 output which feeds into an LN that is configured
    with FP16 or BF16 weights and bias.
    """

    # Skip FP8 tests on devices that do not support FP8.
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
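
    # The module under test chains an FP8 GEMM (TestFP8_GEMM, defined below) into
    # layernorm_fwd_fp8_inf, then casts the FP8 LayerNorm output back to the high-precision dtype.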
    class TestFP8_GemmLayernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda")
            self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda")
            self.eps = 1e-6 # An arbitrary small value

            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(ln_scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.gemm = TestFP8_GEMM(
                precision, use_bias=False, gelu=False, scale_factors=gemm_scale_factors)

        def forward(self, inp, weight):
            x = self.gemm(inp, weight)
            x = texcpp.layernorm_fwd_fp8_inf(
                x,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                zero_centered_gamma)

            x = cast_from_fp8(
                x,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                tex.DType.kFloat32 if precision == torch.float32 else tex.DType.kFloat16)
            return x

    out_features = 128
    hidden_size = 128
    in_features = 128
    class TestFP8_GEMM(nn.Module):
        def __init__(self, precision, use_bias, gelu, scale_factors):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision

            self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
            self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
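            # A single FP8 scale is used for the activation tensor and one scale per output
            # channel (out_features) for the weight tensor.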
            nb_inp_scales, nb_weight_scales = 1, out_features
            act_scale_factor, weight_scale_factor = scale_factors
            self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
            self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)

            bias_size = nb_weight_scales
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")

            self.inp_type = tex.DType.kFloat8E4M3
            self.weights_type = tex.DType.kFloat8E4M3
            self.outp_type = precision

        def forward(self, inp, weight):
            inp_fp8 = cast_to_fp8(
                inp,
                self.meta_inp,
                self.fp8_tensor_inp,
                self.inp_type)

            weight_fp8 = cast_to_fp8(
                weight,
                self.meta_weight,
                self.fp8_tensor_weight,
                self.weights_type)

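            # Run the FP8 GEMM using the scale_inv values recorded in the FP8 metadata;
            # the result is produced in self.outp_type (the test's high-precision dtype).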
            ret = fp8_gemm(
                weight_fp8,
                self.meta_weight.scale_inv,
                self.fp8_tensor_weight,
                self.inp_type,
                inp_fp8,
                self.meta_inp.scale_inv,
                self.fp8_tensor_inp,
                self.weights_type,
                self.outp_type,
                get_workspace(),
                bias=self.bias,
                use_bias=self.use_bias,
                use_split_accumulator=False)
            return ret

    inp = torch.randn(hidden_size, in_features, dtype=precision, device="cuda")
    weight = torch.randn(out_features, in_features, dtype=precision, device="cuda")
    model = TestFP8_GemmLayernorm()
    high_prec_str = dtype2str(precision)
    fp8_str = f"_fp8" if use_fp8 else ""
    fname = f"te.gemm_layernorm{fp8_str}{high_prec_str}.onnx"
    input_names = ['input', 'weight']
    do_export(model, (inp, weight), fname, use_fp8=use_fp8, input_names=input_names)
    te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
    serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
    if precision not in (torch.bfloat16, ):
        validate_result(
            fname, (inp, weight), model, atol=5e-2, is_fp8=use_fp8, allow_cnt_errors=2,
            input_names=input_names, te_outputs=te_outputs)


@skip_FP8
@pytest.mark.parametrize("use_fp8", [True, False])
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [True])
def test_export_gpt_generation(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
):
    """Test that the ONNX model can correctly handle inputs with different shapes and that
    the attention mask it adjusted on-the-fly to different sequence lengths.
    """

    # Skip FP8 tests on devices that do not support FP8.
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4
    attention_mask = None
    use_mask = True
    attn_mask_type = "causal"
    fuse_qkv_params = True
    output_layernorm = False

    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer_generative{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}.onnx"

    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma).to(device='cuda')

    # "Context phase": use full input sequence length
    input_names = ["input"]
    output_names = ["output"]
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    inp = (input_tensor,)
    do_export(model, inp, fname, use_fp8,
        input_names=input_names, output_names=output_names,
        dynamic_axes={"input": {0: "seq", 1:"bs"},
                      "output": {0: "seq", 1:"bs"}, })
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names, output_names=output_names)
    if precision not in (torch.bfloat16, ):
        validate_result(fname, inp, model, atol=6e-3, is_fp8=use_fp8, input_names=input_names,
            te_outputs=te_outputs)

    # "Generative phase": use a single input (sequence len=1). For FP8 we need to pad the sequence to mult of 8.
    sequence_length = 1 if not use_fp8 else 8
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    inp = (input_tensor, attention_mask)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision not in (torch.bfloat16, ):
        validate_result(fname, inp, model, atol=6e-3, is_fp8=use_fp8, input_names=input_names,
            te_outputs=te_outputs)


@pytest.mark.parametrize("enabled", [True, False])
def test_export_ctx_manager(enabled):
    assert is_in_onnx_export_mode() == False
    with te.onnx_export(enabled):
        assert is_in_onnx_export_mode() == enabled
    assert is_in_onnx_export_mode() == False