# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

"""
This file contains tests for exporting TransformerEngine models to ONNX.

The purpose of these tests is validation that TE models are converted to their correct ONNX
representation. Toward this end, each test captures the output of a TE module forward pass,
converts the TE module to ONNX, and uses ONNX Runtime (ORT) to execute the ONNX graph and
validate the output against TE's output.

Until FP8 is introduced to the ONNX standard, FP8 QuantizeLinear/DequantizeLinear is implemented
using custom ORT operations.

To run many repetitive tests use pytest-loop:
    $ python3 -m pip install pytest-loop
    $ pytest --loop 1000 tests/pytorch/test_onnx_export.py::test_export_layernorm

For reproducibility use: torch.manual_seed(0)
"""

import os
import tempfile

import pytest
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn as nn
from typing import Optional, Union, Tuple, List

import transformer_engine.pytorch as te
from transformer_engine.common import recipe
import transformer_engine_torch as tex

from transformer_engine.pytorch.cpp_extensions import (
    gemm,
    fp8_gemm,
    gelu,
    cast_to_fp8,
    cast_from_fp8,
)
from transformer_engine.pytorch.module.base import get_workspace
import transformer_engine.pytorch.cpp_extensions as texcpp
import transformer_engine.pytorch.softmax as softmax_defs
from transformer_engine.pytorch.utils import get_default_init_method
from transformer_engine.pytorch.export import is_in_onnx_export_mode
from transformer_engine.pytorch.fp8 import FP8GlobalStateManager

# Global test configuration knobs.

# Enable this to serialize test inputs and outputs to file (as a Polygraphy RunResults instance).
SAVE_TEST_IO = bool(int(os.getenv("NVTE_ONNX_EXPORT_SAVE_TEST_IO", "0")))

if SAVE_TEST_IO:
    from polygraphy.json import save_json
    from polygraphy.comparator import RunResults
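# Note: with SAVE_TEST_IO enabled, each test writes "<model>_inputs.json" and "<model>_output.json"
# next to the exported ONNX file; these can be consumed by Polygraphy's `run` tool (via its
# input/output loading options) for offline comparison against other runners.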

# The directory where generated ONNX test models are stored.
NVTE_TEST_ARTIFACTS_DIR = os.environ.get("NVTE_TEST_ARTIFACTS_DIR")
NVTE_TEST_ARTIFACTS_DIR = NVTE_TEST_ARTIFACTS_DIR or os.path.join(
    tempfile.gettempdir(), "./gen_onnx_models"
)


# The directory where this file is stored.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))

# ScaledUpperTriangMaskedSoftmax is exported via ONNX::Trilu which was introduced in opset 14.
TRILU_OPSET = 14
# Opset used in the ONNX files generated by the tests.
OPSET = 17
assert OPSET >= TRILU_OPSET

# Shared library implementing custom FP8 Q/DQ operators for ONNX Runtime (ORT).
ORT_CUSTOM_OPS_LIB = os.path.join(TESTS_DIR, "./libcustom_ort_fp8_qdq_ops.so")

fp8_available, reason_for_no_fp8 = FP8GlobalStateManager.is_fp8_available()
skip_FP8 = pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)

supported_activations = ["gelu", "relu", "reglu", "geglu", "swiglu"]

all_normalizations = ["LayerNorm", "RMSNorm"]


@pytest.fixture()
def seed_default_rng():
    """Reseed the PRNG for test reproducibility"""
    torch.random.seed()


@pytest.fixture()
def set_max_seq_len(max_seq_len=128):
    """Set the maximum sequence length that can be used for attention masking"""
    os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = f"{max_seq_len}"


@pytest.fixture(autouse=True)
def reset_global_fp8_state():
    yield
    FP8GlobalStateManager.reset()


def create_fp8_recipe():
    return recipe.DelayedScaling(margin=0, fp8_format=recipe.Format.E4M3)


def do_export(
    model: torch.nn.Module,
    inp: torch.Tensor,
    fname: str,
    use_fp8: bool = True,
    opset: int = OPSET,
    input_names: List[str] = None,
    output_names: List[str] = None,
    dynamic_axes: List[str] = None,
):
    """Export to ONNX"""
    fp8_recipe = create_fp8_recipe()
    input_names = input_names or ["input"]
    output_names = output_names or ["output"]

    with torch.inference_mode(), te.fp8_autocast(
        enabled=use_fp8, fp8_recipe=fp8_recipe
    ), warnings.catch_warnings():
        warnings.filterwarnings(action="ignore", category=torch.jit.TracerWarning, module=r".*")

        model.cuda().eval()
        os.makedirs(NVTE_TEST_ARTIFACTS_DIR, exist_ok=True)
        fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)

        inps = inp if isinstance(inp, list) or isinstance(inp, tuple) else (inp,)
        assert len(inps) == len(input_names)
        inds_to_del = [i for i in range(len(inps)) if inps[i] is None]
        input_names = [input_names[i] for i in range(len(inps)) if i not in inds_to_del]

        with te.onnx_export(True):
            torch.onnx.export(
                model,
                inps,
                fname,
                verbose=True,
                dynamic_axes=dynamic_axes,
                opset_version=opset,
                input_names=input_names,
                output_names=output_names,
                do_constant_folding=True,
                operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH,
            )


def to_numpy(tensor):
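    """Convert a tensor to a NumPy array, casting BF16 (which NumPy cannot represent) to FP32."""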
    if isinstance(tensor, torch.Tensor):
        if tensor.dtype == torch.bfloat16:
            tensor = tensor.type(torch.float32)
        tensor = tensor.detach().cpu().numpy()
    return tensor


def set_layer_scale(module: torch.nn.Module, scale: float, num_gemms: int):
    """Initialize the FP8 quantization scales in module"""
    NB_SCALES_PER_GEMM = 3  # One scale per: input, weights, and output GEMM tensors.
    nb_total_scales = num_gemms * NB_SCALES_PER_GEMM
    module.init_fp8_metadata(num_gemms)
    module.fp8_meta["scaling_fwd"].scale = (
        torch.ones(nb_total_scales, dtype=torch.float32, device="cuda") / scale
    )
    module.fp8_meta["scaling_fwd"].scale_inv = (
        torch.ones(nb_total_scales, dtype=torch.float32, device="cuda") * scale
    )


def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.Tensor], torch.Tensor], is_fp8: bool):
    """Transformer Engine forward propagation."""
    fp8_recipe = create_fp8_recipe()
    with torch.inference_mode(), te.fp8_autocast(
        enabled=is_fp8, fp8_recipe=fp8_recipe
    ), warnings.catch_warnings():
        te_outputs = model(*inps if isinstance(inps, tuple) else (inps,))
        if not isinstance(te_outputs, tuple):
            te_outputs = (te_outputs,)
        return te_outputs


def compare_outputs(
    onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname
):
    """Compare ORT and TE outputs."""
    assert len(onnx_outputs) == len(te_outputs)
    # Compare ORT and PyTorch outputs.
    for onnx_output, te_output in zip(onnx_outputs, te_outputs):
        # np.isclose: abs(a - b) <= (atol + rtol * abs(b))
        te_output = to_numpy(te_output)
        onnx_output = to_numpy(onnx_output)
        ac = ~np.isclose(onnx_output, te_output, atol=atol, rtol=rtol)
        mismatches = ac.nonzero()
        mismatched_ids = [loc for loc in zip(*mismatches)]
        if mismatched_ids:
            # Log some information in case of error.
            print("*" * 100)
            nb_errors = len(mismatched_ids)
            nb_vals = min(nb_errors, max_errors_printed)
            print(f"Detected {nb_errors} diverging values (output shape={onnx_output.shape})")
            print(f"Showing first {nb_vals} errors (ONNX -- TE):")
            abs_err = np.abs(onnx_output - te_output)
            errors = abs_err[mismatches]
            for loc in mismatched_ids[:nb_vals]:
                ref = te_output[loc]
                print(
                    f"{onnx_output[loc]} -- {te_output[loc]} err={abs_err[loc]} >"
                    f" {atol + rtol * abs(ref)}"
                )
            print(f"Max error: {np.max(errors)}")
            if nb_errors > allow_cnt_errors:
                raise ValueError(f"Output validation of {fname} failed with {nb_errors} errors")


def serialize_inputs_outputs(
    fname: str,
    inputs: Union[Tuple[torch.Tensor], torch.Tensor],
    te_outputs: List[torch.Tensor],
    input_names: Optional[List[str]] = None,
    output_names: Optional[List[str]] = None,
):
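    """Serialize test inputs and TE outputs to Polygraphy JSON files (no-op unless SAVE_TEST_IO is set)."""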
    if not SAVE_TEST_IO:
        return

    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)

    input_names = input_names or ["input"]
    output_names = output_names or ["output"]
    inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
    named_inputs = zip(input_names, inputs)
    input_data = [{k: v.cpu() for k, v in named_inputs if v is not None}]
    json_fname = fname[: -len(".onnx")] + "_inputs.json"
    save_json(input_data, json_fname, description="custom input data")

    json_fname = fname[: -len(".onnx")] + "_output.json"
    named_outputs = zip(output_names, te_outputs)
    output_data = {k: v.detach().cpu() for k, v in named_outputs if v is not None}
    custom_outputs = RunResults()
    custom_outputs.add([output_data], runner_name="custom_runner")
    custom_outputs.save(json_fname)


def validate_result(
    fname: str,
    inps: Union[Tuple[torch.Tensor], torch.Tensor],
    model: torch.nn.Module,
    atol: float = 1.0e-8,  # np.isclose default atol
    rtol: float = 1.0e-5,  # np.isclose default rtol
    max_errors_printed: int = 10,
    is_fp8: bool = False,
    allow_cnt_errors: int = 0,
    input_names: List[str] = None,
    output_names: List[str] = None,
    te_outputs: List[torch.Tensor] = None,
):
    """Compare the outputs of a Transformer Engine (TE) module vs the outputs of its ONNX
    representation using ONNX Runtime (ORT) and ensure they are close.

    The purpose of the output comparison is to validate that TE models are converted to
    their correct ONNX representation by testing that TE and ORT outputs match within some
    small threshold (allowing for finite precision errors).

    Argument `allow_cnt_errors` reduces test failure noise due to spurious errors by ignoring
    a very small number (0-3) of outliers. This is fine to do because these outliers are due to
    small kernel implementation differences between TE and ORT and do not imply an incorrect ONNX
    representation (the tests assume both ORT and TE kernels are correct).

    Argument `te_outputs` can be used to provide pre-computed TE outputs.
    """
    def create_ort_session(fname: str, is_fp8: bool):
        """Create an ONNX Runtime session for validation."""

        def load_custom_ops(session_opts: ort.SessionOptions):
            """For FP8 validation with ORT we need to load our custom FP8 Q/DQ extension."""
            if not os.path.exists(ORT_CUSTOM_OPS_LIB):
                raise FileNotFoundError(f"Unable to find {ORT_CUSTOM_OPS_LIB}")
            session_opts.register_custom_ops_library(ORT_CUSTOM_OPS_LIB)
            print("registered custom FP8 Q/DQ ops!")

        kwargs = {"providers": ["CUDAExecutionProvider", "CPUExecutionProvider"]}
        if is_fp8:
            sess_options = ort.SessionOptions()
            load_custom_ops(sess_options)
            kwargs["sess_options"] = sess_options

        s = ort.InferenceSession(fname, **kwargs)
        return s

    def create_ort_input_dict(session, inputs):
        inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
        input_names = [x.name for x in session.get_inputs()]
        inps = [to_numpy(x) for x in inputs if x is not None]
        inp_dict = dict(zip(input_names, inps))
        return inp_dict

    input_names = input_names or ["input"]
    output_names = output_names or ["output"]

    # Run ORT session and TE model.
    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
    if not te_outputs:
        te_outputs = te_infer(model, inps, is_fp8)
    ort_s = create_ort_session(fname, is_fp8)
    input_feed = create_ort_input_dict(ort_s, inps)
    onnx_outputs = ort_s.run(None, input_feed=input_feed)
    compare_outputs(
        onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname
    )


def create_meta(scale_factor: float, size: int = 1):
    """Create an FP8TensorMeta initialized with the given scale (and scale_inv = 1 / scale)."""
    meta = tex.FP8TensorMeta()
    meta.amax_history = torch.zeros(1, size, dtype=torch.float32, device="cuda")
    meta.scale_inv = torch.ones(size, dtype=torch.float32, device="cuda") / scale_factor
    meta.scale = torch.ones(size, dtype=torch.float32, device="cuda") * scale_factor
    return meta


def dtype2str(dtype: torch.dtype, fake_bf16_io=False):
    """Return a short dtype suffix used in the generated ONNX file names."""
    if fake_bf16_io:
        assert dtype == torch.bfloat16
        return "_fake_bf16"
    return {
        torch.float32: "_fp32",
        torch.float16: "_fp16",
        torch.bfloat16: "_bf16",
    }[dtype]


def as_te_type(dtype: torch.dtype):
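    """Map a torch dtype to the matching transformer_engine DType enum value."""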
    return {
        torch.float32: tex.DType.kFloat32,
        torch.float16: tex.DType.kFloat16,
        torch.bfloat16: tex.DType.kBFloat16,
    }[dtype]


def get_attn_mask_str(use_mask, attn_mask_type):
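    """Return a string describing the attention-mask configuration (used in ONNX file names)."""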
    # See FusedScaleMaskSoftmax::forward_fused_softmax for logic behind names.
    if attn_mask_type is None:
        return "_mask" if use_mask else "_no-mask"
    attn_mask_str = "_arbitrary-no-mask"
    attn_mask_str = "_causal-mask" if attn_mask_type == "causal" else attn_mask_str
    attn_mask_str = (
        "_arbitrary-mask" if use_mask and attn_mask_type == "arbitrary" else attn_mask_str
    )
    return attn_mask_str


class FP8GemmModule(nn.Module):
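    """Module that quantizes its input and weight to FP8 and runs an FP8 GEMM (optionally with bias)."""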
    def __init__(self, precision, use_bias, gelu, scale_factors, hidden_size, out_features):
        super().__init__()
        self.use_bias = use_bias
        self.gelu = gelu
        self.precision = precision

        self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
        self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
        nb_inp_scales, nb_weight_scales = 1, out_features
        act_scale_factor, weight_scale_factor = scale_factors
        self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
        self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)

        bias_size = nb_weight_scales
        self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
        self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")

        self.inp_type = tex.DType.kFloat8E4M3
        self.weights_type = tex.DType.kFloat8E4M3
        self.outp_type = precision

    def forward(self, inp, weight):
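        # Quantize the activations and weights to FP8, then run the FP8 GEMM using the
        # FP8 scaling metadata created in __init__.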
        inp_fp8 = cast_to_fp8(inp, self.meta_inp, self.fp8_tensor_inp, self.inp_type)

        weight_fp8 = cast_to_fp8(
            weight, self.meta_weight, self.fp8_tensor_weight, self.weights_type
        )

        ret, _ = fp8_gemm(
            weight_fp8,
            self.meta_weight.scale_inv,
            self.fp8_tensor_weight,
            self.inp_type,
            inp_fp8,
            self.meta_inp.scale_inv,
            self.fp8_tensor_inp,
            self.weights_type,
            self.outp_type,
            get_workspace(),
            bias=self.bias,
            use_bias=self.use_bias,
            use_split_accumulator=False,
        )
        return ret

"""
Test cases begin here.
"""


@skip_FP8
@pytest.mark.parametrize("scale_factor", [1, 224])
@pytest.mark.parametrize(
    "precision,             atol",
    [
        [torch.float32, 1e-7],
        [torch.float16, 1e-7],
        [torch.bfloat16, 5e-3],
        ["fake-torch.bfloat16", 5e-3],
    ],
)
def test_export_cast_ops(
    seed_default_rng, scale_factor: float, atol: float, precision: torch.dtype
):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    class TestFP8_QDQ(nn.Module):
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io

        def forward(self, inp):
            ret = cast_to_fp8(inp, self.meta, self.fp8_tensor, self.fp8_type)
            ret = cast_from_fp8(ret, self.meta, self.fp8_tensor, self.fp8_type, self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    inp = torch.randn(
        hidden_size, in_features, device="cuda", dtype=torch.float if fake_bf16_io else precision
    )
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"te.cast_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_QDQ(fake_bf16_io)
    do_export(model, inp, fname)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs)
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(fname, inp, model, atol=atol, is_fp8=True, te_outputs=te_outputs)


@skip_FP8
@pytest.mark.parametrize("scale_factor", [448])
@pytest.mark.parametrize(
    "precision,             atol",
    [
        [torch.float32, 1e-5],
        [torch.float16, 1e-5],
        [torch.bfloat16, 5e-3],
        ["fake-torch.bfloat16", 5e-3],
    ],
)
def test_export_gelu_fp8(scale_factor: float, precision: torch.dtype, atol: float):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    class TestFP8_Gelu(nn.Module):
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io

        def forward(self, inp):
            ret = gelu(inp, self.meta, self.fp8_tensor, self.fp8_type)
            ret = cast_from_fp8(ret, self.meta, self.fp8_tensor, self.fp8_type, self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    inp = torch.randn(
        hidden_size, in_features, device="cuda", dtype=torch.float if fake_bf16_io else precision
    )
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"te.gelu_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_Gelu(fake_bf16_io)
    do_export(model, inp, fname)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs)
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname,
            inp,
            model,
            rtol=0,
            atol=atol,
            is_fp8=True,
            allow_cnt_errors=2,
            te_outputs=te_outputs,
        )


@pytest.mark.parametrize(
    "scale_factors",
    [
        (
            224,
            224,
        ),
    ],
)
@pytest.mark.parametrize(
    "precision,             use_fp8, use_bias, use_gelu",
    [
        (torch.float32, False, False, False),
        (torch.float16, False, False, False),
        (torch.bfloat16, False, False, False),
        (torch.float32, False, True, False),
        (torch.float16, False, True, False),
        (torch.bfloat16, False, True, False),
        (torch.float32, False, True, True),
        (torch.float16, False, True, True),
        (torch.bfloat16, False, True, True),
        # For FP8 GEMM GeLU is not used.
        (torch.float32, True, False, False),
        (torch.float16, True, False, False),
        (torch.bfloat16, True, False, False),
        # When enabling bias we must use float16 or bfloat16 (because of kernel limitations)
        (torch.float16, True, True, False),
        (torch.bfloat16, True, True, False),
    ],
)
def test_export_gemm(
    seed_default_rng,
    precision,  # Precision of inputs, weights, output and bias
    use_fp8,
    use_bias,
    use_gelu,
    scale_factors,
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    class Test_GEMM(nn.Module):
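        """Reference (non-FP8) GEMM wrapper around tex.gemm, with optional bias and GeLU."""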
        def __init__(self, precision, use_bias=False, gelu=False):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            bias_size = out_features
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")

        def forward(self, inp, weight):
            outp_type = self.precision

            # note: due to logic in lines 104:116 and L129 in cpp_extensions.py
            # it appears either bias OR gelu can be activated, not both
            ret, _, _ = gemm(
                weight,
                inp,
                outp_type,
                get_workspace(),
                # test bias
                bias=self.bias,
                use_bias=self.use_bias,
                # test gelu
                gelu=self.gelu,
                gelu_input=self.gelu_input,
                grad=False,  # only True for backward pass
                accumulate=False,
            )
            return ret

    # If gelu is applied then bias must be added, as defined by TE kernel.
    if use_gelu:
        assert use_bias

    # Set dimensions (these are arbitrary).
    out_features = 128
    hidden_size = 256
    in_features = 64
    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    weight = torch.randn(out_features, in_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    gelu_str = "_gelu" if use_gelu else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.gemm{fp8_str}{bias_str}{gelu_str}{high_prec_str}.onnx"
    input_names = ["input", "weight"]
    if use_fp8:
        model = FP8GemmModule(
            precision, use_bias, use_gelu, scale_factors, hidden_size, out_features
        )
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
        serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
        if precision != torch.bfloat16:
            validate_result(
                fname,
                (inp, weight),
                model,
                rtol=1e-2,
                atol=2e-2,
                is_fp8=True,
                input_names=input_names,
                te_outputs=te_outputs,
            )
    else:
        model = Test_GEMM(precision, use_bias, use_gelu)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
        serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
        if precision != torch.bfloat16:
            validate_result(
                fname,
                (inp, weight),
                model,
                rtol=1e-2,
                atol=2e-2,
                input_names=input_names,
                te_outputs=te_outputs,
            )


@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize(
    "use_fp8, precision,             atol",
    [
        [False, torch.float32, 1e-7],
        [False, torch.float16, 1e-7],
        [False, torch.bfloat16, 1e-7],
        [False, "fake-torch.bfloat16", 1e-7],
        [True, torch.float32, 1e-7],
        [True, torch.float16, 1e-7],
        [True, torch.bfloat16, 1e-2],
        [True, "fake-torch.bfloat16", 1e-2],
    ],
)
def test_export_layernorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    atol: float,
):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]

    class Test_Layernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            eps = 1e-6  # An arbitrary small value
            dtype = torch.float if fake_bf16_io else precision
            self.ln = (
                te.LayerNorm(
                    inp_shape[1], eps, params_dtype=dtype, zero_centered_gamma=zero_centered_gamma
                )
                .eval()
                .cuda()
            )

        def forward(self, inp):
            ret = self.ln(inp)
            return ret

    class TestFP8_Layernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(
                *normalized_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision
            )
            self.bias = torch.zeros(
                *normalized_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision
            )
            self.eps = 1e-6  # An arbitrary small value

            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3

        def forward(self, inp):
            ret = texcpp.layernorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                0,
                zero_centered_gamma,
            )
            ret = cast_from_fp8(
                ret, self.meta, self.fp8_tensor, self.fp8_type, as_te_type(precision)
            )
            if fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    model = TestFP8_Layernorm() if use_fp8 else Test_Layernorm()
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs
        )

@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize(
    "use_fp8, precision,             atol",
    [
        [False, torch.float32, 1e-7],
        [False, torch.float16, 1e-7],
        [False, torch.bfloat16, 1e-7],
        [False, "fake-torch.bfloat16", 1e-7],
        [True, torch.float32, 1e-7],
        [True, torch.float16, 1e-7],
        [True, torch.bfloat16, 1e-2],
        [True, "fake-torch.bfloat16", 1e-2],
    ],
)
def test_export_rmsnorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    atol: float,
):
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision

    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]

    class Test_RMSnorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            eps = 1e-6  # An arbitrary small value
            dtype = torch.float if fake_bf16_io else precision
            self.ln = (
                te.RMSNorm(
                    inp_shape[1], eps, params_dtype=dtype, zero_centered_gamma=zero_centered_gamma
                )
                .eval()
                .cuda()
            )

        def forward(self, inp):
            ret = self.ln(inp)
            return ret

    class TestFP8_RMSnorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(
                *normalized_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision
            )
            self.eps = 1e-6  # An arbitrary small value

            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3

        def forward(self, inp):
            ret = texcpp.rmsnorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                0,
                zero_centered_gamma,
            )
            ret = cast_from_fp8(
                ret, self.meta, self.fp8_tensor, self.fp8_type, as_te_type(precision)
            )
            if fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret

    inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    model = TestFP8_RMSnorm() if use_fp8 else Test_RMSnorm()
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    fname = f"te.rmsnorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs
        )

@pytest.mark.parametrize("scale_factor", [1])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize(
    "precision,      use_bias",
    [
        (torch.float32, False),
        (torch.float32, True),
        (torch.float16, False),
        (torch.float16, True),
        # Todo: cannot configure BF16 when bias is disabled (ORT issue?)
        (torch.bfloat16, False),
        # Todo: cannot configure BF16 when bias is enabled (ORT issue?)
        (torch.bfloat16, True),
    ],
)
def test_export_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    precision: torch.dtype,
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256

    class Test_Linear(nn.Module):
        def __init__(self, in_features, out_features, use_bias, return_bias, precision):
            super().__init__()
            self.linear = te.Linear(
                in_features,
                out_features,
                bias=use_bias,
                return_bias=return_bias,
                params_dtype=precision,
            )

        def forward(self, inp):
            ret = self.linear(inp)
            return ret

    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.linear{fp8_str}{bias_str}{high_prec_str}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = Test_Linear(in_features, out_features, use_bias, return_bias, precision).to(
            device="cuda"
        )
        if use_fp8:
            set_layer_scale(model.linear, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)

        if precision in (torch.bfloat16,):
            return
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
        else:
            validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8, te_outputs=te_outputs)


@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision,      use_bias",
    [
        (torch.float32, False),
        (torch.float32, True),
        (torch.float16, True),
        (torch.float16, False),
        (torch.bfloat16, True),
        (torch.bfloat16, False),
    ],
)
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    normalization: str,
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256

    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_linear{fp8_str}{bias_str}{high_prec_str}.onnx"

    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormLinear(
            hidden_size,
            3 * hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
            normalization=normalization,
        ).to(device="cuda")
        if use_fp8:
            set_layer_scale(model, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        if precision in (torch.bfloat16,):
            return
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
        elif precision != torch.bfloat16:
            validate_result(fname, inp, model, atol=1e-6, is_fp8=use_fp8, te_outputs=te_outputs)


@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision,      use_bias",
    [
        (torch.float32, False),
        (torch.float32, True),
        (torch.float16, True),
        (torch.float16, False),
        (torch.bfloat16, True),
        (torch.bfloat16, False),
    ],
)
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_mlp(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    activation: str,
    normalization: str,
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256
    ffn_hidden_size = 256

    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_mlp{fp8_str}{bias_str}{high_prec_str}_{activation}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormMLP(
            hidden_size,
            ffn_hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
            activation=activation,
            normalization=normalization,
        ).to(device="cuda")
        if use_fp8:
            set_layer_scale(model, scale_factor, num_gemms=2)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        if precision in (torch.bfloat16,):
            return
        atol = 1e-6 if use_fp8 else (5e-1 if activation == "swiglu" else 1e-3)
        validate_result(fname, inp, model, atol=atol, is_fp8=use_fp8, te_outputs=te_outputs)


@skip_FP8
@pytest.mark.parametrize(
    "precision,      use_mask, attn_mask_type",
    [
        (torch.float32, True, "arbitrary"),  # calls forward_torch_softmax (apply user mask)
        (torch.float32, False, "no_mask"),  # calls forward_torch_softmax (apply no mask)
        (torch.float16, False, "causal"),  # calls forward_torch_softmax (apply dynamic onnx mask)
        (torch.float16, True, "arbitrary"),  # calls forward_torch_softmax (apply user mask)
        (torch.float16, False, "no_mask"),  # calls forward_torch_softmax (apply no mask)
        (torch.bfloat16, False, "causal"),  # calls forward_torch_softmax (apply dynamic onnx mask)
        (torch.bfloat16, True, "arbitrary"),  # calls forward_torch_softmax (apply user mask)
        (torch.bfloat16, False, "no_mask"),  # calls forward_torch_softmax (apply no mask)
    ],
)
def test_export_core_attention(
    seed_default_rng,
    set_max_seq_len,
    precision: torch.dtype,
    use_mask: bool,
    attn_mask_type: str,
):
    # Set dimensions (these are arbitrary).
    seq_len, batch_size, num_attention_heads, kv_channels = (64, 4, 1, 64)
    qkv_size = (seq_len, batch_size, num_attention_heads, kv_channels)
    qkv_format = "sbhd"

    query_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    key_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    value_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    input_names = ["query", "key", "value", "attention_mask"]
    attention_mask = None
    if use_mask:
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(batch_size, 1, 1, seq_len, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    inp = (query_layer, key_layer, value_layer, attention_mask)

    mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    high_prec_str = dtype2str(precision)
    fname = f"te.core_attention{mask_str}{high_prec_str}.onnx"

    model = te.attention.DotProductAttention(
        num_attention_heads=num_attention_heads,
        kv_channels=kv_channels,
        attention_dropout=0.5,
        qkv_format=qkv_format,
        attn_mask_type=attn_mask_type,
    ).to(device="cuda")
    do_export(model, inp, fname, input_names=input_names, use_fp8=True)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision in (torch.bfloat16,):
        return
    validate_result(
        fname, inp, model, is_fp8=True, atol=1e-2, input_names=input_names, te_outputs=te_outputs
    )


test_configs_multihead_attention = [
    # "use_mask, attn_mask_type"
    (False, "no_mask"),  # calls ScaledSoftmax
    (True, "arbitrary"),  # calls ScaledMaskedSoftmax
]
test_configs_attention_type = [
    # "input_layernorm, attention_type, fuse_qkv_params"
    (True, "self", True),
    (False, "self", True),
    (True, "self", False),
    (False, "self", False),
    (True, "cross", True),
    (False, "cross", True),
    (True, "cross", False),
    (False, "cross", False),
]


@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "input_layernorm, attention_type, fuse_qkv_params", test_configs_attention_type
)
def test_export_multihead_attention(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    precision: torch.dtype,
    return_layernorm_output: bool,
    input_layernorm: bool,
    attention_type: str,
    fuse_qkv_params: bool,
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    hidden_size = 256
    sequence_length = 128
    batch_size = 4
    num_attention_heads = 32
    kv_channels = 8
    attention_dropout = 0.1
    layernorm_epsilon = 1e-5
    init_method = output_layer_init_method = get_default_init_method()
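    # Positional arguments passed to te.MultiheadAttention below.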
    attention_args = (
        hidden_size,
        num_attention_heads,
        kv_channels,
        attention_dropout,
        layernorm_epsilon,
        init_method,
        output_layer_init_method,
    )

    hidden_states_context = torch.randn(
        sequence_length, batch_size, hidden_size, dtype=precision, device="cuda"
    )
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(
            batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision
        )
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)

    encoder_output = None

    if attention_type == "cross":
        encoder_output = torch.randn(
            sequence_length, batch_size, hidden_size, dtype=precision, device="cuda"
        )

    fp8_str = "_fp8" if use_fp8 else ""
    dtype_str = dtype2str(precision)
    attn_type_str = "_self-attention" if attention_type == "self" else "_cross-attention"
    fuse_qkv_str = "_fused-qkv" if fuse_qkv_params else ""
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    input_ln_str = "_input-ln" if input_layernorm else ""
    fname = f"te.multihead_attention{fp8_str}{attn_mask_str}{attn_type_str}{input_ln_str}{fuse_qkv_str}{dtype_str}.onnx"

    model = te.MultiheadAttention(
        *attention_args,
        attn_mask_type=attn_mask_type,
        params_dtype=precision,
        return_layernorm_output=return_layernorm_output,
        input_layernorm=input_layernorm,
        attention_type=attention_type,
        fuse_qkv_params=fuse_qkv_params,
        return_bias=True,
    ).to(device="cuda")

    inp_context = (hidden_states_context, attention_mask, encoder_output)
    input_names = ["hidden_states", "attention_mask", "encoder_output"]
    output_names = ["attention_output", "attention_bias"]
    do_export(
        model,
        inp_context,
        fname,
        use_fp8,
        input_names=input_names,
        output_names=output_names,
        dynamic_axes={
            "hidden_states": {0: "seq", 1: "bs"},
            "attention_output": {0: "seq", 1: "bs"},
        },
    )
    te_outputs = te_infer(model, inp_context, is_fp8=use_fp8)
    serialize_inputs_outputs(
        fname, inp_context, te_outputs, input_names=input_names, output_names=output_names
    )
    if precision in (torch.bfloat16,):
        return

    if not use_fp8:
        validate_result(
            fname,
            inp_context,
            model,
            atol=1e-3,
            input_names=input_names,
            output_names=output_names,
            te_outputs=te_outputs,
        )
    else:
        validate_result(
            fname,
            inp_context,
            model,
            atol=1e-2,
            is_fp8=use_fp8,
            input_names=input_names,
            output_names=output_names,
            allow_cnt_errors=3,
            te_outputs=te_outputs,
        )

    # In GPT generative phase (inference) the input sequence is smaller than the maximum
    # allowed sequence length and we want to test this condition.
    # Pretend that we're in generative phase when it makes sense (causal mask and self-attention).
    is_generative_phase = attn_mask_type == "causal" and attention_type == "self"
    if is_generative_phase:
        seq_len_offset = 8
        hidden_states_generative = torch.randn(
            sequence_length - seq_len_offset,
            batch_size,
            hidden_size,
            dtype=precision,
            device="cuda",
        )
        inp_generative = (hidden_states_generative, attention_mask, encoder_output)
        if not use_fp8:
            validate_result(
                fname,
                inp_generative,
                model,
                atol=1e-3,
                input_names=input_names,
                output_names=output_names,
            )
        else:
            validate_result(
                fname,
                inp_generative,
                model,
                atol=1e-2,
                is_fp8=use_fp8,
                input_names=input_names,
                output_names=output_names,
                allow_cnt_errors=3,
            )


@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize(
    "output_layernorm",
    [
        # True, # TO DO: handle this
        False
    ],
)
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("fuse_qkv_params", [False, True])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
def test_export_transformer_layer(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    output_layernorm: bool,
    precision: torch.dtype,
    fuse_qkv_params: bool,
    zero_centered_gamma: bool,
    activation: str,
):
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4

    input_tensor = torch.rand(
        sequence_length, batch_size, hidden_size, dtype=precision, device="cuda"
    )
    input_names = ["input", "attention_mask"]
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(
            batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision
        )
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    inp = (input_tensor, attention_mask)

    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}_{activation}.onnx"

    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma,
        activation=activation,
    ).to(device="cuda")
    do_export(model, inp, fname, use_fp8, input_names=input_names)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision in (torch.bfloat16,):
        return

    atol = 5e-1 if use_fp8 else (5e-1 if activation == "swiglu" else 1e-3)
    validate_result(
        fname, inp, model, atol=atol, is_fp8=use_fp8, input_names=input_names, te_outputs=te_outputs
    )

@pytest.mark.parametrize("use_fp8", [True])
@pytest.mark.parametrize("ln_scale_factor", [448 * 2])
@pytest.mark.parametrize(
    "gemm_scale_factors",
    [
        (
            224,
            224,
        ),
    ],
)
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_gemm_layernorm(
    seed_default_rng,
    use_fp8: bool,
    ln_scale_factor: float,
    gemm_scale_factors: Tuple[float, float],
    precision: torch.dtype,
    zero_centered_gamma: bool,
):
    """This is a regression test for testing that all LN inputs have the same type.

    The test sets up GEMM with FP32 output which feeds into an LN that is configured
    with FP16 or BF16 weights and bias.
    """
    out_features = 128
    hidden_size = 128
    in_features = 128

    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    class TestFP8_GemmLayernorm(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda")
            self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda")
            self.eps = 1e-6  # An arbitrary small value

            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(ln_scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.gemm = FP8GemmModule(
                precision,
                use_bias=False,
                gelu=False,
                scale_factors=gemm_scale_factors,
                hidden_size=hidden_size,
                out_features=out_features,
            )

        def forward(self, inp, weight):
            x = self.gemm(inp, weight)
            x = texcpp.layernorm_fwd_fp8_inf(
                x,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                0,
                zero_centered_gamma,
            )
            x = cast_from_fp8(
                x,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                tex.DType.kFloat32 if precision == torch.float32 else tex.DType.kFloat16,
            )
            return x

    inp = torch.randn(hidden_size, in_features, dtype=precision, device="cuda")
    weight = torch.randn(out_features, in_features, dtype=precision, device="cuda")
    model = TestFP8_GemmLayernorm()
    high_prec_str = dtype2str(precision)
    fp8_str = f"_fp8" if use_fp8 else ""
    fname = f"te.gemm_layernorm{fp8_str}{high_prec_str}.onnx"
    input_names = ["input", "weight"]
    do_export(model, (inp, weight), fname, use_fp8=use_fp8, input_names=input_names)
    te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
    serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
    if precision not in (torch.bfloat16,):
        validate_result(
            fname,
            (inp, weight),
            model,
            atol=5e-2,
            is_fp8=use_fp8,
            allow_cnt_errors=2,
            input_names=input_names,
            te_outputs=te_outputs,
        )


@skip_FP8
@pytest.mark.parametrize("use_fp8", [True, False])
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [True])
def test_export_gpt_generation(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
):
    """Test that the ONNX model can correctly handle inputs with different shapes and that
    the attention mask is adjusted on-the-fly to different sequence lengths.
    """

    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)

    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4
    attention_mask = None
    use_mask = True
    attn_mask_type = "causal"
    fuse_qkv_params = True
    output_layernorm = False

    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer_generative{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}.onnx"

    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma,
    ).to(device="cuda")

    # "Context phase": use full input sequence length
    input_names = ["input"]
    output_names = ["output"]
    input_tensor = torch.rand(
        sequence_length, batch_size, hidden_size, dtype=precision, device="cuda"
    )
    inp = (input_tensor,)
    do_export(
        model,
        inp,
        fname,
        use_fp8,
        input_names=input_names,
        output_names=output_names,
        dynamic_axes={
            "input": {0: "seq", 1: "bs"},
            "output": {0: "seq", 1: "bs"},
        },
    )
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(
        fname, inp, te_outputs, input_names=input_names, output_names=output_names
    )
    if precision not in (torch.bfloat16,):
        validate_result(
            fname,
            inp,
            model,
            atol=6e-3,
            is_fp8=use_fp8,
            input_names=input_names,
            te_outputs=te_outputs,
        )

    # "Generative phase": use a single input (sequence len=1). For FP8 we need to pad the sequence to mult of 8.
    sequence_length = 1 if not use_fp8 else 8
    input_tensor = torch.rand(
        sequence_length, batch_size, hidden_size, dtype=precision, device="cuda"
    )
    inp = (input_tensor, attention_mask)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision not in (torch.bfloat16,):
        validate_result(
            fname,
            inp,
            model,
            atol=6e-3,
            is_fp8=use_fp8,
            input_names=input_names,
            te_outputs=te_outputs,
        )


@pytest.mark.parametrize("enabled", [True, False])
def test_export_ctx_manager(enabled):
    assert is_in_onnx_export_mode() == False
    with te.onnx_export(enabled):
        assert is_in_onnx_export_mode() == enabled
    assert is_in_onnx_export_mode() == False